query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Given a Voronoi object, this method computes the volume of each Voronoi cell, classifying a cell as a boundary cell if one of its vertex indices is -1 (an unbounded region) or if any of its vertices lies outside the domain of interest (see the usage sketch after this row) | def voronoiVolumes(self, vor):
volumes = np.array([])
data = vor.points
limits = [[np.min(data[:, 0]), np.max(data[:, 0])], [np.min(data[:, 1]), np.max(data[:, 1])], [np.min(data[:, 2]), np.max(data[:, 2])]]
nonB = [False for _ in data]
for i, region in enumerate(vor.point_region):
indices = vor.regions[region]
if -1 not in indices:
v = vor.vertices[indices]
isWithin = self.checkVertices(v, limits)
if isWithin:
volumes = np.append(volumes, ConvexHull(v).volume)
nonB[i] = True
return volumes, nonB | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def voronoi(geometry,\n pore_volume='pore.volume',\n **kwargs):\n from scipy.special import cbrt\n pore_vols = geometry[pore_volume]\n value = cbrt(6*pore_vols/_sp.pi)\n return value",
"def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary",
"def pyscal_voronoi_volume(self):\n return analyse_voronoi_volume(atoms=self._structure)",
"def get_voronoi_vertices(self, epsilon=2.5e-4, distance_threshold=0, width_buffer=10):\n voro = Voronoi(self._structure.get_extended_positions(width_buffer)+epsilon)\n xx = voro.vertices\n if distance_threshold > 0:\n cluster = AgglomerativeClustering(\n linkage='single',\n distance_threshold=distance_threshold,\n n_clusters=None\n )\n cluster.fit(xx)\n xx = get_average_of_unique_labels(cluster.labels_, xx)\n xx = xx[np.linalg.norm(xx-self._structure.get_wrapped_coordinates(xx, epsilon=0), axis=-1)<epsilon]\n return xx-epsilon",
"def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()",
"def voronoi_polygons(self, voronoi, radius=None):\n\n if voronoi.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = voronoi.vertices.tolist()\n\n center = voronoi.points.mean(axis=0)\n if radius is None:\n radius = voronoi.points.ptp().max() * 2\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(voronoi.ridge_points, voronoi.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(voronoi.point_region):\n vertices = voronoi.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = voronoi.points[p2] - voronoi.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # hyper\n\n midpoint = voronoi.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = voronoi.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)",
"def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index",
"def voronoi(points, buffer_percent=100):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Create fake sitepoints around the point extent to correct for infinite polygons\n # For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity\n xs,ys = list(zip(*uniqpoints))[:2]\n pointswidth = max(xs) - min(xs)\n pointsheight = max(ys) - min(ys)\n xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )\n midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )\n #bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer\n bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer\n classpoints.extend([_Point(*corner) for corner in bufferbox])\n\n # Compute Voronoi\n vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)\n\n # Turn unordered result edges into ordered polygons\n polygons = list()\n for sitepoint,polyedges in list(poly_dict.items()):\n polyedges = [edge[1:] for edge in polyedges]\n poly = list()\n firststart,firstend = polyedges.pop(0)\n poly.append(firstend)\n while polyedges:\n curend = poly[-1]\n for i,other in enumerate(polyedges):\n otherstart,otherend = other\n if otherstart == curend:\n poly.append(otherend)\n ##print otherstart,otherend\n polyedges.pop(i)\n break\n elif otherend == curend:\n ##print otherend,otherstart\n poly.append(otherstart)\n polyedges.pop(i)\n break\n # Get vertices from indexes\n try: sitepoint = uniqpoints[sitepoint]\n except IndexError:\n sitepoint = None # fake bbox sitepoints shouldnt be in the results\n poly = [vertices[vi] for vi in poly if vi != -1]\n polygons.append((sitepoint, poly))\n\n # Maybe clip parts of polygons that stick outside screen?\n # ...\n\n return polygons",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Define a list to hold Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min),\n int(north + d_north + safety_distance - north_min),\n int(east - d_east - safety_distance - east_min),\n int(east + d_east + safety_distance - east_min),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n # create a voronoi graph based on\n # location of obstacle centres\n graph = Voronoi(points)\n # check each edge from graph.ridge_vertices for collision\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]].astype(int)\n p2 = graph.vertices[v[1]].astype(int)\n # test each pair p1 and p2 for collision using Bresenham\n # If the edge does not hit an obstacle add it to the list\n in_collision = False\n ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])\n for b in ridgeline:\n # eliminate out of range points in the line\n if b[0] < 0 or b[0] >= grid.shape[0]:\n in_collision = True\n break\n if b[1] < 0 or b[1] >= grid.shape[1]:\n in_collision = True\n break\n # check if grid cell is an obstacle\n if grid[b[0], b[1]] == 1:\n in_collision = True\n break\n # keep ridge points not in collision\n if not in_collision:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges",
"def voronoi_tesellation_box(boundary,lng,lat):\n # array with points coordinates\n points = np.zeros((lng.shape[0],2))\n points[:,0] = lng\n points[:,1] = lat\n\n # compute Voronoi tesselation\n vor = Voronoi(points)\n \n # Reconstruct infinite voronoi regions in a 2D diagram to finite regions.\n regions, vertices = voronoi_finite_polygons_2d(vor)\n \n # build box from country boundary\n xmin = boundary.bounds.minx[0]\n xmax = boundary.bounds.maxx[0]\n ymin = boundary.bounds.miny[0]\n ymax = boundary.bounds.maxy[0]\n\n box = Polygon([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])\n\n voronoid = [] \n for region in regions:\n polygon = vertices[region]\n # Clipping polygon\n poly = Polygon(polygon)\n voronoid.append(poly.intersection(box))\n \n voronoid = gpd.GeoDataFrame(geometry = voronoid)\n \n vor_lng = vor.points[:,0]\n vor_lat = vor.points[:,1]\n \n voronoid['lng'] = vor_lng\n voronoid['lat'] = vor_lat\n \n return voronoid",
"def voronoi(geometry,\n network,\n propname,\n **params):\n print('voronoi: nothing yet')",
"def volumePDF(self, maxVar=-1, bins=75, threshold=1):\n print('Cluster Identification Based on Voronoi Volumes')\n start = time.time()\n self.vor = Voronoi(self.data)\n self.volumes, self.nonB = self.voronoiVolumes(self.vor)\n self.nonBI = np.arange(0, len(self.vor.point_region))[self.nonB]\n self.volumes_sorted = np.sort(self.volumes)\n self.oldOrder = np.argsort(self.volumes)\n\n if maxVar > 0:\n means = [np.mean(self.volumes_sorted)]\n varMean = []\n topV = -1\n #Discard some very big Voronoi cells which unnecessarily alter the mean volume. Stop once the mean volume does\n #not vary more than maxVar with an elimination of these large cells. Deactivate this part with maxVar= < 0\n for i in range(250):\n volumes = self.volumes_sorted[:-(i + 1)]\n means.append(np.mean(volumes))\n varM = (means[-1] - means[-2])/means[-2]\n varMean.append(varM)\n if np.abs(varM) < maxVar and topV == -1:\n topV = -(i + 1)\n self.oldOrder = self.oldOrder[:topV]\n self.volumes_sorted = self.volumes_sorted[:topV]\n\n self.V = self.volumes_sorted/np.mean(self.volumes_sorted)\n self.bins = np.logspace(np.log(np.min(self.V)), np.log(np.max(self.V)), bins)\n\n self.PDF, _ = np.histogram(self.V, bins=self.bins, density=True)\n self.bins = (self.bins[1:] + self.bins[:-1]) / 2\n\n self.RandomPDF = self.PoissonPDF(self.bins)\n self.intersectPDFs(threshold=threshold)\n self.assignLabels()\n self.times[0] = time.time() - start\n print('Elapsed Time: ' + str(round(time.time() - start, 3)))",
"def calc_convex_hull_volume(df):\n df = sample_by_cell_size(df, 16)\n grid_coordinates = df[['grid_x', 'grid_y']]\n grid_coordinates_arr = np.array(grid_coordinates)\n hull = ConvexHull(grid_coordinates_arr)\n return hull.volume",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Initialize an empty list for Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n graph = Voronoi(points)\n\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n if not hit:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges, int(north_min), int(east_min)",
"def compute_voronoi_centroid_volume(vertices):\n from scipy.spatial import Delaunay, ConvexHull\n\n tess = Delaunay(vertices)\n dimension = np.shape(vertices)[1]\n\n w = np.zeros((tess.nsimplex, 1))\n cent = np.zeros((tess.nsimplex, dimension))\n for i in range(tess.nsimplex):\n # pylint: disable=E1136\n ch = ConvexHull(tess.points[tess.simplices[i]])\n w[i] = ch.volume\n cent[i, :] = np.mean(tess.points[tess.simplices[i]], axis=0)\n\n volume = np.sum(w)\n centroid = np.matmul(np.divide(w, volume).T, cent)\n\n return centroid, volume",
"def find_open_edges_voronoi(graph, grid):\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n # First check if we're off the map\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n # Next check if we're in collision\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n # If the edge does not hit on obstacle\n # add it to the list\n if not hit:\n # array to tuple for future graph creation step)\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n return edges",
"def graph_from_voronoi(vor, geometry):\n graph = nx.Graph()\n for i in vor.ridge_vertices:\n if i[0]>-1 and i[1]>-1:\n point1 = Point(vor.vertices[i][0])\n point2 = Point(vor.vertices[i][1])\n # Eliminate all points outside our geometry.\n if point1.within(geometry) and point2.within(geometry):\n dist = point1.distance(point2)\n graph.add_nodes_from([i[0], i[1]])\n graph.add_edge(i[0], i[1], weight=dist)\n return graph",
"def voronoi_to_image(v, value):\n\n # First create the image.\n im = np.zeros_like(v['binnum'])\n \n # We skip the first bin\n for i in range(1, v['n_bin']+1):\n if (v['area'][i] > 0):\n im[v['reverse'][i]] = value[i-1]\n\n return im",
"def exportVoronoiRegions(self):\n # Remember to compute circumcircles if not done before\n # for t in self.triangles:\n # self.circles[t] = self.circumcenter(t)\n useVertex = {i: [] for i in range(len(self.coords))}\n vor_coors = []\n index = {}\n # Build a list of coordinates and a index per triangle/region\n for tidx, (a, b, c) in enumerate(self.triangles):\n vor_coors.append(self.circles[(a, b, c)][0])\n # Insert triangle, rotating it so the key is the \"last\" vertex\n useVertex[a] += [(b, c, a)]\n useVertex[b] += [(c, a, b)]\n useVertex[c] += [(a, b, c)]\n # Set tidx as the index to use with this triangles\n index[(a, b, c)] = tidx\n index[(c, a, b)] = tidx\n index[(b, c, a)] = tidx\n\n # init regions per coordinate dictionary\n regions = {}\n # Sort each region in a coherent order, and substitude each triangle\n # by its index\n for i in range(4, len(self.coords)):\n v = useVertex[i][0][0] # Get a vertex of a triangle\n r = []\n for _ in range(len(useVertex[i])):\n # Search the triangle beginning with vertex v\n t = [t for t in useVertex[i] if t[0] == v][0]\n r.append(index[t]) # Add the index of this triangle to region\n v = t[1] # Choose the next vertex to search\n regions[i-4] = r # Store region.\n\n return vor_coors, regions",
"def generate_voronoi_diagram(\n num_cells, width, height\n) -> Tuple[Voronoi, List[List[int]]]:\n # Make up data points\n points = np.random.rand(num_cells - 4, 2)\n default = np.array(\n [\n np.array([0.0, 0.0]),\n np.array([1.0, 0.0]),\n np.array([0.0, 1.0]),\n np.array([1.0, 1.0]),\n ]\n )\n points = np.concatenate((points, default), axis=0)\n # Scale them\n points = scale_points(points, width, height)\n # Compute Voronoi tesselation\n vor = Voronoi(points)\n # Plot\n voronoi_plot_2d(vor)\n return vor, points",
"def voronoi(x, centers):\n if np.size(x) == x.shape[0]:\n x = np.reshape(x, (np.size(x), 1))\n if np.size(centers) == centers.shape[0]:\n centers = np.reshape(centers, (np.size(centers), 1))\n if x.shape[1] != centers.shape[1]:\n raise ValueError(\"Inconsistent dimensions for x and centers\")\n\n return _EStep(x, centers)[0]",
"def CellBoundary(self, p_int, , vtkIdList):\n ...",
"def voronoi_finite_polygons_2d(self, vor, radius=None):\n # print(\"Running voronoi_finite_polygons_2d\")\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n new_regions = []\n new_vertices = vor.vertices.tolist()\n new_ridge_vertices = []\n vor_ridge_vertices = vor.ridge_vertices\n for p in vor_ridge_vertices:\n if all(i >= 0 for i in p):\n new_ridge_vertices.append(p)\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points,\n vor.ridge_vertices):\n all_ridges.setdefault(\n p1, []).append((p2, v1, v2))\n all_ridges.setdefault(\n p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region): # p1 is a counter (0,1, etc), region is the region \"name (label)\" for the p1th point\n vertices = vor.regions[region] # Returns the vertices that corresponds to the \"region_th\" region. Region starts at 1\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n # reconstruct a non-finite region\n ridges = all_ridges[p1] # Get a list of all ridges surrounding that point [(p2, v1, v2)]\n new_region = [v for v in vertices if v >= 0] # new_region contains all the finite vertices from std vor\n for p2, v1, v2 in ridges:\n if v2 < 0: # Why is this here? Just to flip order?\n v1, v2 = v2, v1\n if v1 >= 0: # v1 is always the one that could be at infinity\n # finite ridge: already in the region\n continue\n # Compute the missing endpoint of an\n # infinite ridge\n t = vor.points[p2] - \\\n vor.points[p1] # tangent\n t /= np.linalg.norm(t) # Normalize\n n = np.array([-t[1], t[0]]) # normal\n midpoint = vor.points[[p1, p2]]. \\\n mean(axis=0)\n direction = np.sign(\n np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + \\\n direction * radius\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n new_ridge_vertices.append([v2, len(new_vertices)-1])\n\n # Sort region counterclockwise.\n vs = np.asarray([new_vertices[v]\n for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(\n vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[\n np.argsort(angles)]\n new_regions.append(new_region.tolist())\n return new_regions, np.asarray(new_vertices), new_ridge_vertices",
"def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n # print(p1, region)\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)",
"def load_voronoi_binning(name, VoronoiSN):\n \n voronoi_file = _voronoi_filename(name, VoronoiSN)\n try:\n hdul = fits.open(voronoi_file)\n mainheader = hdul[0].header\n snlimit = mainheader['SNLIMIT']\n binnum = hdul['BINNUMBER'].data\n hdr = hdul['BINNUMBER'].header\n mask = hdul['MASK'].data\n hdul.close()\n n_bin = np.max(binnum).astype(int)\n\n # And we will need the reverse lookup. This is inefficient\n # in python compared to IDL.\n r = []\n area = []\n for i in range(n_bin+1):\n inds = np.where(binnum == i)\n r.append(inds)\n area.append(len(inds))\n \n\n result = {'binnum': binnum, 'snlimit': snlimit, 'header': hdr,\n 'mask': mask, 'n_bin': n_bin, 'reverse': r, 'area': area}\n except:\n print(\"An error occurred when trying to load the Voronoi binning\")\n raise\n result = None\n\n return result",
"def _voronoi_finite_polygons_2d(vor, radius=None):\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n # Compute the missing endpoint of an infinite ridge\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n # finish\n new_regions.append(new_region.tolist())\n return new_regions, np.asarray(new_vertices)",
"def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)",
"def Voronoi_cell(hull, centers, vertex, original_fan):\n # make a copy so that the original does not get mutated\n fan = copy.deepcopy(original_fan)\n\n # start by moving the center\n # of the first one in the list of adjacent faces\n # to the resulting polygon\n simplex = fan.pop(0)\n result = [centers[simplex]]\n\n # find the vertices of this face\n simplex_vertices = hull.simplices[simplex]\n\n # there should only be two other vertices on this face\n # other than our given vertex\n\n # pick one of them and mark it 'known'\n # the other one will be common to the next simplex to consider\n known_vertex, common_vertex = [x for x in simplex_vertices if x != vertex]\n\n while fan:\n # the collection of faces is not exhausted yet\n assert known_vertex in simplex_vertices\n\n known_vertex_index = list(simplex_vertices).index(known_vertex)\n\n # next simplex to consider\n # it is the simplex which is opposite to the known vertex\n simplex = hull.neighbors[simplex][known_vertex_index]\n\n assert simplex in fan\n\n # now move its center to our resulting polygon\n fan.remove(simplex)\n result.append(centers[simplex])\n\n # and repeat the process\n simplex_vertices = hull.simplices[simplex]\n known_vertex = common_vertex\n\n # of the three vertices of the simplex\n # one should be our given vertex\n # and one should already have been processed\n remaining = [x for x in hull.simplices[simplex]\n if x != vertex and x != known_vertex]\n\n assert len(remaining) == 1\n common_vertex = remaining[0]\n\n return numpy.array(result)",
"def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)",
"def voronoi_finite_polygons_2d(vor, radius=None):\n\n if vor.points.shape[1] != 2:\n raise ValueError(\"Requires 2D input\")\n\n new_regions = []\n new_vertices = vor.vertices.tolist()\n\n center = vor.points.mean(axis=0)\n if radius is None:\n radius = vor.points.ptp().max()\n\n # Construct a map containing all ridges for a given point\n all_ridges = {}\n for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):\n all_ridges.setdefault(p1, []).append((p2, v1, v2))\n all_ridges.setdefault(p2, []).append((p1, v1, v2))\n\n # Reconstruct infinite regions\n for p1, region in enumerate(vor.point_region):\n vertices = vor.regions[region]\n\n if all(v >= 0 for v in vertices):\n # finite region\n new_regions.append(vertices)\n continue\n\n # reconstruct a non-finite region\n ridges = all_ridges[p1]\n new_region = [v for v in vertices if v >= 0]\n\n for p2, v1, v2 in ridges:\n if v2 < 0:\n v1, v2 = v2, v1\n if v1 >= 0:\n # finite ridge: already in the region\n continue\n\n # Compute the missing endpoint of an infinite ridge\n\n t = vor.points[p2] - vor.points[p1] # tangent\n t /= np.linalg.norm(t)\n n = np.array([-t[1], t[0]]) # normal\n\n midpoint = vor.points[[p1, p2]].mean(axis=0)\n direction = np.sign(np.dot(midpoint - center, n)) * n\n far_point = vor.vertices[v2] + direction * radius\n\n new_region.append(len(new_vertices))\n new_vertices.append(far_point.tolist())\n\n # sort region counterclockwise\n vs = np.asarray([new_vertices[v] for v in new_region])\n c = vs.mean(axis=0)\n angles = np.arctan2(vs[:,1] - c[1], vs[:,0] - c[0])\n new_region = np.array(new_region)[np.argsort(angles)]\n\n # finish\n new_regions.append(new_region.tolist())\n\n return new_regions, np.asarray(new_vertices)"
] | [
"0.6815165",
"0.6787288",
"0.65719616",
"0.6431204",
"0.63884205",
"0.6349469",
"0.6345909",
"0.62319404",
"0.6147917",
"0.6102491",
"0.60988635",
"0.6086924",
"0.60158694",
"0.5977933",
"0.596923",
"0.59498054",
"0.5903642",
"0.58810025",
"0.5857406",
"0.57596266",
"0.57325995",
"0.57234365",
"0.5634847",
"0.5594376",
"0.5575781",
"0.5552444",
"0.5549962",
"0.5528961",
"0.55258197",
"0.55258197"
] | 0.6796481 | 1 |
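The `document` cell above stores `voronoiVolumes` as a single escaped string. Purely as an illustration (not part of the dataset), a minimal standalone sketch of the same idea follows; the function name `voronoi_cell_volumes`, the random test points, and the bounding-box filter are assumptions for the example, which relies only on `numpy` and `scipy.spatial`.

```python
import numpy as np
from scipy.spatial import Voronoi, ConvexHull

def voronoi_cell_volumes(points):
    # Illustrative sketch (assumed helper, not the dataset's class method):
    # compute the volume of every bounded Voronoi cell whose vertices all
    # stay inside the bounding box of the input points.
    points = np.asarray(points)
    vor = Voronoi(points)
    lo, hi = points.min(axis=0), points.max(axis=0)
    volumes, interior = [], np.zeros(len(points), dtype=bool)
    for i, region_idx in enumerate(vor.point_region):
        indices = vor.regions[region_idx]
        if not indices or -1 in indices:   # unbounded (boundary) cell: skip
            continue
        verts = vor.vertices[indices]
        if np.all(verts >= lo) and np.all(verts <= hi):
            volumes.append(ConvexHull(verts).volume)
            interior[i] = True
    return np.array(volumes), interior

# quick check on random 3D points
rng = np.random.default_rng(0)
vols, interior = voronoi_cell_volumes(rng.random((500, 3)))
print(interior.sum(), "interior cells, mean volume", vols.mean())
```

Cells touching the edge of the point cloud are either unbounded or have far-flung vertices, which is why the sketch discards them before any volume statistics are computed.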
Given a set of Voronoi vertices, this simple method checks whether all of them lie within a range of the form [[xmin, xmax], [ymin, ymax], [zmin, zmax]] given in limits (see the sketch after this row) | def checkVertices(vertices, limits):
isWithin = True
for i,v in enumerate(vertices):
x = v[0]
y = v[1]
z = v[2]
if x < limits[0][0] or x > limits[0][1]:
isWithin = False
break
if y < limits[1][0] or y > limits[1][1]:
isWithin = False
break
if z < limits[2][0] or z > limits[2][1]:
isWithin = False
break
return isWithin | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_box_bounds(self, test_vec):\n above_min = np.greater(test_vec, self.lower_vertex).all()\n below_max = np.greater(self.upper_vertex, test_vec).all()\n return above_min and below_max",
"def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index",
"def box_in_range(self,x,y,z,d, x_range, y_range, z_range, d_range):\n return np.logical_and.reduce((\n x > x_range[0], x < x_range[1],\n y > y_range[0], y < y_range[1],\n z > z_range[0], z < z_range[1],\n d > d_range[0], d < d_range[1]))",
"def _get_voronoi_poly_points(vert_index_list, voronoi_vertices,\n voronoi_centroid):\n voronoi_poly_points = []\n if -1 not in vert_index_list and len(vert_index_list) > 3:\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n elif vert_index_list.size > 0:\n # ASSUME RECTANGLE\n vert_index_list = vert_index_list[vert_index_list >= 0]\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n # CASE 1: 2 valid voronoi vertices\n if vert_index_list.size == 2:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon1 = voronoi_poly_points[0][0]\n corner_lat1 = voronoi_poly_points[0][1]\n corner_lon2 = voronoi_poly_points[1][0]\n corner_lat2 = voronoi_poly_points[1][1]\n\n # check if need to add points in lon or lat\n if abs(corner_lon1-corner_lon2) > abs(corner_lat1-corner_lat2):\n dLat = center_lat - corner_lat1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [corner_lon2, center_lat + dLat],\n [corner_lon1, center_lat + dLat]\n ])\n else:\n dLon = center_lon - corner_lon1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [center_lon + dLon, corner_lat2],\n [center_lon + dLon, corner_lat1]\n ])\n # CASE 2: 1 valid voronoi vertex\n elif vert_index_list.size == 1:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon = voronoi_poly_points[0][0]\n corner_lat = voronoi_poly_points[0][1]\n dLat = center_lat - corner_lat\n dLon = center_lon - corner_lon\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon, corner_lat],\n [center_lon + dLon, corner_lat],\n [center_lon + dLon, center_lat + dLat],\n [corner_lon, center_lat + dLat]\n ])\n\n return voronoi_poly_points",
"def bounds(lines):\n min_x = bench_util.Max\n min_y = bench_util.Max\n max_x = bench_util.Min\n max_y = bench_util.Min\n \n for line in lines.itervalues():\n for x, y in line:\n min_x = min(min_x, x)\n min_y = min(min_y, y)\n max_x = max(max_x, x)\n max_y = max(max_y, y)\n \n return ((min_x, min_y), (max_x, max_y))",
"def voronoiVolumes(self, vor):\n volumes = np.array([])\n data = vor.points\n limits = [[np.min(data[:, 0]), np.max(data[:, 0])], [np.min(data[:, 1]), np.max(data[:, 1])], [np.min(data[:, 2]), np.max(data[:, 2])]]\n nonB = [False for _ in data]\n for i, region in enumerate(vor.point_region):\n indices = vor.regions[region]\n if -1 not in indices:\n v = vor.vertices[indices]\n isWithin = self.checkVertices(v, limits)\n if isWithin:\n volumes = np.append(volumes, ConvexHull(v).volume)\n nonB[i] = True\n return volumes, nonB",
"def boundary_check(limits : tuple, coords : tuple) -> bool:\n xl,xh,yl,yh = limits\n x,y = coords\n bound_x = xl <= x and x < xh\n bound_y = yl <= y and y < yh\n return bound_x and bound_y",
"def valid_pixel_coordinates(u, v, IMAGE_HEIGHT, IMAGE_WIDTH):\n return (u >= 0 and v >= 0 and v < IMAGE_HEIGHT and u < IMAGE_WIDTH)",
"def roi_vecs(layer_coords, vec_coords, region):\n \n if region == 'crown':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n \n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(start_x, stop_x+5))]\n \n return roi_vec_coords\n \n elif region == 'fundus':\n #find threshold for vectors inside roi\n start_x_lst = []\n stop_x_lst = []\n for i in range(1,5):\n start_x_lst.append(layer_coords[i][0][0])\n stop_x_lst.append(layer_coords[i][-1][0])\n\n start_x = max(start_x_lst)\n stop_x = min(stop_x_lst)\n\n # roi_vec_coords = [i for i in vec_coords if i[1][0] in list(range(start_x-10, stop_x+3))]\n roi_vec_coords = [i for i in vec_coords if i[0][0] in list(range(stop_x, start_x))]\n \n # print(roi_vec_coords)\n return roi_vec_coords",
"def bounding_box_filter(points, x_range, y_range, z_range):\n min_x, max_x = x_range\n min_y, max_y = y_range\n min_z, max_z = z_range\n\n bound_x = np.logical_and(points[:, 0] > min_x, points[:, 0] < max_x)\n bound_y = np.logical_and(points[:, 1] > min_y, points[:, 1] < max_y)\n bound_z = np.logical_and(points[:, 2] > min_z, points[:, 2] < max_z)\n\n bb_filter = np.logical_and(np.logical_and(bound_x, bound_y), bound_z)\n\n return points[bb_filter]",
"def voronoi(points, buffer_percent=100):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Create fake sitepoints around the point extent to correct for infinite polygons\n # For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity\n xs,ys = list(zip(*uniqpoints))[:2]\n pointswidth = max(xs) - min(xs)\n pointsheight = max(ys) - min(ys)\n xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )\n midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )\n #bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer\n bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer\n classpoints.extend([_Point(*corner) for corner in bufferbox])\n\n # Compute Voronoi\n vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)\n\n # Turn unordered result edges into ordered polygons\n polygons = list()\n for sitepoint,polyedges in list(poly_dict.items()):\n polyedges = [edge[1:] for edge in polyedges]\n poly = list()\n firststart,firstend = polyedges.pop(0)\n poly.append(firstend)\n while polyedges:\n curend = poly[-1]\n for i,other in enumerate(polyedges):\n otherstart,otherend = other\n if otherstart == curend:\n poly.append(otherend)\n ##print otherstart,otherend\n polyedges.pop(i)\n break\n elif otherend == curend:\n ##print otherend,otherstart\n poly.append(otherstart)\n polyedges.pop(i)\n break\n # Get vertices from indexes\n try: sitepoint = uniqpoints[sitepoint]\n except IndexError:\n sitepoint = None # fake bbox sitepoints shouldnt be in the results\n poly = [vertices[vi] for vi in poly if vi != -1]\n polygons.append((sitepoint, poly))\n\n # Maybe clip parts of polygons that stick outside screen?\n # ...\n\n return polygons",
"def get_polygon_constraints(self, range_polygones=range(3, 5), print_out=False):\n rows_A = []\n rows_b = []\n for m in range_polygones:\n if (print_out):\n print('checking {}-polygones'.format(m))\n polygons = self.get_convex_polygons(m)\n row_A, row_b = self.get_polygon_constraints_m(polygons, print_out)\n rows_A.append(row_A)\n rows_b.append(row_b)\n return np.vstack(rows_A), np.hstack(rows_b)",
"def vsize(min, max):\n return lambda mate: any(min <= v <= max for v in mate['read_info'].v_list)",
"def geometric_area_filtering(boxes, divider):\n correct_boxes = []\n mx = -1\n for box in boxes:\n polygon = Polygon([box.x0, box.x1, box.x2, box.x3])\n if polygon.area > mx:\n mx = polygon.area\n for box in boxes:\n polygon = Polygon([box.x0, box.x1, box.x2, box.x3])\n if polygon.area >= mx / divider:\n correct_boxes.append(box)\n return correct_boxes",
"def overlap_checker(x1, y1, x2, y2, all_coord):\n overlaps = False\n i = 0\n start = 0\n for i in range(int(len(all_coord)/4)):\n b = all_coord[start:start + 4]\n start += 4\n try:\n if (max(b[0], b[2]) <= min(x1, x2) or max(x1, x2) <= min(b[0], b[2]) or max(b[1], b[3]) <= min(y1, y2) or max(y1, y2) <= min(b[1], b[3])):\n if not (min(x1, x2) <= min(b[0], b[2]) and min(y1, y2) <= min(b[1], b[3]) and max(x1, x2) >= max(b[0], b[2]) and max(y1, y2) >= max(b[1], b[3])):\n if not (min(b[0], b[2]) <= min(x1, x2) and min(b[1], b[3]) <= min(y1, y2) and max(b[0], b[2]) >= max(x1, x2) and max(b[1], b[3]) >= max(y1, y2)):\n overlaps = False\n else:\n return True\n else:\n return True\n else:\n return True\n except TypeError:\n overlaps = False\n if not overlaps:\n return False",
"def get_points_in_range(self, x, y, z):\n\n return_set = []\n for point in self.get_points():\n min_x = x - 1000\n max_x = x + 1000\n if point[0] < min_x or max_x < point[0]:\n continue\n\n min_y = y - 1000\n max_y = y + 1000\n if point[1] < min_y or max_y < point[1]:\n continue\n\n min_z = z - 1000\n max_z = z + 1000\n if point[2] < min_z or max_z < point[2]:\n continue\n return_set.append(point)\n return return_set",
"def _inside_bounds(A, B):\n for axis in 'xyz':\n minA, maxA = axis_bounds(A, axis)\n minB, maxB = axis_bounds(B, axis)\n if (minA <= minB) or (maxA >= maxB):\n return False\n\n return True",
"def equatorial_zone_vertices(vertices, pole, width=5):\n return [i\n for i, v in enumerate(vertices)\n if np.abs(np.dot(v, pole)) < np.abs(np.sin(np.pi * width / 180))]",
"def point_in_rectangle(point: Vector, rect_min: Vector, rect_max: Vector) -> bool:\n return rect_min[0] <= point[0] <= rect_max[0] and rect_min[1] <= point[1] <= rect_max[1]",
"def board_bounds(live_coords):\n if not live_coords:\n return False\n min_x = live_coords[0][0]\n max_x = live_coords[0][0]\n min_y = live_coords[0][1]\n max_y = live_coords[0][1]\n for i, j in live_coords:\n if min_x > i:\n min_x = i\n if i > max_x:\n max_x = i\n if min_y > j:\n min_y = j\n if j > max_y:\n max_y = j\n return [[min_x, min_y], [max_x, max_y]]",
"def get_outer_boundary_of_voronoi(self):\n edge = [edge for edge in self.edges if not edge.nxt][0]\n # next(obj for obj in objs if obj.val==5)\n first_vertex = edge.origin\n outer_boundary = []\n while (not edge.get_destination() == first_vertex):\n if(edge.get_destination().is_infinity()):\n edge = edge.twin.nxt\n else:\n outer_boundary.append(edge)\n edge = edge.nxt\n outer_boundary.append(edge)\n return outer_boundary",
"def checkRange(x,y,w,h,maxW,maxH):\n if x < 0:\n x = 0\n if y < 0:\n y = 0\n if x + w >= maxW:\n w = maxW-x-1\n if y + h >= maxH:\n h = maxH-y-1\n return [x,y,w,h]",
"def ValidClusterRanges(self):\n for cluster_range in self.cluster_ranges:\n the_range = cluster_range.split(\"-\")\n print(f\"Checking that range {the_range} falls within our data area\")\n try:\n if int(the_range[0]) < self.low_data_cluster or int(the_range[1]) > self.high_data_cluster:\n print(f\"False. {the_range[0]} or {the_range[1]} is outside of our data area\")\n return False\n except TypeError as t_err:\n print(f\"Error. Range does not appear to be an int\")\n return False\n return True",
"def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines",
"def get_voronoi_polygons(input_pts, bbox=None):\n if not isinstance(input_pts, np.ndarray):\n input_pts = np.array(input_pts)\n\n if bbox is None:\n x_min = input_pts[:, 0].min()\n x_max = input_pts[:, 0].max()\n y_min = input_pts[:, 1].min()\n y_max = input_pts[:, 1].max()\n x_range = (x_max - x_min) * const.BBOX_MODIFIER\n y_range = (y_max - y_min) * const.BBOX_MODIFIER\n bbox = (x_min - x_range, y_min - y_range,\n x_max + x_range, y_max + y_range)\n\n # Constructing Delaunay triangulation, consisting of points and triangles.\n # (triangles are arrays of indexes of points)\n triangulation = matplotlib.tri.Triangulation(input_pts[:, 0],\n input_pts[:, 1])\n triangles = triangulation.triangles\n triangles_count = triangles.shape[0]\n\n # input_pts[triangles] = array of triangles: [[pt1], ..., ...] -- triangle.\n circle_centers = get_circles_centers(input_pts[triangles])\n\n segments = []\n for i in range(triangles_count):\n for j in range(3):\n neighbor = triangulation.neighbors[i][j]\n\n if neighbor != -1: # Trying to connect circle centers\n # Fitting centers to bbox.\n start, end = circle_centers[i], circle_centers[neighbor]\n\n if not check_inside(start, bbox):\n start = move_point(start, end, bbox)\n if start is None:\n continue\n\n if not check_inside(end, bbox):\n end = move_point(end, start, bbox)\n if end is None:\n continue\n\n segments.append([start, end])\n\n else: # Trying to create line leading to the bbox.\n # Ignore center outside of bbox\n if not check_inside(circle_centers[i], bbox):\n continue\n\n first, second, third = (input_pts[triangles[i, j]],\n input_pts[triangles[i, (j+1) % 3]],\n input_pts[triangles[i, (j+2) % 3]])\n\n edge = np.array([first, second])\n vector = np.array([[0, 1], [-1, 0]]).dot(edge[1] - edge[0])\n\n def line(pt):\n return ((pt[0] - first[0]) * (second[1] - first[1]) /\n (second[0] - first[0]) - pt[1] + first[1])\n\n orientation = (np.sign(line(third)) *\n np.sign(line(first + vector)))\n if orientation > 0:\n vector = -orientation * vector\n shift = calculate_shift(circle_centers[i], vector, bbox)\n if shift is not None:\n segments.append([circle_centers[i],\n circle_centers[i] + shift * vector])\n\n return segments",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Define a list to hold Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min),\n int(north + d_north + safety_distance - north_min),\n int(east - d_east - safety_distance - east_min),\n int(east + d_east + safety_distance - east_min),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n # create a voronoi graph based on\n # location of obstacle centres\n graph = Voronoi(points)\n # check each edge from graph.ridge_vertices for collision\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]].astype(int)\n p2 = graph.vertices[v[1]].astype(int)\n # test each pair p1 and p2 for collision using Bresenham\n # If the edge does not hit an obstacle add it to the list\n in_collision = False\n ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])\n for b in ridgeline:\n # eliminate out of range points in the line\n if b[0] < 0 or b[0] >= grid.shape[0]:\n in_collision = True\n break\n if b[1] < 0 or b[1] >= grid.shape[1]:\n in_collision = True\n break\n # check if grid cell is an obstacle\n if grid[b[0], b[1]] == 1:\n in_collision = True\n break\n # keep ridge points not in collision\n if not in_collision:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges",
"def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8",
"def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz",
"def create_bounds(dict, number_of_nodes):\n\n x_min = dict[\"x_min\"]\n x_max = dict[\"x_max\"]\n u_min = dict[\"u_min\"]\n u_max = dict[\"u_max\"]\n\n v_min = []\n v_max = []\n for k in range(number_of_nodes - 1):\n v_min += x_min\n v_max += x_max\n v_min += u_min\n v_max += u_max\n\n if \"tf_min\" in dict:\n if \"tf_max\" in dict:\n tf_min = dict[\"tf_min\"]\n tf_max = dict[\"tf_max\"]\n v_min.append(tf_min)\n v_max.append(tf_max)\n\n v_min += x_min\n v_max += x_max\n\n return vertcat(*v_min), vertcat(*v_max)",
"def is_inside(self, coordinates: tuple) -> bool:\n if len(coordinates) != 2:\n raise IndexError(\"Coordinates consist of x and y\")\n x, y = coordinates\n if (self.MIN_X <= x <= self.MAX_X) and (self.MIN_Y <= y <= self.MAX_Y):\n return True\n else:\n return False"
] | [
"0.6560554",
"0.62124604",
"0.61240464",
"0.6044191",
"0.60034245",
"0.59941345",
"0.5940613",
"0.5885276",
"0.5870029",
"0.5853414",
"0.58431876",
"0.5830641",
"0.58024144",
"0.57611763",
"0.5759779",
"0.57531",
"0.57513684",
"0.5713757",
"0.57067126",
"0.5697985",
"0.56684226",
"0.5656381",
"0.56546456",
"0.56405175",
"0.56296784",
"0.562391",
"0.56052756",
"0.55964154",
"0.5584375",
"0.5579286"
] | 0.70169705 | 0 |
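For comparison only (again an illustrative assumption, not dataset content), a vectorized NumPy version of the same bounds test is sketched below; `vertices_within` is a hypothetical name and the limits are made up for the demo.

```python
import numpy as np

def vertices_within(vertices, limits):
    # True if every vertex lies inside [[xmin, xmax], [ymin, ymax], [zmin, zmax]].
    vertices = np.asarray(vertices, dtype=float)
    limits = np.asarray(limits, dtype=float)
    return bool(np.all((vertices >= limits[:, 0]) & (vertices <= limits[:, 1])))

limits = [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
print(vertices_within([[0.2, 0.3, 0.4], [0.9, 0.1, 0.5]], limits))  # True
print(vertices_within([[0.2, 0.3, 1.4]], limits))                   # False
```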
This method determines the normalized Voronoi volumes at which the random PDF and the obtained PDF intersect | def intersectPDFs(self, threshold=1):
diff = np.abs(self.PDF - self.RandomPDF)
half = np.argmax(self.RandomPDF)
start = np.nonzero(self.PDF > 0.5*np.max(self.PDF))[0][0]
end = np.nonzero(self.RandomPDF[half:] < 0.5*np.max(self.RandomPDF))[0][0] + half
if start == 0 and half == 0:
self.cut1 = 0
else:
self.cut1 = np.argmin(diff[start:half]) + start
self.V1 = self.bins[self.cut1] * threshold
self.cut2 = np.argmin(diff[half:end]) + half
self.V2 = self.bins[self.cut2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def volumePDF(self, maxVar=-1, bins=75, threshold=1):\n print('Cluster Identification Based on Voronoi Volumes')\n start = time.time()\n self.vor = Voronoi(self.data)\n self.volumes, self.nonB = self.voronoiVolumes(self.vor)\n self.nonBI = np.arange(0, len(self.vor.point_region))[self.nonB]\n self.volumes_sorted = np.sort(self.volumes)\n self.oldOrder = np.argsort(self.volumes)\n\n if maxVar > 0:\n means = [np.mean(self.volumes_sorted)]\n varMean = []\n topV = -1\n #Discard some very big Voronoi cells which unnecessarily alter the mean volume. Stop once the mean volume does\n #not vary more than maxVar with an elimination of these large cells. Deactivate this part with maxVar= < 0\n for i in range(250):\n volumes = self.volumes_sorted[:-(i + 1)]\n means.append(np.mean(volumes))\n varM = (means[-1] - means[-2])/means[-2]\n varMean.append(varM)\n if np.abs(varM) < maxVar and topV == -1:\n topV = -(i + 1)\n self.oldOrder = self.oldOrder[:topV]\n self.volumes_sorted = self.volumes_sorted[:topV]\n\n self.V = self.volumes_sorted/np.mean(self.volumes_sorted)\n self.bins = np.logspace(np.log(np.min(self.V)), np.log(np.max(self.V)), bins)\n\n self.PDF, _ = np.histogram(self.V, bins=self.bins, density=True)\n self.bins = (self.bins[1:] + self.bins[:-1]) / 2\n\n self.RandomPDF = self.PoissonPDF(self.bins)\n self.intersectPDFs(threshold=threshold)\n self.assignLabels()\n self.times[0] = time.time() - start\n print('Elapsed Time: ' + str(round(time.time() - start, 3)))",
"def find_voting_precincts_in_district(state=48, district=7, leg_body='US-REP'):\r\n vps_in_district_GeoJSON = get_voting_precincts_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n if not os.path.isfile(vps_in_district_GeoJSON):\r\n voting_precincts_file = get_statewide_voting_precincts_geojson_filename(state)\r\n \r\n district_file = get_district_geojson_filename(\r\n state=state, district=district, leg_body=leg_body)\r\n \r\n get_district_file(state=state, district=district, leg_body=leg_body)\r\n\r\n get_statewide_voting_precincts(state=state)\r\n \r\n print( \"Finding voting precincts in district\" )\r\n district_boundary = gpd.read_file(district_file)\r\n voting_precincts = gpd.read_file(voting_precincts_file)\r\n \r\n print( \"Finding voting precincts that touch the district boundary\" )\r\n vps_touching_district_bool = voting_precincts.touches(district_boundary.geometry[0])\r\n \r\n print( \"Finding voting precincts that intersect the district boundary\" )\r\n vps_intersecting_district_bool = voting_precincts.intersects(district_boundary.geometry[0])\r\n \r\n print( \"Filtering the voting precincts\" )\r\n for index in vps_touching_district_bool[vps_touching_district_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n \r\n print( \"Finding blockgroups to filter based on threshold\" )\r\n intersections = vps_in_district.intersection(district_boundary.geometry[0])\r\n\r\n areas_of_intersections = intersections.area\r\n indx_out = []\r\n for vp_index, vp in vps_in_district.iterrows():\r\n area_of_intersection = areas_of_intersections[vp_index]\r\n vp_area = GeoSeries(vp.geometry).area[0]\r\n\r\n share_of_intersection = area_of_intersection / vp_area\r\n \r\n if share_of_intersection < 0.10:\r\n indx_out.append(vp_index)\r\n\r\n #print( \"\\nBlock Group: \", bg.GEOID )\r\n #print( \"Area: \", str(bg_area) )\r\n #print( \"Share of Intersection: \", str(share_of_intersection) )\r\n \r\n vps_to_remove_bool = pd.Series([False]*len(voting_precincts))\r\n\r\n for index in indx_out:\r\n vps_to_remove_bool.loc[index] = True\r\n\r\n vps_to_remove = voting_precincts[vps_to_remove_bool]\r\n\r\n for index in vps_to_remove_bool[vps_to_remove_bool==True].index:\r\n vps_intersecting_district_bool.loc[index] = False\r\n\r\n vps_in_district = voting_precincts[vps_intersecting_district_bool]\r\n if 'PREC' in list(vps_in_district.columns.values):\r\n vps_in_district = vps_in_district.rename(columns={'PREC':'PRECINCT'})\r\n\r\n # See issue #367 https://github.com/geopandas/geopandas/issues/367\r\n try: \r\n os.remove(vps_in_district_GeoJSON)\r\n except OSError:\r\n pass\r\n vps_in_district.to_file(vps_in_district_GeoJSON, driver='GeoJSON')\r\n \r\n vps_in_district.sort_values(by=['PRECINCT'])[['PRECINCT']].to_csv(\"vps.csv\", index=False)",
"def add_boundaries(self):\n\n bound_conns=[]\n bound_coords=[]\n bound_vert_index=[]\n throat_vert_index=[]\n #Find boundary extent\n [x_min,x_max,y_min,y_max,z_min,z_max]=vo.vertex_dimension(self,self.pores(),parm='minmax')\n min_point = np.around(np.array([x_min,y_min,z_min]),10)\n max_point = np.around(np.array([x_max,y_max,z_max]),10)\n Np = self.num_pores()\n Nt = self.num_throats()\n new_throat_count = 0\n # ridge_dict contains a dictionary where the key is a set of 2 neighbouring pores and the value is the vertex indices\n # that form the throat or ridge between them\n for p,v in self._vor.ridge_dict.items():\n # if the vertex with index -1 is contained in list then the ridge is unbounded - ignore these\n if np.all(np.asarray(v) >=0):\n #boundary throats will be those connecting one pore inside the original set and one out\n if (p[0] in range(Np) and p[1] not in range(Np)) or\\\n (p[0] not in range(Np) and p[1] in range(Np)):\n # the dictionary key is not in numerical order so find the pore index inside\n if p[0] in range(Np):\n my_pore=p[0]\n else:\n my_pore=p[1]\n my_pore_coord = self[\"pore.coords\"][my_pore]\n new_pore_coord = my_pore_coord.copy()\n #rounding necessary here to identify the plane as Voronoi can have 1e-17 and smaller errors\n throat_verts = np.around(self._vor.vertices[v],10)\n #find which plane we are aligned with (if any) and align new_pore with throat plane\n if len(np.unique(throat_verts[:,0])) == 1:\n new_pore_coord[0]=np.unique(throat_verts[:,0])\n elif len(np.unique(throat_verts[:,1])) == 1:\n new_pore_coord[1]=np.unique(throat_verts[:,1])\n elif len(np.unique(throat_verts[:,2])) == 1:\n new_pore_coord[2]=np.unique(throat_verts[:,2])\n else:\n new_pore_coord = throat_verts.mean()\n bound_coords.append(new_pore_coord)\n bound_conns.append(np.array([my_pore,new_throat_count+Np]))\n bound_vert_index.append(dict(zip(v,throat_verts)))\n throat_vert_index.append(dict(zip(v,throat_verts)))\n new_throat_count += 1\n\n #Add new pores and connections\n self.extend(pore_coords=bound_coords, throat_conns=bound_conns)\n #Record new number of pores\n Mp = self.num_pores()\n Mt = self.num_throats()\n new_pore_ids = np.arange(Np,Mp)\n new_throat_ids = np.arange(Nt,Mt)\n #Identify which boundary the pore sits on\n front = self.pores()[self['pore.coords'][:,0]==min_point[0]]\n back = self.pores()[self['pore.coords'][:,0]==max_point[0]]\n left = self.pores()[self['pore.coords'][:,1]==min_point[1]]\n right = self.pores()[self['pore.coords'][:,1]==max_point[1]]\n bottom = self.pores()[self['pore.coords'][:,2]==min_point[2]]\n top = self.pores()[self['pore.coords'][:,2]==max_point[2]]\n #Assign labels\n self['pore.boundary'] = False\n self['pore.boundary'][new_pore_ids] = True\n self['pore.right_boundary'] = False\n self['pore.left_boundary'] = False\n self['pore.front_boundary'] = False\n self['pore.back_boundary'] = False\n self['pore.top_boundary'] = False\n self['pore.bottom_boundary'] = False\n self['pore.right_boundary'][right] = True\n self['pore.left_boundary'][left] = True\n self['pore.front_boundary'][front] = True\n self['pore.back_boundary'][back] = True\n self['pore.top_boundary'][top] = True\n self['pore.bottom_boundary'][bottom] = True\n #Save the throat verts\n self[\"pore.vert_index\"][new_pore_ids] = bound_vert_index\n self[\"throat.vert_index\"][new_throat_ids] = throat_vert_index",
"def innerProd(vcfResults):\n both = vcfResults.get(\"both\", 0)\n onlyX = vcfResults.get(\"onlyX\", 0)\n onlyY = vcfResults.get(\"onlyY\", 0)\n \n # return (both - onlyX - onlyY)/(both + onlyX + onlyY) \n return (both)/(both + onlyX + onlyY)\n # Distance heuristic = # correct variants / total \n # => % correct variants called",
"def volume_similarity_pd(pd1,pd2):\n\tvolume_similarity = {}\n\n\t# print(\"aaaaa\")\n\n\t# union = vtk.vtkBooleanOperationPolyDataFilter()\n\t# union.SetOperationToDifference()\n\t# union.SetInputData(0,pd1)\n\t# union.SetInputData(1,pd2)\n\t# union.Update()\n\t# u = union.GetOutput()\n\n\t# massUnion = vtk.vtkMassProperties()\n\t# massUnion.SetInputData(u)\n\n\t# intersection = vtk.vtkBooleanOperationPolyDataFilter()\n\t# intersection.SetOperationToIntersection()\n\t# intersection.SetInputData(0,pd1)\n\t# intersection.SetInputData(1,pd2)\n\t# intersection.Update()\n\t# i = intersection.GetOutput()\n\t# massIntersection = vtk.vtkMassProperties()\n\t# massIntersection.SetInputData(i)\n\n\t# # metrics\n\t# tqdm.write(\"intersection vol: {:.2f}\".format(massIntersection.GetVolume()))\n\t# tqdm.write(\"union vol: {:.2f}\".format(massUnion.GetVolume()))\n\n\t# volume_similarity[\"jaccard\"] = 1 - massIntersection.GetVolume()/massUnion.GetVolume()\n\n\t# tqdm.write(\"Jaccard distance: {:.2f}\".format(volume_similarity[\"jaccard\"]))\n\n\thausdorffDistFilter = vtk.vtkHausdorffDistancePointSetFilter()\n\thausdorffDistFilter.SetInputData(0, pd1)\n\thausdorffDistFilter.SetInputData(1, pd2)\n\thausdorffDistFilter.Update()\n\n\tvolume_similarity[\"hausdorff\"] = hausdorffDistFilter.GetHausdorffDistance()\n\tvolume_similarity[\"relative0\"] = hausdorffDistFilter.GetRelativeDistance()[0]\n\tvolume_similarity[\"relative1\"] = hausdorffDistFilter.GetRelativeDistance()[1]\n\ttqdm.write(\"Hausdorff distance: {:.2f} mm\".format(volume_similarity[\"hausdorff\"]))\n\ttqdm.write(\"Relative distance from pd1 to pd2: {:.2f} mm\".format(volume_similarity[\"relative0\"]))\n\ttqdm.write(\"Relative distance from pd2 to pd1: {:.2f} mm\".format(volume_similarity[\"relative1\"]))\n\n\treturn volume_similarity, hausdorffDistFilter.GetOutput(0), hausdorffDistFilter.GetOutput(1)",
"def voronoiVolumes(self, vor):\n volumes = np.array([])\n data = vor.points\n limits = [[np.min(data[:, 0]), np.max(data[:, 0])], [np.min(data[:, 1]), np.max(data[:, 1])], [np.min(data[:, 2]), np.max(data[:, 2])]]\n nonB = [False for _ in data]\n for i, region in enumerate(vor.point_region):\n indices = vor.regions[region]\n if -1 not in indices:\n v = vor.vertices[indices]\n isWithin = self.checkVertices(v, limits)\n if isWithin:\n volumes = np.append(volumes, ConvexHull(v).volume)\n nonB[i] = True\n return volumes, nonB",
"def SH_FindOverlap(xcenter, ycenter, xlength, ylength, xp_corner, yp_corner):\n\n areaClipped = 0.0\n top = ycenter + 0.5 * ylength\n bottom = ycenter - 0.5 * ylength\n\n left = xcenter - 0.5 * xlength\n right = xcenter + 0.5 * xlength\n\n nVertices = 4 # input detector pixel vertices\n MaxVertices = 9\n # initialize xPixel, yPixel to the detector pixel corners.\n # xPixel,yPixel will become the clipped polygon vertices inside the cube pixel\n # xnew,ynew xpixel and ypixel of size MaxVertices\n\n xPixel = []\n yPixel = []\n\n xnew = []\n ynew = []\n\n for j in range(0, 9):\n xnew.append(0.0)\n ynew.append(0.0)\n xPixel.append(0.0)\n yPixel.append(0.0)\n\n\n # Xpixel, YPixel closed (5 corners)\n for i in range(0, 4):\n xPixel[i] = xp_corner[i]\n yPixel[i] = yp_corner[i]\n xPixel[4] = xp_corner[0]\n yPixel[4] = yp_corner[0]\n\n\n for i in range(0, 4): # 0:left, 1: right, 2: bottom, 3: top\n nVertices2 = 0\n for j in range(0, nVertices):\n x1 = xPixel[j]\n y1 = yPixel[j]\n x2 = xPixel[j + 1]\n y2 = yPixel[j + 1]\n condition = calcCondition(i, x1, y1, x2, y2, left, right, top, bottom)\n x = 0\n y = 0\n\n if condition == 1:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2);\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n\n elif condition == 2:\n nVertices2 = addpoint(x2, y2, xnew, ynew, nVertices2)\n elif condition == 3:\n x, y = solveIntersection(i, x1, y1, x2, y2,\n left, right, top, bottom)\n nVertices2 = addpoint(x, y, xnew, ynew, nVertices2)\n\n#\tcondition == 4: points outside\n# Done looping over J corners\n nVertices2 = addpoint(xnew[0], ynew[0], xnew, ynew, nVertices2) # close polygon\n\n if nVertices2 > MaxVertices:\n raise Error2DPolygon(\" Failure in finding the clipped polygon, nVertices2 > 9 \")\n\n\n nVertices = nVertices2 - 1;\n\n for k in range(0, nVertices2):\n xPixel[k] = xnew[k]\n yPixel[k] = ynew[k]\n\n# done loop over top,bottom,left,right\n nVertices = nVertices + 1\n\n\n if nVertices > 0:\n areaClipped = FindAreaPoly(nVertices, xPixel, yPixel);\n\n\n return areaClipped;",
"def voronoi(points, buffer_percent=100):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Create fake sitepoints around the point extent to correct for infinite polygons\n # For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity\n xs,ys = list(zip(*uniqpoints))[:2]\n pointswidth = max(xs) - min(xs)\n pointsheight = max(ys) - min(ys)\n xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )\n midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )\n #bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer\n bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer\n classpoints.extend([_Point(*corner) for corner in bufferbox])\n\n # Compute Voronoi\n vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)\n\n # Turn unordered result edges into ordered polygons\n polygons = list()\n for sitepoint,polyedges in list(poly_dict.items()):\n polyedges = [edge[1:] for edge in polyedges]\n poly = list()\n firststart,firstend = polyedges.pop(0)\n poly.append(firstend)\n while polyedges:\n curend = poly[-1]\n for i,other in enumerate(polyedges):\n otherstart,otherend = other\n if otherstart == curend:\n poly.append(otherend)\n ##print otherstart,otherend\n polyedges.pop(i)\n break\n elif otherend == curend:\n ##print otherend,otherstart\n poly.append(otherstart)\n polyedges.pop(i)\n break\n # Get vertices from indexes\n try: sitepoint = uniqpoints[sitepoint]\n except IndexError:\n sitepoint = None # fake bbox sitepoints shouldnt be in the results\n poly = [vertices[vi] for vi in poly if vi != -1]\n polygons.append((sitepoint, poly))\n\n # Maybe clip parts of polygons that stick outside screen?\n # ...\n\n return polygons",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Define a list to hold Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min),\n int(north + d_north + safety_distance - north_min),\n int(east - d_east - safety_distance - east_min),\n int(east + d_east + safety_distance - east_min),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n # create a voronoi graph based on\n # location of obstacle centres\n graph = Voronoi(points)\n # check each edge from graph.ridge_vertices for collision\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]].astype(int)\n p2 = graph.vertices[v[1]].astype(int)\n # test each pair p1 and p2 for collision using Bresenham\n # If the edge does not hit an obstacle add it to the list\n in_collision = False\n ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])\n for b in ridgeline:\n # eliminate out of range points in the line\n if b[0] < 0 or b[0] >= grid.shape[0]:\n in_collision = True\n break\n if b[1] < 0 or b[1] >= grid.shape[1]:\n in_collision = True\n break\n # check if grid cell is an obstacle\n if grid[b[0], b[1]] == 1:\n in_collision = True\n break\n # keep ridge points not in collision\n if not in_collision:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges",
"def random_projection_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points) and the offset from the origin\n hyperplane_offset = 0.0\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = data[left, d] - data[right, d]\n hyperplane_offset -= hyperplane_vector[d] * (\n data[left, d] + data[right, d]) / 2.0\n\n # For each point compute the margin (project into normal vector, add offset)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = hyperplane_offset\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right",
"def random_projection_cosine_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n left_norm = norm(data[left])\n right_norm = norm(data[right])\n \n if left_norm == 0.0:\n left_norm = 1.0\n \n if right_norm == 0.0:\n right_norm = 1.0\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points)\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = ((data[left, d] / left_norm) -\n (data[right, d] / right_norm))\n\n hyperplane_norm = norm(hyperplane_vector)\n if hyperplane_norm == 0.0:\n hyperplane_norm = 1.0\n \n for d in range(dim):\n hyperplane_vector[d] = hyperplane_vector[d] / hyperplane_norm\n\n # For each point compute the margin (project into normal vector)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = 0.0\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right",
"def varietySample(V, x, count_max, R2, eps_bound):\n \n #x = np.array(V.gens);\n N = len(x)\n t = sp.symbols(\"t\") #Parameter of line\n #count_max = 200\n count = 0\n #N = length(x)\n P = np.empty([N,0])\n while count < count_max:\n # Corrupt the variety with bounded noise \n epsilon = np.random.uniform(-eps_bound, eps_bound)\n Ve = V + epsilon \n \n # Get a line u + v t in space \n U = sp.Matrix(np.random.randn(2, N+1));\n Ur = np.array(U.rref()[0].transpose().tolist(), dtype=float)\n u = Ur[1:, 0]\n v = Ur[1:, 1]\n \n L = u + v * t \n \n #substitute in the line and find real roots\n VL = Ve.subs([i for i in zip(x, L)])\n cVL = sp.Poly(VL).coeffs()\n rVL = np.roots(cVL)\n r_real = np.real(rVL[np.isreal(rVL)])\n \n \n \n #recover points of intersection and append to array\n p = u[:, np.newaxis] + np.outer(v, r_real) \n pnorm = np.sum(p**2, 0)\n \n pcand= p[:, pnorm <= R2]\n\n # if pcand.shape[1] <= 1:\n # pcand0 = pcand\n # else:\n # #pcand0 = pcand[:, 0]\n # pcand0 = pcand[..., 0][:, np.newaxis] #this is dumb\n #\n # P = np.concatenate([P, pcand0], 1)\n P = np.concatenate([P, pcand], 1)\n \n #start new sampling iteration\n count = count + np.size(pcand, 1)\n \n return P",
"def intersect(self, ray):\n # TODO A5 (Step3 and Step4) implement this function\n # For step 4, check if uvs and normals are not None (respectively)\n # If so, then interpolate them\n\n # batch_intersect returns t, beta, gamma, i\n posns = self.posns\n uvs = self.uvs\n inds = self.inds\n normals = self.normals\n t, beta, gamma, i = batch_intersect(posns[inds[:, :]], ray)\n if (t == np.inf):\n return no_hit\n vs = posns[inds[i, :]]\n P = ray.origin + t * ray.direction\n\n if (t == np.inf):\n return no_hit\n else:\n\n alpha = 1 - beta - gamma\n\n if uvs is not None:\n\n uv0 = uvs[inds[i][0]]\n uv1 = uvs[inds[i][1]]\n uv2 = uvs[inds[i][2]]\n\n uv = alpha * uv0 + beta * uv1 + gamma * uv2\n\n else:\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n uv = vec([u, v])\n\n if normals is not None:\n\n n0 = normals[inds[i][0]]\n n1 = normals[inds[i][1]]\n n2 = normals[inds[i][2]]\n\n unit_normal = normalize(alpha * n0 + beta * n1 + gamma * n2)\n\n else:\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n return Hit(t, P, unit_normal, uv, self.material)",
"def getYesPoints(pshapes, proj, dx, nmax, touch_center=True):\n\n mxmin = 9e10\n mxmax = -9e10\n mymin = 9e10\n mymax = -9e10\n for pshape in pshapes:\n pxmin, pymin, pxmax, pymax = pshape.bounds\n if pxmin < mxmin:\n mxmin = pxmin\n if pxmax > mxmax:\n mxmax = pxmax\n if pymin < mymin:\n mymin = pymin\n if pymax > mymax:\n mymax = pymax\n\n if not touch_center:\n geodict = GeoDict.createDictFromBox(mxmin, mxmax, mymin, mymax, dx, dx)\n img = rasterizeShapes(pshapes, geodict)\n #now get the numpy array of x/y coordinates where covgrid == 1\n idx = np.where(img == 1)[0]\n x, y = np.unravel_index(idx, (geodict.ny, geodict.nx))\n yespoints = list(zip(x.flatten(), y.flatten()))\n nrows = geodict.ny\n ncols = geodict.nx\n xvar = np.arange(geodict.xmin, geodict.xmax+geodict.dx, geodict.dx)\n yvar = np.arange(geodict.ymin, geodict.ymax+geodict.dy, geodict.dy)\n else:\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n ncols = len(xvar)\n nrows = len(yvar)\n if nmax is not None:\n if ncols*nrows > nmax:\n aspect = ncols/nrows\n ncols = np.sqrt(nmax*aspect)\n nrows = nmax/ncols\n ncols = int(ncols)\n nrows = int(nrows)\n #re-calculate dx here...\n tdx = (mxmax-mxmin)/ncols\n tdy = (mymax-mymin)/nrows\n dx = np.max(tdx, tdy)\n xvar = np.arange(mxmin, mxmax+dx, dx)\n yvar = np.arange(mymin, mymax+dx, dx)\n\n #Get the \"yes\" points to sample from\n yespoints = []\n idx = []\n shapeidx = 0\n if pshapes[0].type == 'Polygon':\n #loop over shapes, projecting each one, then get the sample points\n for pshape in pshapes:\n if not shapeidx % 1000:\n print('Searching polygon %i of %i' % (shapeidx, len(pshapes)))\n shapeidx += 1\n pxmin, pymin, pxmax, pymax = pshape.bounds\n leftcol = np.where((pxmin - xvar) >= 0)[0].argmax()\n rightcol = np.where((xvar - pxmax) >= 0)[0][0]\n bottomrow = np.where((pymin - yvar) >= 0)[0].argmax()\n toprow = np.where((yvar - pymax) >= 0)[0][0]\n xp = np.arange(xvar[leftcol], xvar[rightcol]+dx, dx)\n yp = np.arange(yvar[bottomrow], yvar[toprow]+dx, dx)\n xmesh, ymesh = np.meshgrid(xp, yp)\n xy = list(zip(xmesh.flatten(), ymesh.flatten()))\n for point in xy:\n ix = np.where(xvar == point[0])[0][0]\n iy = np.where(yvar == point[1])[0][0]\n if pshape.contains(Point(point)):\n yespoints.append(point)\n idx.append(np.ravel_multi_index((iy, ix), (nrows, ncols), mode='raise', order='C'))\n else:\n yespoints = []\n for pshape in pshapes:\n yespoints.append(pshape.coords[0])\n\n return (np.array(yespoints), nrows, ncols, xvar, yvar, idx)",
"def get_intersected_basins_ppt_data(all_basin_geoms , month, year, conv2Inches):\n \n global gSpatialIndex\n print(\"Processing Prism Dataset\")\n ppt_bounds, ppt_data, hdr_dict = get_monthly_prism_ppt_data(year = year, month = month, plotPPTBounds = False)\n print(\"-Extracting precipitation data\")\n ppt_gdf = convert_pptData_to_GDF(ppt_bounds, ppt_data, hdr_dict, plotHeatMap = False)\n\n intersected_basins = {}\n print(\"---Creating Spatial RTree Index for month:\", month)\n \n # Create a copy of a global index to reduce time.\n # Check if it works correctly.\n \n if(gSpatialIndex == 0):\n gSpatialIndex = ppt_gdf.sindex\n\n print(\"-Creating basin intersections\")\n for basin_file_name, basin_geom in all_basin_geoms.items():\n possible_matches_index = list(gSpatialIndex.intersection(basin_geom.bounds))\n possible_matches = ppt_gdf.iloc[possible_matches_index]\n precise_matches = possible_matches[possible_matches.intersects(basin_geom)]\n if(conv2Inches):\n precise_matches[\"Precipitation\"] = precise_matches[\"Precipitation\"]/25.4\n intersected_basins[basin_file_name] = precise_matches\n \n print(\"Completed processing \")\n return intersected_basins",
"def stratify(self):\n self.logger.info(\"UQpy: Creating Voronoi stratification ...\")\n\n initial_seeds = self.seeds\n if self.seeds is None:\n initial_seeds = stats.uniform.rvs(size=[self.seeds_number, self.dimension], random_state=self.random_state)\n\n if self.decomposition_iterations == 0:\n cent, vol = self.create_volume(initial_seeds)\n self.volume = np.asarray(vol)\n else:\n for i in range(self.decomposition_iterations):\n cent, vol = self.create_volume(initial_seeds)\n initial_seeds = np.asarray(cent)\n self.volume = np.asarray(vol)\n\n self.seeds = initial_seeds\n self.logger.info(\"UQpy: Voronoi stratification created.\")",
"def checkintersection(p1,p2,p3,p4):\n def isonsegment(i,j,k):\n return ((i.x <= k.x or j.x <= k.x) and (k.x <= i.x or k.x <= j.x) and\n (i.y <= k.y or j.y <= k.y) and (k.y <= i.y or k.x <= j.y))\n\n def computedirection(i,j,k):\n a = (k.x - i.x) * (j.y - i.y);\n b = (j.x - i.x) * (k.y - i.y);\n if a < b:\n return -1\n elif a > b:\n return 1\n else:\n return 0\n\n # return no intersection if they\n if p1.x == p3.x and p1.y == p3.y:\n return False \n if p1.x == p4.x and p1.y == p4.y:\n return False\n if p2.x == p3.x and p2.y == p3.y:\n return False\n if p2.x == p4.x and p2.y == p4.y:\n return False\n\n\n d1 = computedirection(p3,p4,p1)\n d2 = computedirection(p3,p4,p2)\n d3 = computedirection(p1,p2,p3)\n d4 = computedirection(p1,p2,p4)\n return ((((d1 > 0 and d2 < 0) or (d1 < 0 and d2 > 0)) and\n ((d3 > 0 and d4 < 0) or (d3 < 0 and d4 > 0))) or\n (d1 == 0 and isonsegment(p3,p4,p1)) or\n (d2 == 0 and isonsegment(p3,p4,p2)) or\n (d3 == 0 and isonsegment(p1,p2,p3)) or\n (d4 == 0 and isonsegment(p1,p2,p4)))",
"def voronoi_sub_mask_1d_index_to_pixeliztion_1d_index_from_grids_and_geometry(\n grid,\n mask_1d_index_to_nearest_pixelization_1d_index,\n sub_mask_1d_index_to_mask_1d_index,\n pixel_centres,\n pixel_neighbors,\n pixel_neighbors_size,\n):\n\n sub_mask_1d_index_to_pixeliztion_1d_index = np.zeros((grid.shape[0]))\n\n for sub_mask_1d_index in range(grid.shape[0]):\n\n nearest_pixelization_1d_index = mask_1d_index_to_nearest_pixelization_1d_index[\n sub_mask_1d_index_to_mask_1d_index[sub_mask_1d_index]\n ]\n\n while True:\n\n nearest_pixelization_pixel_center = pixel_centres[\n nearest_pixelization_1d_index\n ]\n\n sub_pixel_to_nearest_pixelization_distance = (\n (grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2\n + (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])\n ** 2\n )\n\n closest_separation_from_pixelization_to_neighbor = 1.0e8\n\n for neighbor_pixelization_1d_index in range(\n pixel_neighbors_size[nearest_pixelization_1d_index]\n ):\n\n neighbor = pixel_neighbors[\n nearest_pixelization_1d_index, neighbor_pixelization_1d_index\n ]\n\n separation_from_neighbor = (\n grid[sub_mask_1d_index, 0] - pixel_centres[neighbor, 0]\n ) ** 2 + (grid[sub_mask_1d_index, 1] - pixel_centres[neighbor, 1]) ** 2\n\n if (\n separation_from_neighbor\n < closest_separation_from_pixelization_to_neighbor\n ):\n closest_separation_from_pixelization_to_neighbor = (\n separation_from_neighbor\n )\n closest_neighbor_pixelization_1d_index = (\n neighbor_pixelization_1d_index\n )\n\n neighboring_pixelization_1d_index = pixel_neighbors[\n nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index\n ]\n sub_pixel_to_neighboring_pixelization_distance = (\n closest_separation_from_pixelization_to_neighbor\n )\n\n if (\n sub_pixel_to_nearest_pixelization_distance\n <= sub_pixel_to_neighboring_pixelization_distance\n ):\n sub_mask_1d_index_to_pixeliztion_1d_index[\n sub_mask_1d_index\n ] = nearest_pixelization_1d_index\n break\n else:\n nearest_pixelization_1d_index = neighboring_pixelization_1d_index\n\n return sub_mask_1d_index_to_pixeliztion_1d_index",
"def compute_intersecting(voxel, R, kdt, max_segment): \n\tsubset = np.unique(si[kdt.query_radius(voxel, r=R+max_segment)[0]]).astype(np.int)\n\treturn subset[np.array([track_roi_intersection_check(s, voxel, sq_dist_thr=R**2) for s in tracks[subset]])]",
"def search_diversification(self):\n self.stage = 2\n # Find index of least explored region,\n # express in binary\n b = bin(np.argmin(self.LTM))[2:]\n # Pad binary number\n if len(b) < self.x0.shape[0]:\n pad = self.x0.shape[0] - len(b)\n b = ''.join(['0' for i in range(pad)]) + b\n # Store digits in column vector\n d = len(b)\n b = np.array(list(b), dtype='int').reshape((d,1))\n # Generate random positive vector\n base = self.r_uni(low=0, high=2, size=(d,1))\n # Transform vector into appropriate region\n return base * (b * 2 - 1)",
"def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"",
"def plotVolumePDFs(self, topV=3, noSecond=True):\n take = self.bins < topV\n fig = plt.figure()\n plt.plot(self.bins[take], self.PDF[take], label='Preferential Distribution')\n plt.plot(self.bins[take], self.RandomPDF[take], label='Random Distribution')\n plt.plot(self.V1*np.ones(50), np.linspace(0, self.PDF[self.cut1]), '--', label='First Intersection - V = ' + str(round(self.V1, 2)))\n if not noSecond:\n plt.plot(self.V2 * np.ones(50), np.linspace(0, self.PDF[self.cut2]), '--', label='Second Intersection - V = ' + str(round(self.V2, 2)))\n plt.xlim([0, topV])\n plt.title('Voronoi Cell Volume PDF')\n plt.xlabel('Normed Volume [-]')\n plt.ylabel('PDF [-]')\n plt.legend()",
"def exportVoronoiRegions(self):\n # Remember to compute circumcircles if not done before\n # for t in self.triangles:\n # self.circles[t] = self.circumcenter(t)\n useVertex = {i: [] for i in range(len(self.coords))}\n vor_coors = []\n index = {}\n # Build a list of coordinates and a index per triangle/region\n for tidx, (a, b, c) in enumerate(self.triangles):\n vor_coors.append(self.circles[(a, b, c)][0])\n # Insert triangle, rotating it so the key is the \"last\" vertex\n useVertex[a] += [(b, c, a)]\n useVertex[b] += [(c, a, b)]\n useVertex[c] += [(a, b, c)]\n # Set tidx as the index to use with this triangles\n index[(a, b, c)] = tidx\n index[(c, a, b)] = tidx\n index[(b, c, a)] = tidx\n\n # init regions per coordinate dictionary\n regions = {}\n # Sort each region in a coherent order, and substitude each triangle\n # by its index\n for i in range(4, len(self.coords)):\n v = useVertex[i][0][0] # Get a vertex of a triangle\n r = []\n for _ in range(len(useVertex[i])):\n # Search the triangle beginning with vertex v\n t = [t for t in useVertex[i] if t[0] == v][0]\n r.append(index[t]) # Add the index of this triangle to region\n v = t[1] # Choose the next vertex to search\n regions[i-4] = r # Store region.\n\n return vor_coors, regions",
"def test_volume_3d(self):\n # generate voronoi mesh \n mesh = Mesh3d(self.particles, self.bound)\n print(\"building mesh...\")\n mesh.build_geometry()\n print(\"mesh complete\")\n\n # calculate voronoi volumes of all real particles \n real_indices = self.particles[\"tag\"] == ParticleTAGS.Real\n tot_vol = np.sum(self.particles[\"volume\"][real_indices])\n\n self.assertAlmostEqual(tot_vol, 1.0)",
"def newmatch(self):\n\n objectpnts = self.kif.getObjectPcd()\n # normals\n objectnormals = tools.estimatenormals(objectpnts)",
"def getCandidate(self):\n data = self.data.copy()\n for p in self.pruned:\n if p in data:\n data.remove(p)\n tmp = data.copy()\n for d in tmp:\n if d in data:\n pastart = [self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n return data",
"def multiple_intersections():\n levels = np.array([966., 937.2, 925., 904.6, 872.6, 853., 850., 836., 821., 811.6, 782.3,\n 754.2, 726.9, 700., 648.9, 624.6, 601.1, 595., 587., 576., 555.7,\n 534.2, 524., 500., 473.3, 400., 384.5, 358., 343., 308.3, 300., 276.,\n 273., 268.5, 250., 244.2, 233., 200.]) * units.mbar\n temperatures = np.array([18.2, 16.8, 16.2, 15.1, 13.3, 12.2, 12.4, 14., 14.4,\n 13.7, 11.4, 9.1, 6.8, 4.4, -1.4, -4.4, -7.3, -8.1,\n -7.9, -7.7, -8.7, -9.8, -10.3, -13.5, -17.1, -28.1, -30.7,\n -35.3, -37.1, -43.5, -45.1, -49.9, -50.4, -51.1, -54.1, -55.,\n -56.7, -57.5]) * units.degC\n dewpoints = np.array([16.9, 15.9, 15.5, 14.2, 12.1, 10.8, 8.6, 0., -3.6, -4.4,\n -6.9, -9.5, -12., -14.6, -15.8, -16.4, -16.9, -17.1, -27.9, -42.7,\n -44.1, -45.6, -46.3, -45.5, -47.1, -52.1, -50.4, -47.3, -57.1,\n -57.9, -58.1, -60.9, -61.4, -62.1, -65.1, -65.6,\n -66.7, -70.5]) * units.degC\n return levels, temperatures, dewpoints",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Initialize an empty list for Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n graph = Voronoi(points)\n\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n if not hit:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges, int(north_min), int(east_min)",
"def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()",
"def optimumBins(self, b0=100, b1=10000, n=100):\n self.intersections = []\n for i in np.linspace(b0, b1, n):\n self.volumePDF(bins=i)\n self.intersections.append(self.V1)\n\n plt.figure()\n plt.plot(np.linspace(b0, b1, n), self.intersections)\n plt.xlabel('Number of Bins [-]')\n plt.ylabel('Normed Voronoi Volume of Intersection [-]')\n plt.title('Evolution of Intersection Volume with Number of Bins')"
] | [
"0.6731937",
"0.5723254",
"0.56301624",
"0.5573013",
"0.55410963",
"0.5535367",
"0.55213",
"0.5521294",
"0.54995483",
"0.54817283",
"0.5443941",
"0.53623676",
"0.53475815",
"0.53398156",
"0.5310163",
"0.5289071",
"0.5283833",
"0.5266256",
"0.5254226",
"0.5251348",
"0.5249623",
"0.5227493",
"0.5217785",
"0.5180239",
"0.51655275",
"0.5151044",
"0.5129552",
"0.5091387",
"0.5090549",
"0.50902677"
] | 0.68404734 | 0 |
This method tracks the evolution of the first intersection between the PDFs as the number of bins in the PDF varies | def optimumBins(self, b0=100, b1=10000, n=100):
self.intersections = []
for i in np.linspace(b0, b1, n):
self.volumePDF(bins=i)
self.intersections.append(self.V1)
plt.figure()
plt.plot(np.linspace(b0, b1, n), self.intersections)
plt.xlabel('Number of Bins [-]')
plt.ylabel('Normed Voronoi Volume of Intersection [-]')
plt.title('Evolution of Intersection Volume with Number of Bins') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def intersectPDFs(self, threshold=1):\n diff = np.abs(self.PDF - self.RandomPDF)\n half = np.argmax(self.RandomPDF)\n start = np.nonzero(self.PDF > 0.5*np.max(self.PDF))[0][0]\n end = np.nonzero(self.RandomPDF[half:] < 0.5*np.max(self.RandomPDF))[0][0] + half\n\n if start == 0 and half == 0:\n self.cut1 = 0\n else:\n self.cut1 = np.argmin(diff[start:half]) + start\n self.V1 = self.bins[self.cut1] * threshold\n\n self.cut2 = np.argmin(diff[half:end]) + half\n self.V2 = self.bins[self.cut2]",
"def Find_CentralStar_positioninPDF(spectra,thex0,all_titles,object_name,dir_top_img,all_filt,date,figname,right_edge = 1900,NBIMGPERROW=2,vmin=0,vmax=2000,downsampling=1,verbose=False):\n NBIMGPERROW=2\n NBSPEC=len(spectra)\n \n MAXIMGROW=max(2,int(m.ceil(float(NBSPEC)/float(NBIMGPERROW))))\n\n # fig file specif\n NBIMGROWPERPAGE=5 # number of rows per pages\n PageNum=0 # page counter\n \n figfilename=os.path.join(dir_top_img,figname)\n pp = PdfPages(figfilename) # create a pdf file\n \n titlepage='Central star for obj: {} , date : {} (backg. rem.)'.format(object_name,date)\n \n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n all_mean = [] # list of mean and sigma for the main central star\n all_sigma= []\n \n \n for index in np.arange(0,NBSPEC):\n \n \n # new pdf page \n if index%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n f, axarr = plt.subplots(NBIMGROWPERPAGE,NBIMGPERROW,figsize=(25,30))\n f.suptitle(titlepage,size=20)\n \n # index of image in the pdf page \n indexcut=index-PageNum*(NBIMGROWPERPAGE*NBIMGPERROW) \n ix=indexcut%NBIMGPERROW\n iy=indexcut/NBIMGPERROW\n \n \n \n star_index=int(thex0[index])\n \n cutspectra=np.copy(spectra[index][star_index-star_halfwidth:star_index+star_halfwidth]) \n X=np.arange(cutspectra.shape[0])+star_index-star_halfwidth\n \n # fast fit of a gaussian, bidouille\n \n x = np.sum(X*cutspectra)/np.sum(cutspectra)\n width = 0.5*np.sqrt(np.abs(np.sum((X-x)**2*cutspectra)/np.sum(cutspectra)))\n themax = cutspectra.max()\n \n all_mean.append(int(x))\n all_sigma.append(int(width))\n \n fit = lambda t : themax*np.exp(-(t-x)**2/(2*width**2))\n \n \n #print 'mean,width, max =',x,width,themax\n thelabel='fit m={}, $\\sigma$= {}'.format(int(x),int(width))\n axarr[iy,ix].plot(X,cutspectra,'r-',label='data')\n axarr[iy,ix].plot(X,fit(X), 'b-',label=thelabel)\n \n thetitle=\"{} : {} : {} \".format(index,all_titles[index],all_filt[index])\n axarr[iy,ix].set_title(thetitle,color='blue',fontweight='bold',fontsize=16)\n axarr[iy,ix].grid(True)\n \n max_y_to_plot=cutspectra.max()*1.2\n \n \n axarr[iy,ix].set_ylim(0,max_y_to_plot)\n axarr[iy,ix].legend(loc=1)\n\n axarr[iy,ix].text(star_index-star_halfwidth/2,max_y_to_plot*1.1/1.2, all_filt[index],verticalalignment='top', horizontalalignment='right',color='blue',fontweight='bold', fontsize=20)\n \n # save the pdf page at the botton end of the page\n if (index+1)%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n PageNum+=1 # increase page Number\n f.savefig(pp, format='pdf')\n f.show()\n \n \n f.savefig(pp, format='pdf') \n f.show()\n pp.close()\n\n\n\n return all_mean,all_sigma",
"def CALSPECAbsLineIdentificationinPDF(spectra,pointing,all_titles,object_name,dir_top_images,all_filt,date,figname,tagname,NBIMGPERROW=2):\n \n \n NBSPEC=len(spectra)\n \n MAXIMGROW=max(2,int(m.ceil(float(NBSPEC)/float(NBIMGPERROW))))\n \n \n # fig file specif\n NBIMGROWPERPAGE=5 # number of rows per pages\n PageNum=0 # page counter\n \n figfilename=os.path.join(dir_top_images,figname)\n \n pp = PdfPages(figfilename) # create a pdf file\n \n \n titlepage='WL calibrated 1D Spectra 1D for obj : {} date :{}'.format(object_name,date)\n \n \n all_wl= [] # containers for wavelength\n \n \n for index in np.arange(0,NBSPEC):\n \n \n # new pdf page \n if index%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n f, axarr = plt.subplots(NBIMGROWPERPAGE,NBIMGPERROW,figsize=(25,30))\n f.suptitle(titlepage,size=20)\n \n # index of image in the pdf page \n indexcut=index-PageNum*(NBIMGROWPERPAGE*NBIMGPERROW) \n ix=indexcut%NBIMGPERROW\n iy=indexcut/NBIMGPERROW\n \n \n spec = spectra[index]\n \n # calibrate\n grating_name=get_disperser_filtname(all_filt[index])\n X_Size_Pixels=np.arange(spec.shape[0])\n lambdas = Pixel_To_Lambdas(grating_name,X_Size_Pixels,pointing[index],False)\n \n \n all_wl.append(lambdas)\n \n #plot\n axarr[iy,ix].plot(lambdas,spec,'r-',lw=2,label=tagname)\n \n thetitle=\"{} : {} : {} \".format(index,all_titles[index],all_filt[index])\n axarr[iy,ix].set_title(thetitle,color='blue',fontweight='bold',fontsize=16)\n \n \n #axarr[iy,ix].text(600.,spec.max()*1.1, all_filt[index],verticalalignment='top', horizontalalignment='left',color='blue',fontweight='bold', fontsize=20)\n axarr[iy,ix].legend(loc='best',fontsize=16)\n axarr[iy,ix].set_xlabel('Wavelength [nm]', fontsize=16)\n axarr[iy,ix].grid(True)\n \n YMIN=0.\n YMAX=spec.max()*1.2\n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA or line == HDELTA or line ==O2B or line == O2Y or line == O2Z:\n axarr[iy,ix].plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='red',lw=0.5)\n axarr[iy,ix].text(line['lambda'],0.9*(YMAX-YMIN),line['label'],verticalalignment='bottom', horizontalalignment='center',color='red', fontweight='bold',fontsize=16)\n \n \n axarr[iy,ix].set_ylim(YMIN,YMAX)\n axarr[iy,ix].set_xlim(np.min(lambdas),np.max(lambdas))\n axarr[iy,ix].set_xlim(0,1200.)\n \n if (index+1)%(NBIMGPERROW*NBIMGROWPERPAGE) == 0:\n PageNum+=1 # increase page Number\n f.savefig(pp, format='pdf')\n f.show()\n \n \n f.savefig(pp, format='pdf') \n f.show()\n pp.close() \n \n return all_wl",
"def calc_intersection_true_data_baselines_data_cnt():\n# final_sen_scores = linux_base_path + \"/final_sen_scores/\"\n final_sen_scores = base_path + \"\\\\final_sen_scores\\\\\"\n top_k_docs_values = [50]\n lambda_f = \"1\"\n claim_sen_true_relevance_dict = read_pickle(\"claim_sen_relevance_dict_\"+curr_source)\n claims_dict = read_pickle(\"claim_dict\")\n intersection_count_avg_all_claims =0\n exclude = set(string.punctuation)\n \n for k_val in top_k_docs_values:\n intersection_count = 0\n for alpha in range(10,11,1): #change just for test!\n for beta in range(0,1,1):\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n curr_baseline_final_sen_score_dict = read_pickle(final_sen_scores+\"clm_num_key_final_ranked_list_sen_alpha_\"+str(alpha_f)+\"_beta_\"+str(beta_f)+\"_top_k_docs_\"+str(k_val)+\"_lambda_\"+str(lambda_f)+\"_sorted\")\n for clm in curr_baseline_final_sen_score_dict.keys():\n curr_clm_baseline_set = set()\n curr_true_relevance_set = set()\n for (sen,score) in curr_baseline_final_sen_score_dict[clm]:\n sen_no_punc = ''.join(ch for ch in sen if ch not in exclude)\n sen_no_space = sen_no_punc.replace(\" \",\"\")\n curr_clm_baseline_set.add(sen_no_space)\n for (sen,score) in claim_sen_true_relevance_dict[claims_dict[clm]]:\n sen_no_punc = ''.join(ch for ch in sen if ch not in exclude)\n sen_no_space = sen_no_punc.replace(\" \",\"\")\n curr_true_relevance_set.add(sen_no_space) \n intersection_count += len(curr_clm_baseline_set.intersection(curr_true_relevance_set))\n intersection_count_avg_all_claims += float(float(intersection_count)/float(110))\n intersection_count_avg_all_claims = float(float(intersection_count_avg_all_claims)/float(len(curr_baseline_final_sen_score_dict.keys())))\n print \"intersection_count_avg_all_claims:\"+str(intersection_count_avg_all_claims)",
"def test_WIMP_cut_region_on_true_data(bolo_name, mass, analysis):\n\t\n\n\t#Load 2D PDF\n\tfWIMP2D, f = PyRPl.open_ROOT_object(\"./ROOT_files/WIMP_PDF2D_\" + analysis + \".root\", \"WIMP_\" + mass + \"_GeV\")\n\n\t#Load cut value on PDF for 95% WIMP box\n\tcut_val_90, cut_val_99 = 0,0\n\twith open (\"./Text_files/WIMP_PDF_90_and_99_cut_value_\" + analysis + \".txt\", \"r\") as fcut:\n\t\tstuff = [elem.rstrip().split(\",\") for elem in fcut.readlines()]\n\t\tfor elem in stuff:\n\t\t\tmass_val = elem[0]\n\t\t\tif int(mass)==int(mass_val):\n\t\t\t\tcut_val_90 = float(elem[1])\n\t\t\t\tcut_val_99 = float(elem[2])\n\t\n\n\tdata_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/Fond_ERA_merged/\"\n\tfilou = TFile(data_path + bolo_name + \"_\" + analysis + \"_fond.root\", \"read\")\n\ttree = filou.Get(\"data\")\n\tnum_pass_cut =0\n\n\thpass = TH2F(\"hpass\", \"hpass\", 100, 0, 15, 100, 0, 15)\n\n\t# #T Check that the events are found where expected\n\t# arr1 = np.random.uniform(0,15,size=(200000,2))\n\t# for i in range(arr1.shape[0]):\n\t# \tPDF_val = fWIMP2D.Eval(arr1[i][0], arr1[i][1])\n\t# \tif (cut_val_99<PDF_val<cut_val_90):\n\t# \t# if (cut_val_99<PDF_val<cut_val_90):\n\t# \t\tnum_pass_cut+=1\n\t# \t\thpass.Fill(arr1[i][0], arr1[i][1])\t\t\n\n\t# hpass.Draw()\n\t# raw_input()\n\n\tfor k in range(tree.GetEntries()):\n\t\ttree.GetEntry(k)\n\t\tER=(1+8./3)*0.5*(tree.EC1+tree.EC2)-0.33*(1.5*tree.EIA+4*tree.EIB+1.5*tree.EIC+4*tree.EID)\n\t\tPDF_val = fWIMP2D.Eval(ER, 0.5*(tree.EIB+tree.EID))\n\t\tif (cut_val_99<PDF_val<cut_val_90 and 0.5*(tree.EIB+tree.EID)>0.7):\n\t\t# if (cut_val_99<PDF_val<cut_val_90):\n\t\t\tnum_pass_cut+=1\n\t\t\thpass.Fill(0.5*(tree.EC1+tree.EC2), 0.5*(tree.EIB+tree.EID))\n\n\tprint num_pass_cut\n\thpass.Draw()\n\traw_input()",
"def volumePDF(self, maxVar=-1, bins=75, threshold=1):\n print('Cluster Identification Based on Voronoi Volumes')\n start = time.time()\n self.vor = Voronoi(self.data)\n self.volumes, self.nonB = self.voronoiVolumes(self.vor)\n self.nonBI = np.arange(0, len(self.vor.point_region))[self.nonB]\n self.volumes_sorted = np.sort(self.volumes)\n self.oldOrder = np.argsort(self.volumes)\n\n if maxVar > 0:\n means = [np.mean(self.volumes_sorted)]\n varMean = []\n topV = -1\n #Discard some very big Voronoi cells which unnecessarily alter the mean volume. Stop once the mean volume does\n #not vary more than maxVar with an elimination of these large cells. Deactivate this part with maxVar= < 0\n for i in range(250):\n volumes = self.volumes_sorted[:-(i + 1)]\n means.append(np.mean(volumes))\n varM = (means[-1] - means[-2])/means[-2]\n varMean.append(varM)\n if np.abs(varM) < maxVar and topV == -1:\n topV = -(i + 1)\n self.oldOrder = self.oldOrder[:topV]\n self.volumes_sorted = self.volumes_sorted[:topV]\n\n self.V = self.volumes_sorted/np.mean(self.volumes_sorted)\n self.bins = np.logspace(np.log(np.min(self.V)), np.log(np.max(self.V)), bins)\n\n self.PDF, _ = np.histogram(self.V, bins=self.bins, density=True)\n self.bins = (self.bins[1:] + self.bins[:-1]) / 2\n\n self.RandomPDF = self.PoissonPDF(self.bins)\n self.intersectPDFs(threshold=threshold)\n self.assignLabels()\n self.times[0] = time.time() - start\n print('Elapsed Time: ' + str(round(time.time() - start, 3)))",
"def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios",
"def hist_and_thresh(self):\n bins, occ, _ = self.histogram()\n self.thresh = np.mean(bins) # initial guess\n self.peaks_and_thresh() # in case peak calculation fails\n # if np.size(self.peak_indexes) == 2: # est_param will only find one peak if the number of bins is small\n # # set the threshold where the fidelity is max\n # self.search_fidelity(self.peak_centre[0], self.peak_widths[0] ,self.peak_centre[1])\n try: \n thresh = threshold_minimum(np.array(self.stats['Counts']), len(bins))\n int(np.log(thresh)) # if thresh <= 0 this gives ValueError\n self.thresh = thresh\n except (ValueError, RuntimeError, OverflowError): pass\n try:\n # atom is present if the counts are above threshold\n self.stats['Atom detected'] = [x // self.thresh for x in self.stats['Counts']]\n # self.fidelity, self. err_fidelity = np.around(self.get_fidelity(), 4) # this is a relatively slow operation\n except (ValueError, OverflowError): pass\n return bins, occ, self.thresh",
"def __init__(self, nBin, realHists, imagHists, normHists, indexHists, coma, integralReal = None, integralImag = None):\n\t\tself.bin3pi = nBin\n\t\tself.binCenter = 0.52 + .04*nBin\n\t\tif not len(realHists) == len(imagHists) or not len(imagHists) == len(normHists) or not len(normHists) == len(indexHists):\n\t\t\tprint \"Numbers of histogams do not match:\"\n\t\t\tprint \" real:\",len(realHists)\n\t\t\tprint \" imag:\",len(imagHists)\n\t\t\tprint \" norm:\",len(normHists)\n\t\t\tprint \" index:\",len(indexHists)\n\t\t\traise ValueError(\"Histogram size mismatch\")\n\t\tself.nSect = len(realHists)\n\t\tif self.nSect == 0:\n\t\t\traise ValueError(\"No histograms given.\")\n\t\tself.nBins = [ ]\n\t\tself.totalBins = 0\n\t\tself.sectors = [ ]\n\t\tfor s in range(self.nSect):\n\t\t\tbinMax = 0\n\t\t\tfor bin in range(realHists[s].GetNbinsY()):\n\t\t\t\tm2Pi = realHists[s].GetYaxis().GetBinCenter( bin+1)\n\t\t\t\tm3Pi = realHists[s].GetXaxis().GetBinCenter(nBin+1)\n\t\t\t\tif utils.isValidPhaseSpace(m3Pi, m2Pi):\n#\t\t\t\tif realHists[s].GetBinContent(nBin + 1, bin+1) != 0.:\n\t\t\t\t\tbinMax = bin\n\t\t\tself.nBins.append(binMax+1)\n\t\t\tself.totalBins += binMax+1\n\t\t\tself.sectors.append(realHists[s].GetTitle().split('_')[0])\n\t\tself.reals = np.zeros((self.totalBins))\n\t\tself.imags = np.zeros((self.totalBins))\n\t\tself.norms = np.zeros((self.totalBins))\n#\t#\tCMwrite(\"__init__\")\n\t\tself.coma = np.zeros((2*self.totalBins,2*self.totalBins))\n\t\tself.hasIntegralMatrix = False\n\t\tif integralReal and integralImag:\n\t\t\tself.hasIntegralMatrix = True\n\t\t\tself.integralMatrix = np.zeros((self.totalBins, self.totalBins), dtype = complex)\n\t\telif integralReal:\n\t\t\traise RuntimeError(\"Cannot handle real integral matrix only, need also imaginary\")\n\t\telif integralImag:\n\t\t\traise RuntimeError(\"Cannot handle imaginary integral matrix only, need also real\")\n\t\tself.binCenters = np.zeros((self.totalBins))\n\t\tself.numLim = 2.e-8\n\t\tself.ownPinv = True\n\t\tcount = 0\n\t\tfor s in range(self.nSect):\n\t\t\tfor bin in range(self.nBins[s]):\n\t\t\t\tself.reals[count] = realHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.imags[count] = imagHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.norms[count] = normHists[s].GetBinContent(nBin + 1, bin + 1)\n\t\t\t\tself.binCenters[count] = realHists[s].GetYaxis().GetBinCenter(bin + 1)\n\t\t\t\tcomaIndex = int(round(indexHists[s].GetBinContent(nBin + 1, bin + 1)))\n\t\t\t\tcount2 = 0\n\t\t\t\tfor s2 in range(self.nSect):\n\t\t\t\t\tfor bin2 in range(self.nBins[s2]):\n\t\t\t\t\t\tcomaIndex2 = int(round(indexHists[s2].GetBinContent(nBin + 1, bin2 + 1)))\n\t\t\t\t\t\tself.coma[2*count , 2*count2 ] = coma.GetBinContent(2*comaIndex+1, 2*comaIndex2+1)\n\t\t\t\t\t\tself.coma[2*count , 2*count2+1] = coma.GetBinContent(2*comaIndex+1, 2*comaIndex2+2)\n\t\t\t\t\t\tself.coma[2*count+1, 2*count2 ] = coma.GetBinContent(2*comaIndex+2, 2*comaIndex2+1)\n\t\t\t\t\t\tself.coma[2*count+1, 2*count2+1] = coma.GetBinContent(2*comaIndex+2, 2*comaIndex2+2)\n\t\t\t\t\t\tif self.hasIntegralMatrix:\n\t\t\t\t\t\t\tval = integralReal.GetBinContent(comaIndex+1, comaIndex2+1) + 1.j*integralImag.GetBinContent(comaIndex+1, comaIndex2+1)\n\t\t\t\t\t\t\tself.integralMatrix[count,count2] = val\n\t\t\t\t\t\tcount2 += 1\n\t\t\t\tcount +=1\n\t\tself.hasMassRange = False\n\t\tself.makeComaInv()\n\t\tself.borders = [0]\n\t\tfor i in range(self.nSect):\n\t\t\tself.borders.append(self.borders[-1] + self.nBins[i])\n\t\tself.nZero = 0\n\t\tself.zeroModes = [ 
]\n\t\tself.zeroModeNumbers = [ ]\n\t\tself.zeroModeTitles = [ ]\n\t\tself.zeroEigenvalues = [ ]\n\t\tself.hasTheo = False\n\t\tself.chi2init = False\n\t\tself.zeroModesRemovedFromComa = False\n\t\tself.globalPhaseRemoved = False\n\t\tself.specialCOMAs = { }\n\t\tself.hasZMP = False\n\t\tself.zeroModeParameters = None\n\t\tself.hasRandomizedAmplitudes = False",
"def intersection(st, ave):\n\treturn (st+ave)*(st+ave+1)//2 + ave",
"def nextIntersectors(self, inter):\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n otherFInters = self.getIntersectorList(inter.f.vertices)\n # First intersector\n pInters = self.getIntersectorList(inter.pe.pFace.vertices)\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n pInters),\n None)\n if otherI1 is None:\n # The pFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the pFace\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.pe.pFace,\n otherFInters),\n None)\n if otherI1 is None:\n # polyhedron(inter.f.vertices + inter.pe.pFace.vertices,\n # inter.pe.pFace.edges() + inter.f.edges(),\n # [inter.f, inter.pe.pFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, '\\n\\n', inter.pe.pFace)\n assert all(v in self.poly1.vertices for v in inter.f.vertices) or all(v in self.poly2.vertices for v in inter.f.vertices)\n assert self.poly1.facesInVertices() and self.poly2.facesInVertices()\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n assert self.poly1.nonDoubleVertices() and self.poly2.nonDoubleVertices()\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, pInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.f.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, pInters)])\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, otherFInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.pe.pFace.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, otherFInters)])\n raise ValueError('No intersector found')\n # Second intersector\n nInters = self.getIntersectorList(inter.pe.nFace.vertices)\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n nInters),\n None)\n if otherI2 is None:\n # The nFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the nFace\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.pe.nFace,\n otherFInters),\n None)\n if otherI2 is None:\n polyhedron(inter.f.vertices + inter.pe.nFace.vertices,\n inter.pe.nFace.edges() + inter.f.edges(),\n [inter.f, inter.pe.nFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, inter.pe.pFace)\n raise ValueError('No intersector found')\n inter.adjacents = (otherI1, otherI2)\n return (otherI1, otherI2)",
"def analyze_natural_focusing(ps_beg, beamline, gamma, id_slices, zplot):\n\n ps_before = ps_beg\n\n count_UND = 0\n\n for element in beamline:\n ps_after = np.dot( element.M1, ps_before ) +element.M2\n\n # Check whether this element is an undulatorself.\n if isinstance(element, Undulator):\n count_UND += 1\n # The phase space distribution along the bunch before and after the\n # bunch.\n ps_s_before = beam_property_along_s(ps_before, id_slices)\n ps_s_after = beam_property_along_s(ps_after, id_slices)\n\n label1 = 'Before UND '+str(count_UND)\n label2 = 'After UND '+str(count_UND)\n save_name = 'Natural_focusing_in_UND'+str(count_UND)\n plt.figure()\n plt.plot(zplot[0:-1], ps_s_before[3,:]*gamma, label = label1)\n plt.plot(zplot[0:-1], ps_s_after[3,:]*gamma, label = label2)\n plt.grid()\n plt.legend()\n plt.savefig(save_name)\n ## End if\n\n ps_before = ps_after\n\n return",
"def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave",
"def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave",
"def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave",
"def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave",
"def intersection(st, ave):\n return (st+ave)*(st+ave+1)//2 + ave",
"def computation_gr(particles,p_types,dist,i,j,nbins, rmax):\n i=np.where(p_types == i)[0][0]\n j=np.where(p_types == j)[0][0]\n\n\n if len(p_types)>1:\n #indexes to delete if there is more than one type of particles\n i_axis0=[]\n i_axis1=[]\n for k in range(len(p_types)):\n if k!=i:\n i_axis0.append(particles[k])\n if k!=j:\n i_axis1.append(particles[k])\n dist = np.delete(dist,np.hstack(i_axis0), axis=0)\n dist = np.delete(dist,np.hstack(i_axis1), axis=1)\n\n\n\n bin_count = np.zeros((nbins,3))\n bin_ends = -rmax*np.cos(np.linspace(np.pi/2,np.pi,num=nbins+1))\n\n vol_old=0\n for i in range(nbins):\n bin_count[i,0]=0.5*(bin_ends[i+1]+bin_ends[i]) #Count position in the middle of the bin only needed in the first\n rmax_bin=bin_ends[i+1]\n indexes=np.where(dist<=rmax_bin)\n dist[indexes]=1000\n bin_count[i,1]=len(indexes[0])/len(particles[j])\n print(len(particles[j]))\n vol_new=4/3*np.pi*rmax_bin**3\n bin_count[i,2]=bin_count[i,1]/(vol_new-vol_old)\n\n rho_ave=256/6.71838**3 #np.sum(bin_count[:,1])/(4/3*np.pi*rmax**3)\n\n print(rho_ave)\n\n bin_count[:,2]=bin_count[:,2]/rho_ave**2 #g(r)=rho(r)/rho_ave\n\n return bin_count",
"def cfdProcessGeometry(self):\r\n \r\n # self.faceCentroids']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceSf']= [[] for i in range(self.numberOfFaces'])]\r\n # self.faceAreas']= [[] for i in range(self.numberOfFaces'])]\r\n \r\n ## Linear weight of distance from cell center to face\r\n self.faceWeights= [[0] for i in range(self.numberOfFaces)]\r\n\r\n ## Not\r\n self.faceCF= [[0, 0, 0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceCf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.faceFf= [[0,0,0] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDist= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.wallDistLimited= [[] for i in range(self.numberOfFaces)]\r\n \r\n self.elementCentroids= [[] for i in range(self.numberOfElements)]\r\n self.elementVolumes= [[] for i in range(self.numberOfElements)]\r\n \r\n \"\"\"\r\n Calculate:\r\n -face centroids (faceCentroids)\r\n -face normal (Sf)\r\n -face areas (faceAreas)\r\n \"\"\"\r\n \r\n #find cell with largest number of points\r\n maxPoints=len(max(self.faceNodes, key=len))\r\n forCross1 = [[] for i in range(maxPoints)]\r\n forCross2 = [[] for i in range(maxPoints)]\r\n local_faceCentroid=[[] for i in range(maxPoints)]\r\n \r\n for iFace in range(self.numberOfFaces):\r\n theNodeIndices = self.faceNodes[iFace]\r\n theNumberOfFaceNodes = len(theNodeIndices)\r\n \r\n #compute a rough centre of the face\r\n local_centre = [0,0,0]\r\n \r\n for iNode in theNodeIndices:\r\n local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n \r\n local_centre = local_centre/theNumberOfFaceNodes\r\n \r\n for iTriangle in range(theNumberOfFaceNodes):\r\n \r\n point1 = local_centre\r\n point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n \r\n if iTriangle < theNumberOfFaceNodes-1:\r\n point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n else:\r\n point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n \r\n local_faceCentroid[iTriangle].append((point1+point2+point3)/3)\r\n \r\n left=point2-point1\r\n right=point3-point1\r\n \r\n forCross1[iTriangle].append(left)\r\n forCross2[iTriangle].append(right)\r\n \r\n \r\n local_Sf=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n local_area=[np.zeros([self.numberOfFaces,3]) for i in range(maxPoints)]\r\n \r\n centroid=np.zeros([self.numberOfFaces,3])\r\n area=np.zeros([self.numberOfFaces])\r\n Sf=np.zeros([self.numberOfFaces,3])\r\n \r\n #cells with fewer faces than others are full of zeros\r\n for i in range(maxPoints):\r\n \r\n forCrossLeft=np.vstack(np.array(forCross1[i]))\r\n forCrossRight=np.vstack(np.array(forCross2[i]))\r\n \r\n local_Sf[i]=0.5*np.cross(forCrossLeft,forCrossRight)\r\n local_area[i]=np.linalg.norm(local_Sf[i],axis=1)\r\n \r\n centroid = centroid + np.array(local_faceCentroid[i])*local_area[i][:,None]\r\n Sf=Sf+local_Sf[i]\r\n area=area+local_area[i]\r\n \r\n self.faceCentroids=centroid/area[:,None]\r\n self.faceSf=Sf\r\n self.faceAreas=area \r\n \r\n \r\n \"\"\"\r\n Pure python version - causes slowness due to iterative np.cross()\r\n \"\"\"\r\n \r\n # for iFace in range(self.numberOfFaces):\r\n # theNodeIndices = self.faceNodes[iFace]\r\n # theNumberOfFaceNodes = len(theNodeIndices)\r\n # \r\n # #compute a rough centre of the face\r\n # local_centre = [0,0,0]\r\n # \r\n # for iNode in theNodeIndices:\r\n # \r\n # local_centre = local_centre + self.nodeCentroids[int(iNode)]\r\n # \r\n # local_centre = local_centre/theNumberOfFaceNodes\r\n # centroid = [0, 0, 0]\r\n # Sf = [0,0,0]\r\n # area = 0\r\n # \r\n # #finds 
area of virtual triangles and adds them to the find to find face area\r\n # #and direction (Sf)\r\n # \r\n # \r\n # \r\n # for iTriangle in range(theNumberOfFaceNodes):\r\n # point1 = local_centre\r\n # point2 = self.nodeCentroids[int(theNodeIndices[iTriangle])]\r\n # \r\n # if iTriangle < theNumberOfFaceNodes-1:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[iTriangle+1])]\r\n # else:\r\n # point3 = self.nodeCentroids[int(theNodeIndices[0])]\r\n # \r\n # local_centroid = (point1 + point2 + point3)/3\r\n # \r\n # left=point2-point1\r\n # right=point3-point1\r\n # x = 0.5*((left[1] * right[2]) - (left[2] * right[1]))\r\n # y = 0.5*((left[2] * right[0]) - (left[0] * right[2]))\r\n # z = 0.5*((left[0] * right[1]) - (left[1] * right[0]))\r\n # local_Sf=np.array([x,y,z])\r\n # \r\n # local_area = np.linalg.norm(local_Sf)\r\n # \r\n # centroid = centroid + local_area*local_centroid\r\n # Sf = Sf + local_Sf\r\n # area = area + local_area\r\n # centroid = centroid/area\r\n # self.faceCentroids[iFace]=centroid\r\n # self.faceSf[iFace]=Sf\r\n # self.faceAreas[iFace]=area\r\n \r\n \r\n \"\"\"\r\n Calculate:\r\n -element centroids (elementCentroids)\r\n -element volumes (elementVolumes)\r\n \"\"\"\r\n for iElement in range(self.numberOfElements):\r\n \r\n theElementFaces = self.elementFaces[iElement]\r\n \r\n #compute a rough centre of the element\r\n local_centre = [0,0,0]\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n local_centre = local_centre + self.faceCentroids[faceIndex]\r\n \r\n local_centre = local_centre/len(theElementFaces)\r\n \r\n localVolumeCentroidSum = [0,0,0]\r\n localVolumeSum = 0\r\n \r\n for iFace in range(len(theElementFaces)):\r\n faceIndex = theElementFaces[iFace]\r\n \r\n Cf = self.faceCentroids[faceIndex]-local_centre\r\n \r\n faceSign = -1\r\n if iElement == self.owners[faceIndex]:\r\n faceSign = 1\r\n \r\n local_Sf = faceSign*self.faceSf[faceIndex]\r\n \r\n localVolume = np.dot(local_Sf,Cf)/3\r\n \r\n localCentroid = 0.75*self.faceCentroids[faceIndex]+0.25*local_centre\r\n \r\n localVolumeCentroidSum = localVolumeCentroidSum + localCentroid*localVolume\r\n \r\n localVolumeSum = localVolumeSum + localVolume\r\n \r\n self.elementCentroids[iElement]=localVolumeCentroidSum/localVolumeSum\r\n self.elementVolumes[iElement]=localVolumeSum\r\n \r\n \r\n for iFace in range(self.numberOfInteriorFaces):\r\n \r\n n=self.faceSf[iFace]/np.linalg.norm(self.faceSf[iFace])\r\n own=self.owners[iFace]\r\n nei = self.neighbours[iFace]\r\n \r\n self.faceCF[iFace]=self.elementCentroids[nei]-self.elementCentroids[own]\r\n self.faceCf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[own]\r\n self.faceFf[iFace]=self.faceCentroids[iFace]-self.elementCentroids[nei]\r\n self.faceWeights[iFace]=(-np.dot(self.faceFf[iFace],n))/(-np.dot(self.faceFf[iFace],n)+np.dot(self.faceCf[iFace],n))\r\n \r\n for iBFace in range(self.numberOfInteriorFaces, self.numberOfFaces):\r\n \r\n \r\n n=self.faceSf[iBFace]/np.linalg.norm(self.faceSf[iBFace])\r\n own=self.owners[iBFace]\r\n \r\n self.faceCF[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own]\r\n self.faceCf[iBFace]=self.faceCentroids[iBFace]-self.elementCentroids[own] \r\n self.faceWeights[iBFace]=1\r\n self.wallDist[iBFace]= max(np.dot(self.faceCf[iBFace], n), 1e-24)\r\n self.wallDistLimited[iBFace]= max(self.wallDist[iBFace], 0.05*np.linalg.norm(self.faceCf[iBFace]))",
"def intersection(x, y, f, p):",
"def ShowOneContourCutBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figname='contourCutBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-10:y0+10,:]=0\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index]\n \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image ,50, colors='white', linewidth=.001,origin='lower') \n \n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)",
"def get_intersected_basins_ppt_data(all_basin_geoms , month, year, conv2Inches):\n \n global gSpatialIndex\n print(\"Processing Prism Dataset\")\n ppt_bounds, ppt_data, hdr_dict = get_monthly_prism_ppt_data(year = year, month = month, plotPPTBounds = False)\n print(\"-Extracting precipitation data\")\n ppt_gdf = convert_pptData_to_GDF(ppt_bounds, ppt_data, hdr_dict, plotHeatMap = False)\n\n intersected_basins = {}\n print(\"---Creating Spatial RTree Index for month:\", month)\n \n # Create a copy of a global index to reduce time.\n # Check if it works correctly.\n \n if(gSpatialIndex == 0):\n gSpatialIndex = ppt_gdf.sindex\n\n print(\"-Creating basin intersections\")\n for basin_file_name, basin_geom in all_basin_geoms.items():\n possible_matches_index = list(gSpatialIndex.intersection(basin_geom.bounds))\n possible_matches = ppt_gdf.iloc[possible_matches_index]\n precise_matches = possible_matches[possible_matches.intersects(basin_geom)]\n if(conv2Inches):\n precise_matches[\"Precipitation\"] = precise_matches[\"Precipitation\"]/25.4\n intersected_basins[basin_file_name] = precise_matches\n \n print(\"Completed processing \")\n return intersected_basins",
"def pdf_d(iout,run='',data='../data',iv='d',i4=0,nbin=100,xlim=[-4,3],lnd=False):\n s = di.snapshot(iout,run=run,data=data)\n n = nbin\n bins = np.linspace(xlim[0],xlim[1],n+1)\n htot = 0.0\n i = 0\n for p in s.patches:\n i += 1\n if i%1000==0:\n print('{:.1f}%'.format(i/len(s.patches)*100.0))\n d = p.var(iv,i4=i4)\n if lnd:\n logd = d/np.log(10.)\n else:\n logd = np.log10(d)\n h,e = np.histogram(logd,bins=bins)\n htot += h\n pl.hist(bins[0:n],bins=bins,weights=htot,log=True,density=True)\n return bins,htot",
"def viz2(img1, interest_points1, img2, interest_points2, matches, PATCH_SIZE, threshold, min_sigma, max_sigma, num_sigma):\n \n\n\tfig = plt.figure(figsize=(10,5))\n\tax1 = fig.add_subplot(121)\n\tax2 = fig.add_subplot(122)\n\n #adding the two images to axes \n\tax1.imshow(img1, cmap='gray')\n\tax2.imshow(img2, cmap='gray')\n\n\tpositionimg1 = ax1.get_position()\n\tnew_pos = [positionimg1.x0+0.09, positionimg1.y0+0.025, \\\n\t\tpositionimg1.width / 1.1, positionimg1.height / 1.1] \n\tax1.set_position(new_pos)\n\n\tx1 = [a[1] for a in interest_points1] #blob detection x axis\n\ty1 = [a[0] for a in interest_points1] #blob detection y axis\n\ts1 = [a[2] for a in interest_points1] #blob detected at sigma \n \n\tx2 = [a[1] for a in interest_points2] #blob detection x axis\n\ty2 = [a[0] for a in interest_points2] #blob detection y axis\n\ts2 = [a[2] for a in interest_points2] #blob detected at sigma \n \n\tdifferences = [a[2] for a in matches]\n\n\n\tweighted_differences = normalize(differences)\n\n #iterating through the input list of matches\n\tfor coordinates, difference in zip(matches, weighted_differences):\n\t\tcord_a = (coordinates[0][1], coordinates[0][0]) #extracting coordinates for interest point in img1\n\t\tcord_b = (coordinates[1][1], coordinates[1][0]) #extracting coordinates for interest point in img2\n\t\tif difference <=0.33:\n\t\t\tcolor = \"green\"\n\t\telif difference > 0.33 and difference <= 0.66:\n\t\t\tcolor = \"yellow\"\n\t\telse:\n\t\t\tcolor = \"red\"\n\n\t#defining the path from cord_a to cord_b\n\t\tcon = ConnectionPatch(xyA=cord_a, xyB=cord_b, coordsA=\"data\", coordsB=\"data\",\n\t\t\t\t\t\t\t axesA=ax2, axesB=ax1, color=color) #arrowstyle='->')\n\t#adding line to axes2 \n\t\tax2.add_artist(con)\n\n #showing the image // can be changed to saving the image locally \n\tfor x, y, s in zip(x1, y1, s1):\n\t\tax1.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img1\n\tfor x, y, s in zip(x2, y2, s2):\n\t\tax2.scatter(x, y, alpha=1, facecolors='none', edgecolors='r', s=s**2) #plotting the input interest points for img2\n\tax1.axis('off')\n\tax2.axis('off')\n\ttitle = 'Patch Size=' + str(PATCH_SIZE) + ', Threshold=' + str(threshold) + ', min sigma=' + \\\n\tstr(min_sigma) + ', max sigma=' + str(max_sigma) + ', num sigma=' + str(num_sigma)\n\tplt.title(title, x=+0.1)\n\t#plt.show()\n\tplt.savefig(title+'.png')\n\n\n\treturn",
"def intersection(self, axis2):",
"def analyze(self, event):\n\t\tJets = Collection(event, \"Jet\")\n\t\tjets = [j for j in Jets if j.pt >= 20]\n\t\tgenpart = Collection(event, \"GenPart\")\n\t\tgenParts = [l for l in genpart]\n\t\t# get the particles when they have a mother ---> getting the daughters only \n\t\tdaughters = [l for l in genpart if l.genPartIdxMother>= 0 ]\n\t\tevent.nIsr = 0\n\t\tfor jet in jets:\n\t\t\tif jet.pt <30.0: continue\n\t\t\tif abs(jet.eta )>2.4: continue\n\t\t\tmatched = False\n\t\t\tfor i,mc in enumerate(genParts):\n\t\t\t\t# if it's matched doesn't make sence to correct it\n\t\t\t\tif matched: break\n\t\t\t\t# check if it's quark from top or not\n\t\t\t\tif (mc.status!=23 or abs(mc.pdgId)>5): continue\n\t\t\t\tmomid = abs(genParts[mc.genPartIdxMother].pdgId)\n\t\t\t\tif not (momid==6 or momid==23 or momid==24 or momid==25 or momid>1e6): continue\n\t\t\t\tfor idau in range(len(daughters)) :\n\t\t\t\t\t# look for the products of the jet and match jet with gen daughters of the quark \n\t\t\t\t\tif i == daughters[idau].genPartIdxMother:\n\t\t\t\t\t\tdR = math.sqrt(deltaR2(jet.eta,jet.phi, daughters[idau].eta,daughters[idau].phi))\n\t\t\t\t\t\tif dR<0.3:\n\t\t\t\t\t\t\t# if matched escape\n\t\t\t\t\t\t\tmatched = True\n\t\t\t\t\t\t\tbreak\n\t\t\t# if not matched correct it \n\t\t\tif not matched:\n\t\t\t\tevent.nIsr+=1\n\t\t# fill the output with nisr\n\t\tself.out.fillBranch(\"nIsr\",event.nIsr)\n\t\tnISRweight = 1\n\t\t#https://indico.cern.ch/event/592621/contributions/2398559/attachments/1383909/2105089/16-12-05_ana_manuelf_isr.pdf\n\t\tISRweights_Mar17 = { 0: 1, 1 : 0.920, 2 : 0.821, 3 : 0.715, 4 : 0.662, 5 : 0.561, 6 : 0.511}\n\t\tISRweights_ICHEP16 = { 0: 1, 1 : 0.882, 2 : 0.792, 3 : 0.702, 4 : 0.648, 5 : 0.601, 6 : 0.515}\n\t\tISRweightssyst_Mar17 = { 0: 0.0, 1 : 0.040, 2 : 0.090, 3 : 0.143, 4 : 0.169, 5 : 0.219, 6 : 0.244}\n\t\tISRweightssyst_ICHEP16 = { 0: 0.0, 1 : 0.059, 2 : 0.104, 3 : 0.149, 4 : 0.176, 5 : 0.199, 6 : 0.242}\n\t\t\n\t\tif self.ICHEP16 == True and self.Mar17 == False:\n\t\t\tISRweights = ISRweights_ICHEP16\n\t\t\tISRweightssyst = ISRweightssyst_ICHEP16\n\t\t\t\n\t\telif self.ICHEP16 == False and self.Mar17 == True: \n\t\t\tISRweights = ISRweights_Mar17\n\t\t\tISRweightssyst = ISRweightssyst_Mar17\n\t\t\t\n\t\tnISRforWeights = int(event.nIsr)\n\t\tif event.nIsr > 6:\n\t\t\tnISRforWeights = 6\n\t\tC_ISR = 1.090\n\t\tC_ISR_up = 1.043\n\t\tC_ISR_down = 1.141\n\t\tnISRweight = C_ISR * ISRweights[nISRforWeights]\n\t\tnISRweightsyst_up = C_ISR_up * (ISRweights[nISRforWeights] + ISRweightssyst[nISRforWeights])\n\t\tnISRweightsyst_down = C_ISR_down * (ISRweights[nISRforWeights] - ISRweightssyst[nISRforWeights])\n\t\t\n\t\tself.out.fillBranch(\"nISRweight\",nISRweight)\n\t\tself.out.fillBranch(\"nISRttweightsyst_up\",nISRweightsyst_up)\n\t\tself.out.fillBranch(\"nISRttweightsyst_down\",nISRweightsyst_down)\n\n\n # ------ Forwarded Message --------\n # Subject: Re: question for ttbar ISR reweighting\n # Date: Sat, 14 Jan 2017 20:24:14 +0100\n # From: Manuel Franco Sevilla <manuel.franco.sevilla@cern.ch>\n #The [Nom, Up, Down] values we find for the events with Nisr = 0 are:\n #[1.090, 1.043, 1.141]: TTJets_Tune\n #[1.096, 1.046, 1.151]: TTJets_SingleLeptFromT\n #[1.116, 1.055, 1.185]: TTJets_DiLept\n\t\t\n\t\t\n\t\treturn True",
"def calculate_bin_edges(n_bins, geo):\n #Gefittete offsets: x,y,factor: factor*(x+x_off)\n #[6.19, 0.064, 1.0128]\n \n #print \"Reading detector geometry in order to calculate the detector dimensions from file \" + fname_geo_limits\n #geo = np.loadtxt(fname_geo_limits)\n\n # derive maximum and minimum x,y,z coordinates of the geometry input [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]\n geo_limits = np.nanmin(geo, axis = 0), np.nanmax(geo, axis = 0)\n #print ('Detector dimensions [[first_OM_id, xmin, ymin, zmin], [last_OM_id, xmax, ymax, zmax]]: ' + str(geo_limits))\n\n x_bin_edges = np.linspace(geo_limits[0][1] - 9.95, geo_limits[1][1] + 9.95, num=n_bins[0] + 1) #try to get the lines in the bin center 9.95*2 = average x-separation of two lines\n y_bin_edges = np.linspace(geo_limits[0][2] - 9.75, geo_limits[1][2] + 9.75, num=n_bins[1] + 1) # Delta y = 19.483\n z_bin_edges = np.linspace(geo_limits[0][3] - 4.665, geo_limits[1][3] + 4.665, num=n_bins[2] + 1) # Delta z = 9.329\n\n #offset_x, offset_y, scale = [6.19, 0.064, 1.0128]\n #x_bin_edges = (x_bin_edges + offset_x )*scale\n #y_bin_edges = (y_bin_edges + offset_y )*scale\n\n #calculate_bin_edges_test(geo, y_bin_edges, z_bin_edges) # test disabled by default. Activate it, if you change the offsets in x/y/z-bin-edges\n\n return x_bin_edges, y_bin_edges, z_bin_edges",
"def tabulate_pdf(self):\n\n from mitsuba.core import Float, Vector2f, ScalarVector2f\n\n extents = self.bounds.extents()\n endpoint = self.bounds.max - extents / ScalarVector2f(self.res)\n\n # Compute a set of nodes where the PDF should be evaluated\n x, y = ek.meshgrid(\n ek.linspace(Float, self.bounds.min.x, endpoint.x, self.res.x),\n ek.linspace(Float, self.bounds.min.y, endpoint.y, self.res.y)\n )\n\n endpoint = extents / ScalarVector2f(self.res)\n eps = 1e-4\n nx = ek.linspace(Float, eps, endpoint.x * (1 - eps), self.ires)\n ny = ek.linspace(Float, eps, endpoint.y * (1 - eps), self.ires)\n wx = [1 / (self.ires - 1)] * self.ires\n wy = [1 / (self.ires - 1)] * self.ires\n wx[0] = wx[-1] = wx[0] * .5\n wy[0] = wy[-1] = wy[0] * .5\n\n integral = 0\n\n self.histogram_start = time.time()\n for yi, dy in enumerate(ny):\n for xi, dx in enumerate(nx):\n xy = self.domain.map_forward(Vector2f(x + dx, y + dy))\n pdf = self.pdf_func(xy)\n integral = ek.fmadd(pdf, wx[xi] * wy[yi], integral)\n self.histogram_end = time.time()\n\n self.pdf = integral * (ek.hprod(extents / ScalarVector2f(self.res))\n * self.sample_count)\n\n # A few sanity checks\n pdf_min = ek.hmin(self.pdf) / self.sample_count\n if not pdf_min >= 0:\n self._log('Failure: Encountered a cell with a '\n 'negative PDF value: %f' % pdf_min)\n self.fail = True\n\n self.pdf_sum = ek.hsum(self.pdf) / self.sample_count\n if self.pdf_sum > 1.1:\n self._log('Failure: PDF integrates to a value greater '\n 'than 1.0: %f' % self.pdf_sum)\n self.fail = True",
"def ShowOneContourBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n \n figname='contourBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n #C = plt.contour(X, Y, reduc_image ,10, colors='white', linewidth=.01,origin='lower')\n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)",
"def analyze(self, event):\n nloSF = 1.0\n boson_pt=0\n mjj=0\n #print ' - event %d:'%(event._entry)\n\n if self._worker: \n\n genParticles = Collection(event, \"GenPart\")\n boson_found = False\n lep1 = None\n lep2 = None\n for part in genParticles:\n #if ( (part.pdgId == 23 or abs(part.pdgId) == 24) and (part.statusFlags & 0x2000)>0 and (part.statusFlags & 0x100)>0 ):\n if ( (abs(part.pdgId)>10 and abs(part.pdgId) < 17) and ( (part.status == 1 and (part.statusFlags & 0x1)>0) or ((part.statusFlags & 0x1)>0 and (part.statusFlags & 0x2)>0) ) ):\n if (part.genPartIdxMother>=0):\n mother = genParticles[part.genPartIdxMother]\n #print ' --- event %d pdgid %d --- pdg mother %d: %d '%(event._entry,part.pdgId,part.genPartIdxMother,mother.pdgId)\n \n if (mother.pdgId == 23 or abs(mother.pdgId) == 24):\n boson_pt = mother.pt\n boson_found = True\n break\n else:\n if (part.pdgId>0):\n lep1 = part\n else:\n lep2 = part\n\n else:\n #print ' --- event %d pdgid %d --- no mother'%(event._entry,part.pdgId)\n if (part.pdgId>0):\n lep1 = part\n else:\n lep2 = part\n\n if (not boson_found and lep1 is not None and lep2 is not None):\n boson_pt = (lep1.p4()+lep2.p4()).Pt()\n boson_found = True\n\n if (not boson_found):\n idx=0\n print ' --- event %d boson not found:'%(event._entry)\n for part in genParticles:\n print ' ------ part %d: pdgid %d pT %3.3f status %d flags %d mother %d'%(idx,part.pdgId,part.pt,part.status,part.statusFlags,part.genPartIdxMother)\n idx += 1\n\n #if (part.genPartIdxMother>=0):\n #print ' ------ pdg mother %d: %d '%(part.genPartIdxMother,genParticles[part.genPartIdxMother].pdgId)\n\n\n\n genJets = Collection(event, \"GenJet\")\n #idx = 0\n #for genjet in genJets:\n # print 'Jet %d: pT=%3.3f GeV'%(idx,genjet.pt)\n # idx += 1\n if (len(genJets)>1):\n mjj = (genJets[0].p4()+genJets[1].p4()).M()\n \n nloSF = self._worker.getSF(boson_pt,mjj)\n if boson_found: self.counter += 1\n\n self.out.fillBranch(\"nloSF_%s\"%(self.process),nloSF)\n self.out.fillBranch(\"gen_boson_pt\",boson_pt)\n self.out.fillBranch(\"gen_mjj\",mjj)\n return True"
] | [
"0.69076186",
"0.5713038",
"0.56754965",
"0.5481251",
"0.5395632",
"0.5379876",
"0.5379819",
"0.52434504",
"0.51830965",
"0.51680446",
"0.51668954",
"0.51181227",
"0.5096356",
"0.5096356",
"0.5096356",
"0.5096356",
"0.5096356",
"0.5079256",
"0.5078702",
"0.50733835",
"0.50440705",
"0.50280464",
"0.5027401",
"0.50198585",
"0.50180995",
"0.50178856",
"0.49911222",
"0.49846637",
"0.4976729",
"0.49706167"
] | 0.6194617 | 1 |
Plots all particles, classifying them as cluster or non-cluster particles according to the Voronoi classification | def plotClusters(self):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fig.set_size_inches(18.5, 9.5)
ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)
ax.set_xlabel('x [m]', fontsize=18)
ax.set_ylabel('y [m]', fontsize=18)
ax.set_zlabel('z [m]', fontsize=18)
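# Pick a colour for each cluster label from the nipy_spectral colormap; the two shuffled
# draws give distinct face colours (colors) and edge colours (colorsB) per cluster.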
strength = np.linspace(0, 0.8, len(self.unique_labels))
np.random.shuffle(strength)
colors = [plt.cm.nipy_spectral(each) for each in strength]
np.random.shuffle(strength)
colorsB = [plt.cm.nipy_spectral(each) for each in strength]
for k, col, colB in zip(self.unique_labels, colors, colorsB):
a = 1
s = 3
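# Defaults for cluster members: full opacity and larger markers; noise (label -1) is overridden below.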
if k == -1:
# Red used for noise (unclustered particles), drawn fainter and smaller.
col = [1, 0, 0]
a = 0.3
s = 1
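# Mask out the particles assigned to the current label and scatter them if any exist.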
class_member_mask = (self.labels == k)
xy = self.data[class_member_mask]
if len(xy) > 0:
ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),
edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plotResults(self):\n\n clusters = self.data[[i for i in range(len(self.data)) if self.vorLabels[i] != 0], :]\n vorLabels = [self.vorLabels[i] for i in range(len(self.data)) if self.vorLabels[i] != 0]\n\n self.plot = voronoiPlot(clusters, self.skel, self.skelLabels, self.isCorrect, vorLabels)\n self.plot.snapPlot()",
"def visualise(self):\n\n # Initialise figure\n params = {\"figure.figsize\": (5, 5)}\n pylab.rcParams.update(params)\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n\n # Add particles if selected\n print(self.crds)\n cmap=cm.get_cmap('coolwarm')\n norm=Normalize(0,20)\n print(np.max(self.radii))\n print(np.max(self.weights))\n if self.vis_particles:\n if self.vis_vortype==-2:\n radii=self.weights\n if self.param>10:\n self.param=(self.param-10)/2+10\n colour=cmap(norm(self.param))\n else:\n radii=self.radii\n radii=self.weights\n colour='orange'\n colour=(0.8,0.687,0.287,1)\n colour='gold'\n patches = []\n patches_pnts = []\n patches_absent = []\n for i,c in enumerate(self.crds):\n patches.append(Circle(c,radius=radii[i]))\n if radii[i]>0:\n patches_pnts.append(Circle(c,radius=0.1))\n else:\n patches_absent.append(Circle(c,radius=0.1))\n self.ax.add_collection(PatchCollection(patches, facecolor=colour, edgecolor='k', alpha=0.5))\n self.ax.add_collection(PatchCollection(patches_pnts, facecolor='k', alpha=1,zorder=1))\n if self.vis_vortype==2:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=0.5,zorder=1))\n else:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=1,zorder=1))\n\n # Add voronoi\n if self.vis_vortype!=0:\n patches = []\n colours = []\n if self.vis_cellcolour==1:\n cell_colours = self.init_cell_colours()\n else:\n cell_colours = [(0,0,0,0)]*100\n for i in range(self.m):\n patches.append(Polygon(self.rings[i],True))\n colours.append(cell_colours[self.rings[i][:,0].size])\n self.ax.add_collection(PatchCollection(patches, facecolor=colours, edgecolor='k', linewidth=1, zorder=0))\n\n # Sandbox\n # print(np.max(self.radii))\n # cmap=cm.get_cmap('coolwarm')\n # norm=Normalize(0,np.max(20))\n sandbox=False\n if sandbox:\n # z=16\n # w=np.zeros_like(self.radii)\n # mask=2*self.radii>z\n # w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n # patches = []\n # for i,c in enumerate(self.crds):\n # patches.append(Circle(c,radius=w[i]))\n # self.ax.add_collection(PatchCollection(patches, facecolor=cmap(norm(z)), edgecolor='k'))\n with open('./phi.dat','w') as f:\n for z in np.arange(0,np.max(self.radii)*2+0.5,0.01):\n w=np.zeros_like(self.radii)\n mask=2*self.radii>z\n w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n phi=np.sum(np.pi*w**2)/52359.9\n # phi=np.sum(np.pi*w**2)/1309\n f.write('{:.6f} {:.6f}\\n'.format(z,phi))\n\n\n\n # Set axes\n buffer = 1.6\n lim = buffer*np.max(np.abs(self.crds))\n self.ax.set_xlim((-lim,lim))\n self.ax.set_ylim((-lim,lim))\n self.ax.set_axis_off()\n\n # Show figure\n if self.vis_save:\n plt.savefig('{}_{}_{}.png'.format(self.prefix,self.frame,self.vis_vortype),dpi=400)\n plt.show()",
"def visualize(title, particles):\n\n plt.figure(figsize=(10,10))\n plt.title(\"Best configuration for \" + str(len(particles)) + \" particles\", size=25)\n plt.xlabel(\"xcoordinate\", size=18)\n plt.ylabel(\"ycoordinate\", size=18)\n\n plt.xticks(size=13)\n plt.yticks(size=13)\n\n circle = plt.Circle((0, 0), 1)\n circle.set_edgecolor(\"red\")\n circle.set_facecolor(\"none\")\n fig = plt.gcf()\n ax = fig.gca()\n\n ax.add_artist(circle)\n plt.xlim(-1.1,1.1)\n plt.ylim(-1.1,1.1)\n\n # draw all the particles\n for particle in particles:\n plt.scatter(particle.x, particle.y)\n\n fig.savefig(title)",
"def plot(self, \n\t\t\t voronoi: bool = False):\n\t\tif (voronoi):\n\t\t\tif (len(self.X) == 2):\n\t\t\t\tfrom verticapy.learn.plot import voronoi_plot\n\t\t\t\tquery = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'centers')\".format(self.name)\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tclusters = self.cursor.fetchall()\n\t\t\t\tvoronoi_plot(clusters = clusters, columns = self.X)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Voronoi Plots are only available in 2D\")\n\t\telse:\n\t\t\tvdf = vDataFrame(self.input_relation, self.cursor)\n\t\t\tself.predict(vdf, \"kmeans_cluster\")\n\t\t\tif (len(self.X) <= 3):\n\t\t\t\tvdf.scatter(columns = self.X, catcol = \"kmeans_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()",
"def plot_clusters(self):\n pass",
"def plot(self):\n\t\tif (2 <= len(self.X) <= 3):\n\t\t\tvDataFrame(self.name, self.cursor).scatter(columns = self.X, catcol = \"dbscan_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\telse:\n\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def plotVolumeContours(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Particle Positions Colored by Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n pos = ax.scatter(self.data[self.nonB, 0], self.data[self.nonB, 1], self.data[self.nonB, 2], s=10, c=self.volumes, cmap='plasma')\n cbar = fig.colorbar(pos, ax=ax)\n cbar.ax.tick_params(labelsize=15)",
"def DisplayCentroids(Centroids,outputs,ax,N=1,sections=1):\r\n\r\n SliceValues = np.linspace(float(min(Centroids[:,0])),float(max(Centroids[:,0])),sections+1) # Create boundaries in x for each slice.\r\n idx1 = np.asarray((Centroids[:,0]>=SliceValues[N-1]))*np.asarray((Centroids[:,0]<=SliceValues[N]))\r\n\r\n idx1 = idx1.flatten() \r\n\r\n CentroidSlice = Centroids[idx1,:]\r\n \r\n outputSlice = outputs[idx1,:]\r\n\r\n # Plot Data-------------------------------------------------------------------------------------------------------\r\n ax.scatter(CentroidSlice[:,0],CentroidSlice[:,1],CentroidSlice[:,2],c = [float(N) for N in outputSlice],cmap = 'bwr')\r\n ax.set_zlabel('z')\r\n ax.set_ylabel('y')\r\n ax.set_xlabel('x')",
"def cluster_plot(self):\r\n train = StandardScaler().fit_transform(self.X)\r\n pca = PCA(n_components=3)\r\n pca_component = pca.fit_transform(self.X)\r\n fig = plt.figure(figsize=(10,8))\r\n sns.set_palette(sns.color_palette(\"cubehelix\", 8))\r\n ax = Axes3D(fig)\r\n ax.scatter(pca_component[:,0].tolist(),pca_component[:,1].tolist(),pca_component[:,2].tolist(),c=self.labels,marker='v')\r\n ax.legend()\r\n plt.show()",
"def plot(self, p: int):\n self.compute_clusters(p)\n self.plot_clusters()",
"def plot_vor(self,x,ax,tri=False):\n\n L = self.L\n grid_x, grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j in np.array([grid_x.ravel(), grid_y.ravel()]).T])\n\n c_types_print = np.tile(self.c_types,9)\n bleed = 0.1\n c_types_print = c_types_print[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n y = y[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(y))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n cols = self.cols\n if self.plot_scatter is True:\n for j,i in enumerate(np.unique(self.c_types)):\n ax.scatter(x[self.c_types==i, 0], x[self.c_types==i, 1],color=cols[i],zorder=1000)\n patches = []\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cols[c_types_print[i]],ec=(1,1,1,1)))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)\n if tri is not False:\n for TRI in tri:\n for j in range(3):\n a, b = TRI[j], TRI[np.mod(j + 1, 3)]\n if (a >= 0) and (b >= 0):\n X = np.stack((x[a], x[b])).T\n ax.plot(X[0], X[1], color=\"black\")",
"def plot_clusters(true_data, preds, cluster_center, cluster_name, savefig=\"\", title=\"\"):\n\n colors = plt.cm.get_cmap('hsv', len(cluster_name)+1) # get colors for each cluster using get_cmap. This will give us len(cluster_name) colors in a object form.\n \n for i, c in enumerate(cluster_name): # iterate through each cluster name\n if c == -1: # -1 is given by DBScan for noise\n clrs = 'grey' # make it grey\n label = 'Noise' # label it 'Noise'\n else:\n clrs = colors(c) # get color for it\n label=f'Cluster {c}' # label it by its name\n df = true_data[preds == c] # get the points from dataset whose prediction was cluster `c`\n x, y = df.iloc[:, 0], df.iloc[:, 1] # x and y axis\n plt.scatter( # plotting the x and y axis\n x, y,\n label=label,\n color=clrs\n )\n if c != -1:\n plt.text(\n cluster_center[i][0] + 0.03, cluster_center[i][1] + 0.1,\n f\"Cluster {i}\",\n weight='bold',\n fontsize=9,\n )\n \n plt.scatter(\n cluster_center[:, 0], cluster_center[:, 1], # plotting the cluster centers\n s=250, marker='*',\n c='red', edgecolor='black',\n label='Centroids'\n )\n \n plt.title(title)\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.tight_layout()\n if savefig != \"\" : plt.savefig(f\"{savefig}.png\")\n plt.show()\n plt.close()",
"def show_clusters_in_2D_pca(data: np.ndarray, clusters: np.ndarray, num_clusters: int = 10,\n title: str = \"\", block: bool = True):\n try:\n fig, ax = _show_clusters_in_2D_whatever(data, clusters, PCA, num_clusters, title, block)\n except:\n raise\n\n return fig, ax",
"def plot(X, clusters, cluster_num):\n centroids = []\n X = np.asarray(X)\n for i in range(0,cluster_num):\n centroids.append(clusters[i].centroid)\n \n np_centroids = np.asarray(centroids)\n \n color = [\"g\", \"r\", \"b\", \"c\", \"m\", \"b\"]\n \n fig = figure()\n ax = fig.gca(projection='3d')\n for i in range(len(X)):\n ax.scatter(X[i][0], X[i][1], X[i][2], c=color)\n \n ax.scatter(np_centroids[:, 0],np_centroids[:, 1],\n np_centroids[:, 2], marker = \"x\", s=150,\n linewidths = 5, zorder = 100, c=color\n )",
"def gHST_plot_PL(xy, NL, KL, BM, params, factor=1, climv=1., title=''):\n try:\n NP, NN = np.shape(NL)\n except:\n print 'There is only one particle to plot.'\n NP = 1\n NN = 0\n\n # The pivot points are called R_p\n R_p = xy[:, 0:2]\n # pylab.sca(ax)\n ax = plt.gca()\n ax.set_axis_bgcolor('#d9d9d9') # '#E8E8E8')\n # Pivot positions\n Rx = R_p[:, 0]\n Ry = R_p[:, 1]\n\n l = params['l']\n diffx = xy[:, 2]\n diffy = xy[:, 3]\n\n # Make the circles\n patch = [None] * NP\n mag = np.sqrt(diffx ** 2 + diffy ** 2)\n mag[mag == 0] = 1\n\n # angles= np.arccos(diffx/mag)\n # angles[diffy<0] = 2*np.pi-angles[diffy<0]\n angles = np.mod(np.arctan2(diffy, diffx), 2. * np.pi)\n\n # the displayed points\n scat_x = Rx + factor * diffx\n scat_y = Ry + factor * diffy\n\n # the actual points\n ss_x = Rx + diffx\n ss_y = Ry + diffy\n\n # the circles\n patch = [patches.Circle((Rx[i], Ry[i]), radius=factor * mag[i]) for i in range(len(Rx))]\n\n z = np.zeros(len(scat_x))\n\n # Initialize streches vector to be longer than necessary\n inc = 0\n stretches = np.zeros(3 * len(R_p))\n\n test = list(stretches)\n for i in range(len(R_p)):\n if NN > 0:\n # for j, k in zip(Ni[i], Nk[i]):\n for j, k, q in zip(NL[i], KL[i], BM[i]):\n if i < j and abs(k) > 0:\n # the distance between the actual points\n n1 = float(np.linalg.norm(R_p[i] - R_p[j]))\n stretches[inc] = n1 - q\n test[inc] = [R_p[(i, j), 0], R_p[(i, j), 1]]\n inc += 1\n\n test = test[0:inc]\n lines = [zip(x, y) for x, y in test]\n stretch = np.array(stretches[0:inc])\n\n # LINE Segments based on STretch --> lines_st\n lines_st = LineCollection(lines, array=stretch, cmap='coolwarm', linewidth=4)\n lines_st.set_clim([-climv, climv])\n lines_st.set_zorder(3)\n\n p = PatchCollection(patch, cmap='isolum_rainbow', alpha=0.6)\n\n p.set_array(P.array(angles))\n p.set_clim([0, 2 * np.pi])\n p.set_zorder(1)\n\n ax.add_collection(p)\n ax.add_collection(lines_st)\n\n fig = plt.gcf()\n axcb = fig.colorbar(lines_st)\n axcb.set_label('Strain')\n axcb.set_clim(vmin=-climv, vmax=climv)\n\n # Plot masses\n ax.set_aspect('equal')\n s = absolute_sizer()\n scat_fg = ax.scatter(scat_x, scat_y, s=s(0.02), c=angles, vmin=0., vmax=2. * np.pi, cmap='isolum_rainbow',\n alpha=1, zorder=2)\n\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n plt.title(title)\n\n return [scat_fg, lines_st, p]",
"def classify_defect_clusters_modifier(frame, data):\n\n if data.particles.count == 0:\n # No particles there to classify, create empty properties anyway\n data.particles_.create_property('Si_V', dtype=int, components=1)\n data.particles_.create_property('Si_I', dtype=int, components=1)\n data.particles_.create_property('Si_C', dtype=int, components=1)\n data.particles_.create_property('C_V', dtype=int, components=1)\n data.particles_.create_property('C_I', dtype=int, components=1)\n data.particles_.create_property('C_Si', dtype=int, components=1)\n return\n\n # TODO Create numpy arrays containing the number of Si vacancies,\n # interstitials, etc for each particle site in `data.particles`. These\n # next lines are just placeholders!\n si_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is Si Site\"][...]\n si_interstitial = (data.particles[\"Is Si Site\"][...] & (data.particles[\"Si Occupancy\"][...] > 1)) * (\n data.particles[\"Si Occupancy\"][...] - 1) + (\n (data.particles[\"Is C Site\"][...]) * data.particles[\"Si Occupancy\"][...]) - (\n data.particles[\"Is C Site\"][...] & data.particles[\"antisite_mask\"][...])\n si_antisite = data.particles[\"antisite_mask\"][...] * data.particles[\"Is Si Site\"][...]\n c_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is C Site\"][...]\n c_interstitial = (data.particles[\"Is C Site\"][...] & (data.particles[\"C Occupancy\"][...] > 1)) * (\n data.particles[\"C Occupancy\"][...] - 1) + (\n (data.particles[\"Is Si Site\"][...]) * data.particles[\"C Occupancy\"][...]) - (\n data.particles[\"Is Si Site\"][...] & data.particles[\"antisite_mask\"][...])\n c_antisite = data.particles[\"antisite_mask\"][...] * data.particles[\"Is C Site\"][...]\n\n\n data.particles_.create_property('Si_V', data=si_vacancy.astype(int))\n data.particles_.create_property('Si_I', data=si_interstitial.astype(int))\n data.particles_.create_property('Si_C', data=si_antisite.astype(int))\n data.particles_.create_property('C_V', data=c_vacancy.astype(int))\n data.particles_.create_property('C_I', data=c_interstitial.astype(int))\n data.particles_.create_property('C_Si', data=c_antisite.astype(int))",
"def plot_clusters(self):\n w = self.w\n h = self.h\n largest_cluster, largest_cluster_size = self.get_largest_cluster()\n\n # Compute space step\n dx = 1. / max(w, h)\n\n # Create figure\n ax = self.create_figure()\n\n for i in range(w + 1):\n for j in range(h + 1):\n if self.cluster[i, j] == largest_cluster:\n color = self.largest_cluster_color\n else:\n color = self.other_clusters_color\n # Plot horizontal edge\n if i <= w - 1 and self.sample[i, j, 0] == 1:\n ax.plot([i * dx, (i + 1) * dx], [j * dx, j * dx],\n color=color)\n # Plot vertical edge\n if j <= h - 1 and self.sample[i, j, 1] == 1:\n ax.plot([i * dx, i * dx], [j * dx, (j + 1) * dx],\n color=color)\n\n self.set_title(ax)\n self.set_legend(ax, largest_cluster_size)",
"def partial_visualize_in_2d(self, cluster_index=[5,12,35,44,64,75,81]):\n for i in cluster_index:\n list_x = []\n list_y = []\n for j in self.cluster[i]:\n list_x.append(self.code[0][j,0])\n list_y.append(self.code[0][j,1])\n plt.scatter(list_x,list_y, label=self.skill[self.present_skill[i]])\n plt.legend()\n plt.show()\n return",
"def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()",
"def plot_samples(self, df_clusters, n_trajectories_per_cluster=5, path_to_save='', file_name='', save=True):\n\n\n # create colormap for trajectories\n self.clusters = df_clusters['cluster'].unique()\n\n # cluster loop\n for c in self.clusters:\n\n self.fig = plt.figure(figsize=(13,15))\n self.ax = self.fig.add_subplot(1,1,1)\n\n # plot background map\n self.plot()\n\n df_clus = df_clusters[df_clusters['cluster'] == c]\n # Separate medoids. Always plot medoids. Medoids are cool.\n df_med_clus = df_clus[df_clus['medoids'] == 1]\n\n trip = list(map(int, df_med_clus['locations_list'].tolist()[0]))\n df_trip = pd.DataFrame(data={'pro_com': trip})\n # get centroids of each trip\n self.df_trip_entroids = self.get_centroids_trip(df_trip)\n self.plot_single_trajectory()\n\n\n # trajectory loop\n if n_trajectories_per_cluster > 1:\n\n df_sample = df_clus[df_clus['medoids'] == 0][:n_trajectories_per_cluster-1]\n\n for t in range(n_trajectories_per_cluster-1):\n\n trip = list(map(int, df_sample.iloc[[t], :]['locations_list'].tolist()[0]))\n df_trip = pd.DataFrame(data={'pro_com': trip})\n # get centroids of each trip\n self.df_trip_entroids = self.get_centroids_trip(df_trip)\n self.plot_single_trajectory()\n \n if save:\n cluster_wise_plot_path = path_to_save+\"clusterwise_trajectories/\"\n if not os.path.exists(cluster_wise_plot_path):\n os.makedirs(cluster_wise_plot_path)\n plt.savefig(cluster_wise_plot_path+file_name+'_clusterwise_trajectories_'+str(c)+'.png')",
"def draw(pre, features, poi_, mark_poi=False, f1_name=\"feature 1\", f2_name=\"feature 2\"):\n\n # plot each cluster with a different color--add more colors for\n # drawing more than five clusters\n colors = [\"b\", \"c\", \"k\", \"m\", \"g\"]\n for ii, pp in enumerate(pre):\n plt.scatter(features[ii][0], features[ii][1], color=colors[pre[ii]])\n\n # if you like, place red stars over points that are POIs (just for funsies)\n if mark_poi:\n for ii, pp in enumerate(pre):\n if poi_[ii]:\n plt.scatter(features[ii][0], features[ii][1], color=\"r\", marker=\"*\")\n plt.xlabel(f1_name)\n plt.ylabel(f2_name)\n plt.show()",
"def plot_bindetect(motifs, cluster_obj, conditions, args):\r\n\twarnings.filterwarnings(\"ignore\")\r\n\r\n\tcond1, cond2 = conditions\r\n\tn_IDS = cluster_obj.n\r\n\r\n\t#Link information from motifs / clusters\r\n\tdiff_scores = {}\r\n\tfor motif in motifs:\r\n\t\tdiff_scores[motif.prefix] = {\"change\": motif.change,\r\n\t\t\t\t\t\t\t\t\t\"pvalue\": motif.pvalue,\r\n\t\t\t\t\t\t\t\t\t\"log10pvalue\": -np.log10(motif.pvalue) if motif.pvalue > 0 else -np.log10(1e-308),\t#smallest possible number before python underflows\r\n\t\t\t\t\t\t\t\t\t\"volcano_label\": motif.name,\t#shorter name\r\n\t\t\t\t\t\t\t\t\t\"overview_label\": \"{0} ({1})\".format(motif.name, motif.id) \t\t#the name which was output used in bindetect output\r\n\t\t\t\t\t\t\t\t\t}\r\n\t\r\n\txvalues = np.array([diff_scores[TF][\"change\"] for TF in diff_scores])\r\n\tyvalues = np.array([diff_scores[TF][\"log10pvalue\"] for TF in diff_scores])\r\n\r\n\t#### Define the TFs to plot IDs for ####\r\n\ty_min = np.percentile(yvalues[yvalues < -np.log10(1e-300)], 95)\t\r\n\tx_min, x_max = np.percentile(xvalues, [5,95])\r\n\r\n\tfor TF in diff_scores:\r\n\t\tif diff_scores[TF][\"change\"] < x_min or diff_scores[TF][\"change\"] > x_max or diff_scores[TF][\"log10pvalue\"] > y_min:\r\n\t\t\tdiff_scores[TF][\"show\"] = True\r\n\t\t\tif diff_scores[TF][\"change\"] < 0:\r\n\t\t\t\tdiff_scores[TF][\"color\"] = \"blue\"\r\n\t\t\telif diff_scores[TF][\"change\"] > 0:\r\n\t\t\t\tdiff_scores[TF][\"color\"] = \"red\"\r\n\t\t\telse:\r\n\t\t\t\tdiff_scores[TF][\"color\"] = \"black\" #if change was 0\r\n\t\telse:\r\n\t\t\tdiff_scores[TF][\"show\"] = False \r\n\t\t\tdiff_scores[TF][\"color\"] = \"black\"\r\n\r\n\tnode_color = cluster_obj.node_color\r\n\tIDS = np.array(cluster_obj.names)\r\n\t\r\n\t\"\"\"\r\n\t#Set cluster names\r\n\tfor motif_name in diff_scores:\r\n\t\tfor cluster in cluster_obj.clusters:\r\n\r\n\t\t\tif motif_name in cluster_obj.clusters[cluster][\"member_names\"]:\r\n\t\t\t\tdiff_scores[motif_name][\"cluster_name\"] = cluster_obj.clusters[cluster][\"cluster_name\"]\r\n\r\n\t\t\tif motif_name == cluster_obj.clusters[cluster][\"representative\"]:\r\n\t\t\t\tdiff_scores[TF][\"show\"] = True\r\n\t\t\t\tdiff_scores[motif_name][\"representative\"] = True\r\n\t\"\"\"\r\n\r\n\t#--------------------------------------- Figure --------------------------------#\r\n\r\n\t#Make figure\r\n\tno_rows, no_cols = 2,2\t\r\n\th_ratios = [1,max(1,n_IDS/25)]\r\n\tl = 10+7*(n_IDS/25) \t\t\t#length of plot\r\n\tlimit = 2**16/100-1\t\t\t\t#matplotlib limit of 2**16 pixels -> /100 to get figsize\r\n\tl = limit if l > limit else l \t#set cap on length\r\n\tfigsize = (8, l)\r\n\r\n\tfig = plt.figure(figsize = figsize)\r\n\tgs = gridspec.GridSpec(no_rows, no_cols, height_ratios=h_ratios)\r\n\tgs.update(hspace=0.0001, bottom=0.00001, top=0.999999)\r\n\r\n\tax1 = fig.add_subplot(gs[0,:])\t#volcano\r\n\tax2 = fig.add_subplot(gs[1,0])\t#long scatter overview\r\n\tax3 = fig.add_subplot(gs[1,1]) #dendrogram\r\n\t\r\n\t######### Volcano plot on top of differential values ########\r\n\tax1.set_title(\"BINDetect volcano plot\", fontsize=16, pad=20)\r\n\tax1.scatter(xvalues, yvalues, color=\"black\", s=5)\r\n\r\n\t#Add +/- 10% to make room for labels\r\n\tylim = ax1.get_ylim()\r\n\ty_extra = (ylim[1] - ylim[0]) * 0.1\r\n\tax1.set_ylim(ylim[0], ylim[1] + y_extra)\r\n\r\n\txlim = ax1.get_xlim()\r\n\tx_extra = (xlim[1] - xlim[0]) * 0.1\r\n\tlim = np.max([np.abs(xlim[0]-x_extra), np.abs(xlim[1]+x_extra)])\r\n\tax1.set_xlim(-lim, lim)\r\n\r\n\tx0,x1 = 
ax1.get_xlim()\r\n\ty0,y1 = ax1.get_ylim()\r\n\tax1.set_aspect((x1-x0)/(y1-y0))\t\t#square volcano plot\r\n\r\n\t#Decorate plot\r\n\tax1.set_xlabel(\"Differential binding score\")\r\n\tax1.set_ylabel(\"-log10(pvalue)\")\r\n\r\n\t########### Dendrogram over similarities of TFs #######\r\n\t\r\n\t#Only plot dendrogram if there was more than one TF\r\n\tn_ids = len(IDS)\r\n\tif n_ids > 1:\r\n\t\tdendro_dat = dendrogram(cluster_obj.linkage_mat, labels=list(IDS), no_labels=True, orientation=\"right\", ax=ax3, above_threshold_color=\"black\", link_color_func=lambda k: cluster_obj.node_color[k])\r\n\t\tlabels = dendro_dat[\"ivl\"]\t#Now sorted for the order in dendrogram\r\n\t\tax3.set_xlabel(\"Transcription factor distance\\n(Clusters below threshold are colored)\")\r\n\r\n\t\tax3.set_ylabel(\"Transcription factor clustering based on TFBS overlap\", rotation=270, labelpad=20)\r\n\t\tax3.yaxis.set_label_position(\"right\")\r\n\r\n\t\t#Set aspect of dendrogram/changes\r\n\t\tx0,x1 = ax3.get_xlim()\r\n\t\ty0,y1 = ax3.get_ylim()\r\n\t\tax3.set_aspect(((x1-x0)/(y1-y0)) * n_ids/10)\r\n\telse:\r\n\t\tax3.axis('off')\r\n\t\tlabels = IDS\r\n\r\n\t########## Differential binding scores per TF ##########\r\n\tax2.set_xlabel(\"Differential binding score\\n\" + \"(\" + cond2 + r' $\\leftarrow$' + r'$\\rightarrow$ ' + cond1 + \")\") #First position in comparison equals numerator in log2fc division\r\n\tax2.xaxis.set_label_position('bottom') \r\n\tax2.xaxis.set_ticks_position('bottom') \r\n\r\n\tno_labels = len(labels)\r\n\tax2.set_ylim(0.5, no_labels+0.5)\r\n\tax2.set_ylabel(\"Transcription factors\")\r\n\r\n\tax2.set_yticks(range(1,no_labels+1))\r\n\tax2.set_yticklabels([diff_scores[TF][\"overview_label\"] for TF in labels])\r\n\tax2.axvline(0, color=\"grey\", linestyle=\"--\") \t#Plot line at middle\r\n\r\n\t#Plot scores per TF\r\n\tfor y, TF in enumerate(labels):\t#labels are the output motif names from output\r\n\t\t\r\n\r\n\t\tidx = np.where(IDS == TF)[0][0]\r\n\t\tscore = diff_scores[TF][\"change\"]\r\n\r\n\t\t#Set coloring based on change/pvalue\r\n\t\tif diff_scores[TF][\"show\"] == True:\r\n\t\t\tfill = \"full\"\r\n\t\telse:\r\n\t\t\tfill = \"none\"\r\n\r\n\t\tax2.axhline(y+1, color=\"grey\", linewidth=1)\r\n\t\tax2.plot(score, y+1, marker='o', color=node_color[idx], fillstyle=fill)\r\n\t\tax2.yaxis.get_ticklabels()[y].set_color(node_color[idx])\r\n\r\n\t#Set x-axis ranges\r\n\tlim = np.max(np.abs(ax2.get_xlim()))\r\n\tax2.set_xlim((-lim, lim))\t#center on 0\r\n\r\n\t#set aspect\r\n\tx0,x1 = ax2.get_xlim()\r\n\ty0,y1 = ax2.get_ylim()\r\n\tax2.set_aspect(((x1-x0)/(y1-y0)) * n_IDS/10)\t\t#square volcano plot\r\n\r\n\tplt.tight_layout() #tight layout before setting ids in volcano plot\r\n\r\n\t######### Color points and set labels in volcano ########\r\n\ttxts = []\r\n\tfor TF in diff_scores:\r\n\t\tcoord = [diff_scores[TF][\"change\"], diff_scores[TF][\"log10pvalue\"]]\r\n\t\tax1.scatter(coord[0], coord[1], color=diff_scores[TF][\"color\"], s=4.5)\r\n\r\n\t\tif diff_scores[TF][\"show\"] == True:\r\n\t\t\ttxts.append(ax1.text(coord[0], coord[1], diff_scores[TF][\"volcano_label\"], fontsize=9))\r\n\r\n\t#Plot custom legend for colors\r\n\tlegend_elements = [Line2D([0],[0], marker='o', color='w', markerfacecolor=\"red\", label=\"Higher scores in {0}\".format(conditions[0])),\r\n\t\t\t\t\t\tLine2D([0],[0], marker='o', color='w', markerfacecolor=\"blue\", label=\"Higher scores in {0}\".format(conditions[1]))]\r\n\tl = ax1.legend(handles=legend_elements, loc=\"lower left\", 
framealpha=0.5)\r\n\tadjust_text(txts, ax=ax1, add_objects=[l], text_from_points=True, arrowprops=dict(arrowstyle='-', color='black', lw=0.5)) #, expand_text=(0.1,1.2), expand_objects=(0.1,0.1))\r\n\t\r\n\t\"\"\"\r\n\t#Add arrows to other cluster members\r\n\tprint(txts[0].__dict__)\r\n\tlabel_positions = {text._text:text for text in txts}\r\n\tprint(label_positions)\r\n\tfor TF in diff_scores:\r\n\t\tif diff_scores[TF][\"show\"]:\r\n\t\t\tcluster_name = diff_scores[TF][\"cluster_name\"]\r\n\t\t\t\r\n\t\t\tif cluster_name in label_positions: \r\n\t\t\t\tprint(cluster_name)\r\n\r\n\t\t\t\tpoint_x, point_y = diff_scores[TF][\"change\"], diff_scores[TF][\"log10pvalue\"]\r\n\t\t\t\ttext_x, text_y = label_positions[cluster_name]._x, label_positions[cluster_name]._y\r\n\t\t\t\tlen_x, len_y = text_x - point_x, text_y - point_y\r\n\r\n\t\t\t\tax1.arrow(point_x, point_y, len_x, len_y, linestyle=\"-\", color=\"black\", lw=0.5)\r\n\t\"\"\"\r\n\r\n\treturn(fig)",
"def gHST_plot(xy, NL, KL, BM, params, factor=1, climv=1., title=''):\n try:\n NP, NN = np.shape(NL)\n except:\n print 'There is only one particle to plot.'\n NP = len(xy)\n NN = 0\n\n # Extract the euler angles\n theta = xy[:, 2]\n phi = xy[:, 3]\n # The pivot points are called R_p\n R_p = xy[:, 0:2]\n # pylab.sca(ax)\n ax = plt.gca()\n # ax.set_axis_bgcolor('#E8E8E8')\n Rx = R_p[:, 0]\n Ry = R_p[:, 1]\n\n l = params['l']\n diffx = l * np.sin(xy[:, 2]) * np.cos(xy[:, 3])\n diffy = l * np.sin(xy[:, 2]) * np.sin(xy[:, 3])\n\n # Make the circles\n patch = [None] * NP\n mag = np.sqrt(diffx ** 2 + diffy ** 2)\n mag[mag == 0] = 1\n # angles= np.arccos(diffx/mag)\n # angles[diffy<0] = 2*np.pi-angles[diffy<0]\n angles = np.mod(phi, 2 * np.pi)\n\n # the displayed points\n scat_x = Rx + factor * diffx\n scat_y = Ry + factor * diffy\n\n # the actual points\n ss_x = Rx + diffx\n ss_y = Ry + diffy\n\n # the circles\n patch = [patches.Circle((Rx[i], Ry[i]), radius=factor * mag[i]) for i in range(len(Rx))]\n\n z = np.zeros(len(scat_x))\n\n # Initialize streches vector to be longer than necessary\n inc = 0\n stretches = np.zeros(3 * len(R_p))\n\n test = list(stretches)\n for i in range(len(R_p)):\n # for j, k in zip(Ni[i], Nk[i]):\n if NN > 0:\n for j, k, q in zip(NL[i], KL[i], BM[i]):\n if i < j and abs(k) > 0:\n # the distance between the actual points\n n1 = float(np.linalg.norm(R_p[i] - R_p[j]))\n stretches[inc] = n1 - q\n test[inc] = [R_p[(i, j), 0], R_p[(i, j), 1]]\n inc += 1\n\n test = test[0:inc]\n # print 'test = ', test\n lines = [zip(x, y) for x, y in test]\n stretch = np.array(stretches[0:inc])\n # print 'stretch = ', stretch\n\n # LINE Segments based on STretch --> lines_st\n lines_st = LineCollection(lines, array=stretch, cmap='seismic', linewidth=4)\n lines_st.set_clim([-climv, climv])\n if np.mean(theta) < np.pi * 0.5:\n print ''\n lines_st.set_zorder(0)\n else:\n lines_st.set_zorder(3)\n\n p = PatchCollection(patch, cmap='isolum_rainbow', alpha=0.6)\n\n p.set_array(P.array(angles))\n p.set_clim([0, 2 * np.pi])\n p.set_zorder(1)\n\n ax.add_collection(p)\n ax.add_collection(lines_st)\n\n fig = plt.gcf()\n axcb = fig.colorbar(lines_st)\n axcb.set_label('Strain')\n axcb.set_clim(vmin=-climv, vmax=climv)\n\n # Plot masses\n ax.set_aspect('equal')\n s = absolute_sizer()\n scat_fg = ax.scatter(scat_x, scat_y, s=s(0.02), c=angles, vmin=0., vmax=2. * np.pi, cmap='isolum_rainbow',\n alpha=1, zorder=2)\n\n # ax.set_xticklabels([])\n # ax.set_yticklabels([])\n\n pylab.title(title)\n\n return [scat_fg, lines_st, p]",
"def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()",
"def main():\n # Initialize the Serpinski set\n print(\"==> Making serpinski set...\")\n my_serpinski = Serpinski(400, 400, 0)\n num = 8\n print(\"==> Generating\", num, \"levels of subsets :)\")\n for _ in range(9):\n my_serpinski.add_subset()\n # Draw Serpinski\n # print(\"==> Drawing the set. This might take quite some time!\\\n # Damn Inefficient!\")\n # my_serpinski.draw_me()\n\n # Initialize Coordinates\n length = 50000 # Number of random dots\n x_coord = []\n y_coord = []\n index = 0\n\n # try length particles in serp set\n print(\"==> Randomly choosing\", length, \"dots...\")\n while index < length:\n # Chech if dot in bound\n rand_y = np.random.uniform(low=400.0 - 200.0 * np.sqrt(3) / 2.0,\n high=400.0)\n # rand_x in triangle // condition //\n diff = 400.0 - rand_y\n x_diff = diff / np.sqrt(3)\n rand_x = np.random.uniform(low=400.0 - x_diff,\n high=400 + x_diff)\n\n if my_serpinski.is_bound(rand_x, rand_y):\n x_coord.append(rand_x)\n y_coord.append(rand_y)\n index += 1\n\n # Draw image using scatter\n print(\"Scattering the dots ;)\")\n plt.scatter(x_coord, y_coord, s=0.1)\n # Show image\n dpi = 600\n print(\"==> Saving to .jpg with dpi=\", dpi)\n plt.savefig(\"fractalstuff.jpg\", dpi=dpi, bbox_inches='tight')",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def show_clusters_in_2D_tsne(data: np.ndarray, clusters: np.ndarray, num_clusters: int = 10,\n title: str = \"\", block: bool = True):\n try:\n fig, ax = _show_clusters_in_2D_whatever(data, clusters, TSNE, num_clusters, title, block, verbose=True)\n except:\n raise\n\n return fig, ax",
"def display_clusters(assign):\n for c in assign:\n plt.plot(c[0], c[1], \"r*\")\n plt.plot(\n [p[0] for p in assign[c]],\n [p[1] for p in assign[c]],\n \"o\"\n )\n plt.show()\n plt.close()",
"def display(self):\n scatter_plot(self.points, self.hull_points, self.color, self.title)"
] | [
"0.67489004",
"0.6729127",
"0.67152536",
"0.6654016",
"0.66303104",
"0.6488753",
"0.6219483",
"0.6171645",
"0.61682975",
"0.6068598",
"0.60134363",
"0.59585845",
"0.5934958",
"0.59331185",
"0.5924034",
"0.58946514",
"0.5883884",
"0.5861032",
"0.5849204",
"0.58180285",
"0.5810715",
"0.5800909",
"0.57413197",
"0.5735432",
"0.5732358",
"0.5727424",
"0.57153493",
"0.5715153",
"0.569901",
"0.56958604"
] | 0.7175168 | 0 |
Plots a single Voronoi cell together with its Voronoi vertices. To keep the cell in perspective with respect to the rest of the points, the plot limits are set from the extents of all the point positions. | def plotVoronoiCell(self, cells):
for i in cells:
#i indexes volumes
i = self.nonBI[i] #now i indexes vor.point_region
vI = self.vor.regions[self.vor.point_region[i]]
v = self.vor.vertices[vI, :]
r = v
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
fig.set_size_inches(18.5, 9.5)
ax.set_title('Voronoi Cell of Particle ' + str(i))
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
ax.set_zlabel('z [m]')
ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')
ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')
ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))
ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))
ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))
# limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))
# ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)
ax.legend() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_vor(self,x,ax,tri=False):\n\n L = self.L\n grid_x, grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j in np.array([grid_x.ravel(), grid_y.ravel()]).T])\n\n c_types_print = np.tile(self.c_types,9)\n bleed = 0.1\n c_types_print = c_types_print[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n y = y[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(y))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n cols = self.cols\n if self.plot_scatter is True:\n for j,i in enumerate(np.unique(self.c_types)):\n ax.scatter(x[self.c_types==i, 0], x[self.c_types==i, 1],color=cols[i],zorder=1000)\n patches = []\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cols[c_types_print[i]],ec=(1,1,1,1)))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)\n if tri is not False:\n for TRI in tri:\n for j in range(3):\n a, b = TRI[j], TRI[np.mod(j + 1, 3)]\n if (a >= 0) and (b >= 0):\n X = np.stack((x[a], x[b])).T\n ax.plot(X[0], X[1], color=\"black\")",
"def plot_vor_boundary(self,x,ax,tri=False):\n\n x = x[~np.isnan(x[:,0])]\n c_types_print = np.ones(x.shape[0],dtype=np.int32)*-1\n c_types_print[:self.n_C] = self.c_types\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(x))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n cols = self.cols\n patches = []\n if self.plot_scatter is True:\n ax.scatter(x[:self.n_C, 0], x[:self.n_C, 1], color=\"black\", zorder=1000)\n ax.scatter(x[self.n_C:, 0], x[self.n_C:, 1], color=\"grey\", zorder=1000)\n\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cols[c_types_print[i]],edgecolor=\"white\",alpha=0.5))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)\n if tri is not False:\n for TRI in tri:\n for j in range(3):\n a, b = TRI[j], TRI[np.mod(j + 1, 3)]\n if (a >= 0) and (b >= 0):\n X = np.stack((x[a], x[b])).T\n ax.plot(X[0], X[1], color=\"black\")",
"def plot(self, \n\t\t\t voronoi: bool = False):\n\t\tif (voronoi):\n\t\t\tif (len(self.X) == 2):\n\t\t\t\tfrom verticapy.learn.plot import voronoi_plot\n\t\t\t\tquery = \"SELECT GET_MODEL_ATTRIBUTE(USING PARAMETERS model_name = '{}', attr_name = 'centers')\".format(self.name)\n\t\t\t\tself.cursor.execute(query)\n\t\t\t\tclusters = self.cursor.fetchall()\n\t\t\t\tvoronoi_plot(clusters = clusters, columns = self.X)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Voronoi Plots are only available in 2D\")\n\t\telse:\n\t\t\tvdf = vDataFrame(self.input_relation, self.cursor)\n\t\t\tself.predict(vdf, \"kmeans_cluster\")\n\t\t\tif (len(self.X) <= 3):\n\t\t\t\tvdf.scatter(columns = self.X, catcol = \"kmeans_cluster\", max_cardinality = 100, max_nb_points = 10000)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Clustering Plots are only available in 2D or 3D\")",
"def voronoi(points, buffer_percent=100):\n # Remove duplicate xy points bc that would make delauney fail, and must remember z (if any) for retrieving originals from index results\n seen = set() \n uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]\n classpoints = [_Point(*point[:2]) for point in uniqpoints]\n\n # Create fake sitepoints around the point extent to correct for infinite polygons\n # For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity\n xs,ys = list(zip(*uniqpoints))[:2]\n pointswidth = max(xs) - min(xs)\n pointsheight = max(ys) - min(ys)\n xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )\n midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )\n #bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer\n bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer\n classpoints.extend([_Point(*corner) for corner in bufferbox])\n\n # Compute Voronoi\n vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)\n\n # Turn unordered result edges into ordered polygons\n polygons = list()\n for sitepoint,polyedges in list(poly_dict.items()):\n polyedges = [edge[1:] for edge in polyedges]\n poly = list()\n firststart,firstend = polyedges.pop(0)\n poly.append(firstend)\n while polyedges:\n curend = poly[-1]\n for i,other in enumerate(polyedges):\n otherstart,otherend = other\n if otherstart == curend:\n poly.append(otherend)\n ##print otherstart,otherend\n polyedges.pop(i)\n break\n elif otherend == curend:\n ##print otherend,otherstart\n poly.append(otherstart)\n polyedges.pop(i)\n break\n # Get vertices from indexes\n try: sitepoint = uniqpoints[sitepoint]\n except IndexError:\n sitepoint = None # fake bbox sitepoints shouldnt be in the results\n poly = [vertices[vi] for vi in poly if vi != -1]\n polygons.append((sitepoint, poly))\n\n # Maybe clip parts of polygons that stick outside screen?\n # ...\n\n return polygons",
"def voronoi(geometry,\n network,\n propname,\n **params):\n print('voronoi: nothing yet')",
"def plot_vor_colored(self,x,ax,cmap):\n\n L = self.L\n grid_x, grid_y = np.mgrid[-1:2, -1:2]\n grid_x[0, 0], grid_x[1, 1] = grid_x[1, 1], grid_x[0, 0]\n grid_y[0, 0], grid_y[1, 1] = grid_y[1, 1], grid_y[0, 0]\n y = np.vstack([x + np.array([i * L, j * L]) for i, j in np.array([grid_x.ravel(), grid_y.ravel()]).T])\n\n cmap_print = np.tile(cmap.T,9).T\n bleed = 0.1\n cmap_print = cmap_print[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n y = y[(y<L*(1+bleed)).all(axis=1)+(y>-L*bleed).all(axis=1)]\n regions, vertices = self.voronoi_finite_polygons_2d(Voronoi(y))\n\n\n ax.set(aspect=1,xlim=(0,self.L),ylim=(0,self.L))\n if type(self.c_types) is list:\n # ax.scatter(x[:, 0], x[:, 1],color=\"grey\",zorder=1000)\n for region in regions:\n polygon = vertices[region]\n plt.fill(*zip(*polygon), alpha=0.4, color=\"grey\")\n\n else:\n patches = []\n for i, region in enumerate(regions):\n patches.append(Polygon(vertices[region], True,facecolor=cmap_print[i],edgecolor=\"white\",alpha=0.5))\n\n p = PatchCollection(patches, match_original=True)\n # p.set_array(c_types_print)\n ax.add_collection(p)",
"def plotResults(self):\n\n clusters = self.data[[i for i in range(len(self.data)) if self.vorLabels[i] != 0], :]\n vorLabels = [self.vorLabels[i] for i in range(len(self.data)) if self.vorLabels[i] != 0]\n\n self.plot = voronoiPlot(clusters, self.skel, self.skelLabels, self.isCorrect, vorLabels)\n self.plot.snapPlot()",
"def generate_voronoi_diagram(\n num_cells, width, height\n) -> Tuple[Voronoi, List[List[int]]]:\n # Make up data points\n points = np.random.rand(num_cells - 4, 2)\n default = np.array(\n [\n np.array([0.0, 0.0]),\n np.array([1.0, 0.0]),\n np.array([0.0, 1.0]),\n np.array([1.0, 1.0]),\n ]\n )\n points = np.concatenate((points, default), axis=0)\n # Scale them\n points = scale_points(points, width, height)\n # Compute Voronoi tesselation\n vor = Voronoi(points)\n # Plot\n voronoi_plot_2d(vor)\n return vor, points",
"def plot_voronoi_polys_with_points_in_area(ax, area_shape, poly_shapes, points, poly_to_pt_assignments=None,\n area_color='white', area_edgecolor='black',\n voronoi_and_points_cmap='tab20',\n voronoi_color=None, voronoi_edgecolor=None,\n points_color=None, points_markersize=5, points_marker='o',\n voronoi_labels=None, voronoi_label_fontsize=10, voronoi_label_color=None,\n point_labels=None, point_label_fontsize=7, point_label_color=None,\n plot_area_opts=None,\n plot_voronoi_opts=None,\n plot_points_opts=None):\n plot_area_opts = plot_area_opts or {}\n plot_voronoi_opts = plot_voronoi_opts or {'alpha': 0.5}\n plot_points_opts = plot_points_opts or {}\n\n _plot_polygon_collection_with_color(ax, [area_shape], color=area_color, edgecolor=area_edgecolor, **plot_area_opts)\n\n if voronoi_and_points_cmap and poly_to_pt_assignments and \\\n not all(map(bool, (voronoi_color, voronoi_edgecolor, points_color))):\n voronoi_color, points_color = colors_for_voronoi_polys_and_points(poly_shapes, poly_to_pt_assignments,\n cmap_name=voronoi_and_points_cmap)\n\n if voronoi_color is None and voronoi_edgecolor is None:\n voronoi_edgecolor = 'black' # better visible default value\n\n plot_voronoi_polys(ax, poly_shapes, color=voronoi_color, edgecolor=voronoi_edgecolor,\n labels=voronoi_labels, label_fontsize=voronoi_label_fontsize, label_color=voronoi_label_color,\n **plot_voronoi_opts)\n\n plot_points(ax, points, points_markersize, points_marker, color=points_color,\n labels=point_labels, label_fontsize=point_label_fontsize, label_color=point_label_color,\n **plot_points_opts)",
"def visualise(self):\n\n # Initialise figure\n params = {\"figure.figsize\": (5, 5)}\n pylab.rcParams.update(params)\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111)\n\n # Add particles if selected\n print(self.crds)\n cmap=cm.get_cmap('coolwarm')\n norm=Normalize(0,20)\n print(np.max(self.radii))\n print(np.max(self.weights))\n if self.vis_particles:\n if self.vis_vortype==-2:\n radii=self.weights\n if self.param>10:\n self.param=(self.param-10)/2+10\n colour=cmap(norm(self.param))\n else:\n radii=self.radii\n radii=self.weights\n colour='orange'\n colour=(0.8,0.687,0.287,1)\n colour='gold'\n patches = []\n patches_pnts = []\n patches_absent = []\n for i,c in enumerate(self.crds):\n patches.append(Circle(c,radius=radii[i]))\n if radii[i]>0:\n patches_pnts.append(Circle(c,radius=0.1))\n else:\n patches_absent.append(Circle(c,radius=0.1))\n self.ax.add_collection(PatchCollection(patches, facecolor=colour, edgecolor='k', alpha=0.5))\n self.ax.add_collection(PatchCollection(patches_pnts, facecolor='k', alpha=1,zorder=1))\n if self.vis_vortype==2:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=0.5,zorder=1))\n else:\n self.ax.add_collection(PatchCollection(patches_absent, facecolor='k', alpha=1,zorder=1))\n\n # Add voronoi\n if self.vis_vortype!=0:\n patches = []\n colours = []\n if self.vis_cellcolour==1:\n cell_colours = self.init_cell_colours()\n else:\n cell_colours = [(0,0,0,0)]*100\n for i in range(self.m):\n patches.append(Polygon(self.rings[i],True))\n colours.append(cell_colours[self.rings[i][:,0].size])\n self.ax.add_collection(PatchCollection(patches, facecolor=colours, edgecolor='k', linewidth=1, zorder=0))\n\n # Sandbox\n # print(np.max(self.radii))\n # cmap=cm.get_cmap('coolwarm')\n # norm=Normalize(0,np.max(20))\n sandbox=False\n if sandbox:\n # z=16\n # w=np.zeros_like(self.radii)\n # mask=2*self.radii>z\n # w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n # patches = []\n # for i,c in enumerate(self.crds):\n # patches.append(Circle(c,radius=w[i]))\n # self.ax.add_collection(PatchCollection(patches, facecolor=cmap(norm(z)), edgecolor='k'))\n with open('./phi.dat','w') as f:\n for z in np.arange(0,np.max(self.radii)*2+0.5,0.01):\n w=np.zeros_like(self.radii)\n mask=2*self.radii>z\n w[mask]=z**0.5*np.sqrt(2*self.radii[mask]-z)\n phi=np.sum(np.pi*w**2)/52359.9\n # phi=np.sum(np.pi*w**2)/1309\n f.write('{:.6f} {:.6f}\\n'.format(z,phi))\n\n\n\n # Set axes\n buffer = 1.6\n lim = buffer*np.max(np.abs(self.crds))\n self.ax.set_xlim((-lim,lim))\n self.ax.set_ylim((-lim,lim))\n self.ax.set_axis_off()\n\n # Show figure\n if self.vis_save:\n plt.savefig('{}_{}_{}.png'.format(self.prefix,self.frame,self.vis_vortype),dpi=400)\n plt.show()",
"def check_forces(self,x,F):\n Vor = Voronoi(x)\n fig, ax = plt.subplots()\n ax.set(aspect=1)\n voronoi_plot_2d(Vor, ax=ax)\n ax.scatter(x[:, 0], x[:, 1])\n ax.quiver(x[:, 0], x[:, 1], F[:, 0], F[:, 1])\n fig.show()",
"def voronoi_diagram(self, seeds, samples):\n from .bipartite_graph import cross_knn\n # checks\n if seeds.shape[0] != self.V:\n raise ValueError(\"The numberof seeds is not as expected\")\n if np.size(seeds) == self.V:\n seeds = np.reshape(seeds, (np.size(seeds), 1))\n if np.size(samples) == samples.shape[0]:\n samples = np.reshape(samples, (np.size(samples), 1))\n if seeds.shape[1] != samples.shape[1]:\n raise ValueError(\"The seeds and samples do not belong \\\n to the same space\")\n\n #1. define the graph knn(samples, seeds, 2)\n j = cross_knn(samples, seeds, 2).edges[:, 1]\n\n #2. put all the pairs i the target graph\n Ns = np.shape(samples)[0]\n self.E = Ns\n self.edges = np.array(\n [j[2 * np.arange(Ns)], j[2 * np.arange(Ns) + 1]]).T\n self.weights = np.ones(self.E)\n\n #3. eliminate the redundancies and set the weights\n self.cut_redundancies()\n self.symmeterize()\n self.set_gaussian(seeds)",
"def plot_voronoi_polys(ax, poly_shapes, color=None, edgecolor=None, labels=None, label_fontsize=10, label_color=None,\n **kwargs):\n\n _plot_polygon_collection_with_color(ax, poly_shapes, color=color, edgecolor=edgecolor, **kwargs)\n\n if labels:\n # plot labels using matplotlib's text()\n n_labels = len(labels)\n n_features = len(poly_shapes)\n if n_labels != n_features:\n raise ValueError('number of labels (%d) must match number of Voronoi polygons (%d)'\n % (n_labels, n_features))\n\n for i, (p, lbl) in enumerate(zip(poly_shapes, labels)):\n tx, ty = p.centroid.coords[0]\n ax.text(tx, ty, lbl, fontsize=label_fontsize, color=_color_for_labels(label_color, color, i))",
"def Voronoi_cell(hull, centers, vertex, original_fan):\n # make a copy so that the original does not get mutated\n fan = copy.deepcopy(original_fan)\n\n # start by moving the center\n # of the first one in the list of adjacent faces\n # to the resulting polygon\n simplex = fan.pop(0)\n result = [centers[simplex]]\n\n # find the vertices of this face\n simplex_vertices = hull.simplices[simplex]\n\n # there should only be two other vertices on this face\n # other than our given vertex\n\n # pick one of them and mark it 'known'\n # the other one will be common to the next simplex to consider\n known_vertex, common_vertex = [x for x in simplex_vertices if x != vertex]\n\n while fan:\n # the collection of faces is not exhausted yet\n assert known_vertex in simplex_vertices\n\n known_vertex_index = list(simplex_vertices).index(known_vertex)\n\n # next simplex to consider\n # it is the simplex which is opposite to the known vertex\n simplex = hull.neighbors[simplex][known_vertex_index]\n\n assert simplex in fan\n\n # now move its center to our resulting polygon\n fan.remove(simplex)\n result.append(centers[simplex])\n\n # and repeat the process\n simplex_vertices = hull.simplices[simplex]\n known_vertex = common_vertex\n\n # of the three vertices of the simplex\n # one should be our given vertex\n # and one should already have been processed\n remaining = [x for x in hull.simplices[simplex]\n if x != vertex and x != known_vertex]\n\n assert len(remaining) == 1\n common_vertex = remaining[0]\n\n return numpy.array(result)",
"def plotVolumeContours(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Particle Positions Colored by Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n pos = ax.scatter(self.data[self.nonB, 0], self.data[self.nonB, 1], self.data[self.nonB, 2], s=10, c=self.volumes, cmap='plasma')\n cbar = fig.colorbar(pos, ax=ax)\n cbar.ax.tick_params(labelsize=15)",
"def make_voronoi(xy, xy2=None, rout=40., x0=0., y0=0):\r\n nbins = len(xy)\r\n if xy2 is not None:\r\n xy = np.concatenate((xy, xy2))\r\n circle = cv.circle_xy(rout)\r\n circle = np.add(circle, [x0, y0])\r\n points = np.concatenate((xy, circle))\r\n polygons = np.array(voronoi_polygons(points))[:nbins]\r\n return polygons",
"def plot_potential(self):\n imshow(self.U, extent=(self.x[0], self.x[-1], self.y[0], self.y[-1]), aspect='auto', interpolation='None')\n xlabel('x')\n ylabel('y')",
"def plot_vertices(self, f=None, index_row=0, index_col=0, show=True, plotter=None, cmap='jet', title='',\n title_location=\"upper_edge\", font_size=10, font_color='black', camera=None):\n\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n plotter.add_text(title, position=title_location, font_size=font_size, color=font_color)\n if camera is not None:\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n plotter.add_mesh(self.vertices, scalars=f, cmap=cmap, render_points_as_spheres=True)\n if show:\n plotter.show()\n return plotter",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Initialize an empty list for Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(np.clip(north - d_north - safety_distance - north_min, 0, north_size-1)),\n int(np.clip(north + d_north + safety_distance - north_min, 0, north_size-1)),\n int(np.clip(east - d_east - safety_distance - east_min, 0, east_size-1)),\n int(np.clip(east + d_east + safety_distance - east_min, 0, east_size-1)),\n ]\n grid[obstacle[0]:obstacle[1]+1, obstacle[2]:obstacle[3]+1] = 1\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n graph = Voronoi(points)\n\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]]\n p2 = graph.vertices[v[1]]\n cells = list(bresenham(int(p1[0]), int(p1[1]), int(p2[0]), int(p2[1])))\n hit = False\n\n for c in cells:\n if np.amin(c) < 0 or c[0] >= grid.shape[0] or c[1] >= grid.shape[1]:\n hit = True\n break\n if grid[c[0], c[1]] == 1:\n hit = True\n break\n\n if not hit:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges, int(north_min), int(east_min)",
"def _get_voronoi_poly_points(vert_index_list, voronoi_vertices,\n voronoi_centroid):\n voronoi_poly_points = []\n if -1 not in vert_index_list and len(vert_index_list) > 3:\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n elif vert_index_list.size > 0:\n # ASSUME RECTANGLE\n vert_index_list = vert_index_list[vert_index_list >= 0]\n voronoi_poly_points = voronoi_vertices[vert_index_list]\n # CASE 1: 2 valid voronoi vertices\n if vert_index_list.size == 2:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon1 = voronoi_poly_points[0][0]\n corner_lat1 = voronoi_poly_points[0][1]\n corner_lon2 = voronoi_poly_points[1][0]\n corner_lat2 = voronoi_poly_points[1][1]\n\n # check if need to add points in lon or lat\n if abs(corner_lon1-corner_lon2) > abs(corner_lat1-corner_lat2):\n dLat = center_lat - corner_lat1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [corner_lon2, center_lat + dLat],\n [corner_lon1, center_lat + dLat]\n ])\n else:\n dLon = center_lon - corner_lon1\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon1, corner_lat1],\n [corner_lon2, corner_lat2],\n [center_lon + dLon, corner_lat2],\n [center_lon + dLon, corner_lat1]\n ])\n # CASE 2: 1 valid voronoi vertex\n elif vert_index_list.size == 1:\n center_lon = voronoi_centroid[0]\n center_lat = voronoi_centroid[1]\n corner_lon = voronoi_poly_points[0][0]\n corner_lat = voronoi_poly_points[0][1]\n dLat = center_lat - corner_lat\n dLon = center_lon - corner_lon\n # append the corners in order\n voronoi_poly_points = np.array([\n [corner_lon, corner_lat],\n [center_lon + dLon, corner_lat],\n [center_lon + dLon, center_lat + dLat],\n [corner_lon, center_lat + dLat]\n ])\n\n return voronoi_poly_points",
"def mri_point_plot(self, vcol=1):\n img = self.voxels\n points = self.point_position \n ax = []\n fig = plt.figure(figsize=(9, 8))\n # TODO make this setable in the function call\n columns = 3\n rows = 2\n\n for i in range(points.shape[0]):\n im_slice = int(np.round(points[i, vcol]))\n if vcol == 0:\n im = img[im_slice, :, :]\n elif vcol == 1:\n im = img[:, im_slice, :]\n else:\n im = img[:, :, im_slice]\n ax.append( fig.add_subplot(rows, columns, i+1))\n ax[-1].set_title(\"Image depth: \"+str(im_slice)) # set title\n plt.imshow(im)\n plot_cols = np.array([0, 1, 2])\n plot_cols = plot_cols[plot_cols != vcol]\n plt.plot(points[i, min(plot_cols)], points[i, max(plot_cols)], 'ro')\n\n plt.show()",
"def pointsToVoronoiGridShapefile(lat, lon, vor_shp_path, extent=None):\n voronoi_centroids = _get_voronoi_centroid_array(lat, lon, extent)\n\n # set-up output polygon shp\n log(\"Creating output polygon shp {0}\"\n .format(os.path.basename(vor_shp_path)))\n if os.path.exists(vor_shp_path):\n os.remove(vor_shp_path)\n drv = ogr.GetDriverByName('ESRI Shapefile')\n outShp = drv.CreateDataSource(vor_shp_path)\n osr_geographic_proj = osr.SpatialReference()\n osr_geographic_proj.ImportFromEPSG(4326)\n layer = outShp.CreateLayer('', osr_geographic_proj, ogr.wkbPolygon)\n layer.CreateField(ogr.FieldDefn('GRID_LAT', ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn('GRID_LON', ogr.OFTReal))\n layerDefn = layer.GetLayerDefn()\n\n # find nodes surrounding polygon centroid\n # sort nodes in counterclockwise order\n # create polygon perimeter through nodes\n log(\"Building Voronoi polygons...\")\n # compute voronoi\n voronoi_manager = Voronoi(voronoi_centroids)\n voronoi_vertices = voronoi_manager.vertices\n voronoi_regions = voronoi_manager.regions\n for point_id, region_index in enumerate(voronoi_manager.point_region):\n vert_index_list = np.array(voronoi_regions[region_index])\n voronoi_centroid = voronoi_centroids[point_id]\n voronoi_poly_points = _get_voronoi_poly_points(vert_index_list,\n voronoi_vertices,\n voronoi_centroid)\n if len(voronoi_poly_points) == 4:\n poly = ogr.Geometry(ogr.wkbPolygon)\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for node in voronoi_poly_points:\n ring.AddPoint(node[0], node[1])\n\n # grab first node to close ring\n ring.AddPoint(voronoi_poly_points[0][0], voronoi_poly_points[0][1])\n\n poly.AddGeometry(ring)\n feat = ogr.Feature(layerDefn)\n feat.SetField('GRID_LON', float(voronoi_centroid[0]))\n feat.SetField('GRID_LAT', float(voronoi_centroid[1]))\n feat.SetGeometry(poly)\n layer.CreateFeature(feat)",
"def create_grid_and_edges(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Define a list to hold Voronoi points\n points = []\n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min),\n int(north + d_north + safety_distance - north_min),\n int(east - d_east - safety_distance - east_min),\n int(east + d_east + safety_distance - east_min),\n ]\n grid[obstacle[0]:obstacle[1] + 1, obstacle[2]:obstacle[3] + 1] = 1\n\n # add center of obstacles to points list\n points.append([north - north_min, east - east_min])\n\n # create a voronoi graph based on\n # location of obstacle centres\n graph = Voronoi(points)\n # check each edge from graph.ridge_vertices for collision\n edges = []\n for v in graph.ridge_vertices:\n p1 = graph.vertices[v[0]].astype(int)\n p2 = graph.vertices[v[1]].astype(int)\n # test each pair p1 and p2 for collision using Bresenham\n # If the edge does not hit an obstacle add it to the list\n in_collision = False\n ridgeline = bresenham(p1[0], p1[1], p2[0], p2[1])\n for b in ridgeline:\n # eliminate out of range points in the line\n if b[0] < 0 or b[0] >= grid.shape[0]:\n in_collision = True\n break\n if b[1] < 0 or b[1] >= grid.shape[1]:\n in_collision = True\n break\n # check if grid cell is an obstacle\n if grid[b[0], b[1]] == 1:\n in_collision = True\n break\n # keep ridge points not in collision\n if not in_collision:\n p1 = (p1[0], p1[1])\n p2 = (p2[0], p2[1])\n edges.append((p1, p2))\n\n return grid, edges",
"def get_voronoi_vertices(self, epsilon=2.5e-4, distance_threshold=0, width_buffer=10):\n voro = Voronoi(self._structure.get_extended_positions(width_buffer)+epsilon)\n xx = voro.vertices\n if distance_threshold > 0:\n cluster = AgglomerativeClustering(\n linkage='single',\n distance_threshold=distance_threshold,\n n_clusters=None\n )\n cluster.fit(xx)\n xx = get_average_of_unique_labels(cluster.labels_, xx)\n xx = xx[np.linalg.norm(xx-self._structure.get_wrapped_coordinates(xx, epsilon=0), axis=-1)<epsilon]\n return xx-epsilon",
"def plot_cell(cell_centre=[0, 0, 0], CELL=np.eye(3), color='k'):\n\n uvw = np.array([[0., 0, 0], [1, 0, 0], [1, 0, 1], [1, 1, 1], [1, 1, 0], [0, 1, 0], [0, 1, 1],\n [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 1, 0], [0, 0, 0], [0, 0, 1]])\n uvw = uvw - 0.5 # plot around box centre\n bpos = np.dot(uvw, CELL)\n bpos = bpos + cell_centre\n plt.plot(bpos[:, 0], bpos[:, 1], bpos[:, 2], c=color) # cell box",
"def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()",
"def voronoi(geometry,\n pore_volume='pore.volume',\n **kwargs):\n from scipy.special import cbrt\n pore_vols = geometry[pore_volume]\n value = cbrt(6*pore_vols/_sp.pi)\n return value",
"def voronoi_tesellation_box(boundary,lng,lat):\n # array with points coordinates\n points = np.zeros((lng.shape[0],2))\n points[:,0] = lng\n points[:,1] = lat\n\n # compute Voronoi tesselation\n vor = Voronoi(points)\n \n # Reconstruct infinite voronoi regions in a 2D diagram to finite regions.\n regions, vertices = voronoi_finite_polygons_2d(vor)\n \n # build box from country boundary\n xmin = boundary.bounds.minx[0]\n xmax = boundary.bounds.maxx[0]\n ymin = boundary.bounds.miny[0]\n ymax = boundary.bounds.maxy[0]\n\n box = Polygon([[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]])\n\n voronoid = [] \n for region in regions:\n polygon = vertices[region]\n # Clipping polygon\n poly = Polygon(polygon)\n voronoid.append(poly.intersection(box))\n \n voronoid = gpd.GeoDataFrame(geometry = voronoid)\n \n vor_lng = vor.points[:,0]\n vor_lat = vor.points[:,1]\n \n voronoid['lng'] = vor_lng\n voronoid['lat'] = vor_lat\n \n return voronoid",
"def draw_grid(self, min_x, max_x, min_y, max_y, min_z, max_z) -> None:\n from pymol import cmd\n from math import sin, cos\n \n # Prepare dimensions\n angle1 = 0.0\n angle2 = 0.0\n min_x = x - min_x\n max_x = max_x - x \n min_y = y - min_y \n max_y = max_y - y \n min_z = z - min_z \n max_z = max_z - z \n\n # Get positions of grid vertices\n # P1\n x1 = -min_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y1 = -min_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z1 = min_x * sin(angle2) + min_y * sin(angle1) * cos(angle2) - min_z * cos(angle1) * cos(angle2) + z\n \n # P2\n x2 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y2 = (-min_y) * cos(angle1) + (-min_z) * sin(angle1) + y\n \n z2 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P3\n x3 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y3 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z3 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P4\n x4 = (-min_x) * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y4 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z4 = -(-min_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n \n # P5\n x5 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + (-min_z) * cos(angle1) * sin(angle2) + x\n\n y5 = max_y * cos(angle1) + (-min_z) * sin(angle1) + y\n\n z5 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + (-min_z) * cos(angle1) * cos(angle2) + z\n \n # P6\n x6 = max_x * cos(angle2) - (-min_y) * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y6 = (-min_y) * cos(angle1) + max_z * sin(angle1) + y\n\n z6 = (-max_x) * sin(angle2) - (-min_y) * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n \n # P7\n x7 = (-min_x) * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y7 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z7 = -(-min_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z\n\n # P8\n x8 = max_x * cos(angle2) - max_y * sin(angle1) * sin(angle2) + max_z * cos(angle1) * sin(angle2) + x\n\n y8 = max_y * cos(angle1) + max_z * sin(angle1) + y\n\n z8 = (-max_x) * sin(angle2) - max_y * sin(angle1) * cos(angle2) + max_z * cos(angle1) * cos(angle2) + z \n\n # Create box object\n if \"grid\" in cmd.get_names(\"objects\"):\n cmd.delete(\"grid\")\n\n # Create vertices\n cmd.pseudoatom(\"grid\", name=\"v2\", pos=[x2, y2, z2], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v3\", pos=[x3, y3, z3], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v4\", pos=[x4, y4, z4], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v5\", pos=[x5, y5, z5], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v6\", pos=[x6, y6, z6], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v7\", pos=[x7, y7, z7], color=\"white\")\n cmd.pseudoatom(\"grid\", name=\"v8\", pos=[x8, y8, z8], color=\"white\")\n\n # Connect vertices\n cmd.select(\"vertices\", \"(name v3,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v2,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v5,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n 
cmd.select(\"vertices\", \"(name v2,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v6)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v4,v7)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v3,v5)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v6,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.select(\"vertices\", \"(name v7,v8)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1x\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v2x\", pos=[x2, y2, z2], color='white')\n cmd.select(\"vertices\", \"(name v1x,v2x)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v1y\", pos=[x1, y1, z1], color='white')\n cmd.pseudoatom(\"grid\", name=\"v3y\", pos=[x3, y3, z3], color='white')\n cmd.select(\"vertices\", \"(name v1y,v3y)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.pseudoatom(\"grid\", name=\"v4z\", pos=[x4, y4, z4], color='white')\n cmd.pseudoatom(\"grid\", name=\"v1z\", pos=[x1, y1, z1], color='white')\n cmd.select(\"vertices\", \"(name v1z,v4z)\")\n cmd.bond(\"vertices\", \"vertices\")\n cmd.delete(\"vertices\")",
"def plotVects(vList, colors='k'):\n polyhedron([vertex(v) for v in vList] + [vertex(0, 0, 0)], [edge(v) for v in vList], []).plot(plotEdges=True)"
] | [
"0.672641",
"0.65702593",
"0.65630525",
"0.65132475",
"0.64372486",
"0.6365562",
"0.6342777",
"0.6291183",
"0.62710994",
"0.59524184",
"0.58798623",
"0.5819559",
"0.5795768",
"0.57930243",
"0.57612365",
"0.56800836",
"0.5656755",
"0.5648358",
"0.5621595",
"0.5620752",
"0.55955374",
"0.55606186",
"0.5557822",
"0.55158794",
"0.5500937",
"0.54693717",
"0.54684323",
"0.5433653",
"0.5422495",
"0.53983533"
] | 0.78701174 | 0 |
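The plotVoronoiCell record above depends on instance state (self.vor, self.data, self.nonBI) and so cannot run on its own. As a rough, self-contained sketch of the same idea — not code from this dataset; the point cloud is made up and the nearest-to-centroid cell choice is only an assumption that usually yields a bounded cell — one way to plot a single bounded 3D Voronoi cell with SciPy while keeping the axis limits tied to the full point set is:

import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Voronoi

rng = np.random.default_rng(0)
points = rng.random((60, 3))                      # hypothetical 3-D point cloud
vor = Voronoi(points)

# pick the point nearest the centroid: its cell is almost certainly bounded
i = int(np.argmin(np.linalg.norm(points - points.mean(axis=0), axis=1)))
region = vor.regions[vor.point_region[i]]
if -1 in region:
    raise RuntimeError("cell %d is unbounded; pick an interior point" % i)
cell_vertices = vor.vertices[region]

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, projection='3d')
ax.set_title('Voronoi cell of point %d' % i)
ax.scatter(*cell_vertices.T, s=20, alpha=0.7, label='cell vertices')
ax.scatter(*points[i], s=60, color='red', label='cell center')
# axis limits follow the whole cloud so the cell is seen in context
ax.set_xlim3d(points[:, 0].min(), points[:, 0].max())
ax.set_ylim3d(points[:, 1].min(), points[:, 1].max())
ax.set_zlim3d(points[:, 2].min(), points[:, 2].max())
ax.legend()
plt.show()

Points on the hull of the cloud have unbounded cells (their regions contain the index -1) and no finite vertex set to draw, which is why the sketch checks for -1 before indexing the vertices.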
This method plots a bar chart comparing the volumes of the largest clusters. | def volumePlot(self, top=10):
fig = plt.figure()
fig.set_size_inches(18.5, 9.5)
ax = fig.add_subplot(111)
label = ['Cluster ' + str(i) for i in range(1, len(self.volumesC) + 1)]
volumesC = np.sort(self.volumesC)[::-1][:top]
sortI = np.argsort(self.volumesC)[::-1][:top]
label = [label[i] for i in sortI]
cmap = plt.get_cmap('plasma')
c = cmap(volumesC)
ax.bar(range(top), volumesC, tick_label=label, width=0.5, color=c)
ax.tick_params(labelsize=18)
plt.ylabel('Volume [m^3]', fontsize=18)
plt.title('Volume per Cluster', fontsize=22)
plt.savefig('Voronoi Volumes per Cluster') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bar_charts(cluster, genelist, groups=[\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\", \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ], postfix=''):\n\n limits = cluster.reorder_matrix(groups)\n pp = PdfPages(cluster.exportPath[0:-4] + postfix + '.bar_plots.pdf')\n\n # get kegg pathways and NCBI values for each gene:\n ko_dict = genematch.cbir_to_pathway(genelist.keys()) # ko_dict = {gene:str(pathway)}\n go_monster = genematch.GO_maker()\n ncbi_terms = genematch.cbir_ncbi(genelist)\n\n for gene in genelist:\n # get gene details for later use:\n ignore, kotermdic = genematch.cbir_to_kegg([gene],reversedic=True)\n\n anova = degs_anova(cluster, onegene=gene, groups=groups)\n\n try:\n koterm = kotermdic[gene]\n except KeyError:\n koterm = 'no KO'\n\n genegos = go_monster.findem(gene)\n godesc = \"\".join([ \"%s %s %s\\n\" % (g, genegos[g][1], genegos[g][0]) for g in genegos ])\n\n # calculate mean/SEM...\n if gene in cluster.column_header:\n pos = cluster.column_header.index(gene)\n else:\n continue\n gm = [groups[0]] * (limits[0]) # matrix of group names for Tukey's post hoc\n v = [numpy.average(cluster.data_matrix[:limits[0],pos])] # averages\n se = [numpy.std(cluster.data_matrix[:limits[0],pos])/numpy.sqrt(limits[0]+1)] #SEM\n for i in range(len(groups)-1):\n gm += [groups[i+1]] * (limits[i+1]-limits[i])\n v.append(numpy.average(cluster.data_matrix[limits[i]:limits[i + 1],pos]))\n se.append(numpy.std(cluster.data_matrix[limits[i]:limits[i + 1],pos])/numpy.sqrt(limits[i+1]-limits[i]+1))\n\n # calculate tukey's post-hoc values and plot:\n tfig, taxes = plt.subplots()\n\n try:\n posthoc = pairwise_tukeyhsd(cluster.data_matrix[:,pos],gm)\n except Exception as inst:\n verbalise(\"R\", \"Tukey calculation error - check that you have >1 value for each category.\")\n print inst\n continue\n phimg = posthoc.plot_simultaneous(comparison_name='SP', \\\n ax=taxes, ylabel='Groups', xlabel='Normalised Expression', \\\n labelorder = [\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\", \\\n \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ])\n\n # plot_simultaneous does not correctly report the y-axis labels. 
So to fix:\n taxes.set_xticks(numpy.arange(13.0)*1) # increase to gain all labels\n plt.tight_layout() # resets axes\n xlabels = taxes.get_xticklabels() # gets values I need\n\n labelist = [xtick.get_text() for xtick in xlabels] # creates an ordered list of labels\n labelist.pop(0) # removes first element (blank label)\n taxes.set_xticks(numpy.arange(12.0)*1) # now create the right number of ticks\n taxes.set_xticklabels(labelist) # reset with new names\n title_string = \"%s %s(ANOVA P-value %.8f)\\n%s\\n KEGG ortholog %s:\\n%s\\n%s\"\n taxes.set_title(title_string % (os.path.basename(cluster.exportPath[:-4]), gene, anova[gene], ncbi_terms[gene], koterm, ko_dict[gene], godesc), fontsize=12 )\n\n plt.tight_layout()\n plt.savefig(pp, format='pdf')\n #plt.show(phimg)\n plt.close()\n # print summary to file:\n tukeys_h = open(cluster.exportPath[:-4] + '.tukeys.txt','a')\n tukeys_h.write('Gene ' + str(gene) + ':\\n')\n tukeys_h.write(str(posthoc) + '\\n\\n')\n tukeys_h.close()\n\n \"\"\"\n # create box plot of expression values:\n ind = numpy.arange(len(groups)) # x-coords for bars\n width = 0.35 # box width\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, v, width, color='r', yerr=se)\n\n # add details:\n ax.set_ylabel('Normalised Expression')\n ax.set_title('Gene Expression for %s (%s):\\n %s\\n%s' % (str(gene), koterm, ko_dict[gene], godesc), fontsize=12 )\n ax.set_xticks(ind+width)\n ax.set_xticklabels(groups)\n\n plt.tight_layout()\n plt.savefig(pp, format='pdf')\n plt.show()\n \"\"\"\n pp.close()",
"def barplot_tsne_clust(agg_clust_labels, vector,\n save_tag=False, save_path='Bar_Test.png', plot_n_in_cluster=False):\n vector = np.asarray(vector)\n curr_bar = {}\n curr_means = []\n curr_SEM = []\n cluster_n = []\n n_in_cluster = []\n for curr_clust in range(0, np.max(agg_clust_labels)+1):\n curr_bar[str(curr_clust)] = vector[agg_clust_labels == curr_clust]\n n_in_cluster.append(len(vector[agg_clust_labels == curr_clust]))\n cluster_n.append(curr_clust+1)\n curr_means.append(np.mean(vector[agg_clust_labels == curr_clust]))\n curr_SEM.append(stats.sem(vector[agg_clust_labels == curr_clust]))\n bar_cmap = plt.get_cmap('plasma_r', np.max(agg_clust_labels)+1)\n fig, ax = plt.subplots()\n rects1 = ax.bar(cluster_n, curr_means, color=bar_cmap.colors, yerr=curr_SEM)\n # get y range so can scale where text is\n ymin, ymax = plt.ylim()\n scaled_val = (ymax - ymin)/18\n if plot_n_in_cluster == True:\n for i in range(0, len(rects1)):\n rect = rects1[i]\n height = rect.get_height()\n if height > 0:\n plt.text(rect.get_x() + rect.get_width()/2.0, -1*scaled_val, '%d' % int(n_in_cluster[i]),\n ha='center', va='bottom')\n elif height <= 0:\n plt.text(rect.get_x() + rect.get_width()/2.0, 0, '%d' % int(n_in_cluster[i]),\n ha='center', va='bottom')\n # plt.ylabel(curr_stat)\n plt.xlabel('Cluster')\n if save_tag == True:\n save_root = save_path[0:save_path.rfind('/')+1]\n print(save_root)\n if not os.path.exists(save_root):\n os.makedirs(save_root)\n plt.savefig(save_path, bbox_inches='tight', dpi = 150)\n plt.show()",
"def plot_clusters(cluster_1, cluster_2):\r\n plt.figure(figsize=(14, 7))\r\n plt.bar([i - 0.1 for i in cluster_1.keys()], cluster_1.values(), width=0.2, align='center', color='b',\r\n label='German Population')\r\n plt.bar([i + 0.1 for i in cluster_2.keys()], cluster_2.values(), width=0.2, align='center', color='g',\r\n label='Customer Population')\r\n plt.title('German Population versus Customers')\r\n plt.xlabel('Cluster No.')\r\n plt.ylabel('Cluster %')\r\n plt.xticks(range(1, len(cluster_1) + 1))\r\n plt.legend()\r\n plt.savefig('cluster_map.png')\r\n plt.show()\r\n\r\n return",
"def plot_clusters_cuisines(i,cuisine_countries_clusters):\n df=pd.DataFrame(group_by_cluster.iloc[i,:])\n df.reset_index(level=0, inplace=True)\n df.columns=['cuisine','count']\n df=df.sort_values(by='count',ascending=False)\n sns.set(rc={'figure.figsize':(11.7,5.27)})\n sns.barplot(x=\"cuisine\", y='count', data=df)\n plt.xticks(rotation=90)\n plt.title('cluster '+str(i)+ ' count: '+str(Counter(cuisine_countries_clusters)[i]))\n plt.tight_layout()\n plt.show()",
"def get_clusters_bar(ensemble, grouping, clustering, normalize):\n\tif grouping not in ['cluster','annotation','dataset','NeuN']:\n\t\tgrouping = 'cluster'\n\tclusters = get_clusters(ensemble, grouping, clustering)\n\n\tif (normalize=='true'):\n\t\tclusters['ncells_norm'] = clusters.groupby('groups')['ncells'].transform(lambda x: 100*x / x.sum())\n\t\tclusters['y'] = clusters['ncells_norm']\n\t\tytitle = 'Percent of cells per cluster'\n\telse:\n\t\tclusters['y'] = clusters['ncells']\n\t\tytitle = 'Number of cells per cluster'\n\n\t# Stacked bar chart by modality. Appropriate for integrated clustering only\n\tmu = clusters['modality'].unique().tolist()\n\tdata = list();\n\tfor mi in mu:\n\t\tclustersu = clusters[clusters['modality']==mi]\n\t\ttrace = Bar(\n\t\t\ty=clustersu['y'],\n\t\t\tx=clustersu['groups'],\n\t\t\tname=mi+' cells',\n\t\t\thoverinfo='text',\n\t\t\t)\n\t\tif (normalize=='true'):\n\t\t\ttrace['text'] = [str(round(i,1))+'% '+mi+' cells' for i in clustersu['y']]\n\t\telse:\n\t\t\ttrace['text'] = [str(i)+' '+mi+' cells' for i in clustersu['y']]\n\t\tdata.append(trace)\n\n\tlayout = Layout(\n\t autosize=True,\n\t height=450,\n\t width=1000,\n\t title=ytitle,\n\t titlefont={'color': 'rgba(1,2,2,1)',\n\t 'size': 20},\n\t barmode='stack',\n\t xaxis={\n\t 'title': 'Cluster',\n\t 'titlefont': {\n\t 'size': 17\n\t },\n\t 'type': 'category',\n\t 'tickvals':clusters['groups'].unique(),\n\t 'anchor': 'y',\n\t 'ticks': 'outside',\n\t 'ticklen': 4,\n\t 'tickangle': -45,\n\t 'tickwidth': 0.5,\n\t 'showticklabels': True,\n\t 'tickfont': {\n\t 'size': 12\n\t },\n\t 'showline': True,\n\t 'zeroline': False,\n\t 'showgrid': True,\n\t 'linewidth': 1,\n\t 'mirror': True,\n\t },\n\t yaxis={\n\t 'title': \"Number of cells\",\n\t 'titlefont': {\n\t 'size': 15\n\t },\n\t 'type': 'linear',\n\t 'anchor': 'x',\n\t 'ticks': 'outside',\n\t # 'tickcolor': 'white',\n\t 'ticklen': 4,\n\t 'tickwidth': 0.5,\n\t 'showticklabels': True,\n\t 'tickfont': {\n\t 'size': 12\n\t },\n\t 'showline': True,\n\t 'zeroline': False,\n\t 'showgrid': True,\n\t 'linewidth': 1,\n\t 'mirror': True,\n\t },\n\t)\n\treturn plotly.offline.plot(\n\t\t{\n\t\t\t'data': data,\n\t\t\t'layout': layout\n\t\t},\n\t\toutput_type='div',\n\t\tshow_link=False,\n\t\tinclude_plotlyjs=False)",
"def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()",
"def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')",
"def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, layout = layout)\n \n return pyo.iplot(fig)",
"def plot_clusters(self):\n pass",
"def plot_cluster_histogram(clusters, saveAs=None):\n \n cluster_labels = np.unique(clusters)\n k = len(cluster_labels)\n \n heights = [(clusters == l).sum() for l in cluster_labels]\n \n plt.close('all') \n fig, ax = plt.subplots(1,1)\n sns.set_style('white')\n ax.bar(range(1, k+1), heights) \n show_values_on_bars(ax)\n plt.title('n={} clusters'.format(k))\n plt.xlabel('Clusters', labelpad=10)\n plt.ylabel('Number of strains', labelpad=10)\n \n if saveAs is not None:\n plt.savefig(saveAs, dpi=300)\n else:\n plt.show()",
"def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()",
"def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n 
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')",
"def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()",
"def plot_cluster_rd_space(n_cluster, algo = 'KMeans', pca = True):\n df = apps_df.copy()\n \n \n # optionally set downloads to log. Does not change much though.\n #df = df[df['downloads'] <= 2000000]\n mean_rating = df['rating'].mean()\n mean_downloads = math.log(df[df[algo] >= 0]['downloads'].mean())\n cluster_ratings = [df[df[str(algo)] == i]['rating'].mean() for i in range(n_cluster)]\n cluster_downloads = [math.log(df[df[algo] == i]['downloads'].mean()) for i in range(n_cluster)]\n \n font = {'size' : 16}\n plt.rc('font', **font)\n sns.set_style(\"whitegrid\")\n plt.figure(figsize=(8,8))\n plt.scatter(cluster_downloads, cluster_ratings, marker = '.', color = 'black', s=150)\n plt.xlim(min(cluster_downloads), max(cluster_downloads))\n plt.ylim(min(cluster_ratings)-0.01,max(cluster_ratings)+0.01)\n plt.axvline(x=mean_downloads, ymin=0, ymax=max(cluster_ratings), linestyle = '--')\n plt.axhline(y=mean_rating, xmin=0, xmax=max(cluster_downloads), linestyle = '--')\n plt.xlabel('avg. downloads (log scale)', fontsize=16)\n plt.ylabel('avg. ratings', fontsize=16)\n for i in range(n_cluster):\n plt.annotate(str(i), \n (cluster_downloads[i],cluster_ratings[i]))\n plt.axhspan(ymin=min(cluster_ratings)-0.01, ymax=mean_rating, facecolor='c', alpha=0.5)\n plt.axhspan(mean_rating, max(cluster_ratings)+0.01, facecolor='r', alpha=0.5)\n plt.axvspan(min(cluster_downloads), mean_downloads, facecolor='grey', alpha=0.3)\n plt.axvspan(mean_downloads, max(cluster_downloads), facecolor='w', alpha=0.5)\n plt.tick_params(axis='x', labelsize=16)\n plt.tick_params(axis='y', labelsize=16)\n #if pca: \n # plt.title('{} PCA Clusters in Download / Rating Space'.format(algo))\n #else: \n # plt.title('{} NON_PCA Clusters in Download / Rating Space'.format(algo))\n\n plt.show()",
"def plot_chrom_classification(df_status, ax=None, add_cbar=True, cbar_ax=None):\n df = df_status.copy()\n df.replace('Pass', 0, inplace=True)\n df.replace('Possible loss', 1, inplace=True)\n df.replace('Possible gain', 2, inplace=True)\n df.replace('Fail', 3, inplace=True)\n\n cp = sns.color_palette()\n c_loss, c_neut, c_gain = sns.diverging_palette(255, 133, l=60, n=3)\n cmap = matplotlib.colors.ListedColormap([c_neut, c_loss, c_gain, cp[2]])\n\n if not ax:\n f = plt.figure()\n ax = f.add_subplot(111)\n\n nd = df.as_matrix()\n\n cax = ax.imshow(nd, aspect='equal', cmap=cmap, interpolation=None, vmin=0, vmax=3)# , vmax=1, vmin=-1)\n ax.set_yticks(np.arange(0, df.shape[0]))\n ax.set_xticks(np.arange(0, df.shape[1]))\n ax.set_xticks(np.arange(0.5, df.shape[1]+0.5), minor=True)\n\n for y in np.arange(0.5, df.shape[0], 1):\n ax.axhline(y, linestyle='--', color='black', linewidth=1)\n \n ax.set_yticklabels([s.replace('_', '') for s in df.index])\n ax.set_xticklabels(df.columns);\n ax.grid(which='minor', color='black', linewidth=1)\n ax.set_xlabel('chromosome')\n \n #colorbar\n if add_cbar:\n cbar = ax.figure.colorbar(cax, ticks=[0.375, 0.75+0.375, 1.5+0.375, 2.25+0.375], cax=cbar_ax, orientation='horizontal')\n cbar.ax.set_xticklabels(['Pass', 'Pos. Loss', 'Pos. Gain', 'Fail'])\n cbar.ax.xaxis.tick_top()\n cbar.ax.tick_params(axis='x', which='major', pad=0)\n for y in [0.25, 0.5, 0.75]:\n cbar.ax.axvline(y, color='black', linewidth=1)\n \n return ax",
"def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")",
"def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return",
"def plot_colors(hist, centroids):\n bar = np.zeros((50, 300, 3), dtype=\"uint8\")\n startX = 0\n\n for (percent, color) in zip(hist, centroids):\n # plot the relative percentage of each cluster\n endX = startX + (percent * 300)\n cv2.rectangle(bar, (int(startX), 0), (int(endX), 50),\n color.astype(\"uint8\").tolist(), -1)\n startX = endX\n\n # return the bar chart\n return bar",
"def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return",
"def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()",
"def plot_individual_bar_chart_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n vals_for_bar_chart,\r\n file_in):\r\n\r\n n_groups = len(vals_for_bar_chart)\r\n fig, ax = plt.subplots()\r\n index = np.arange(n_groups)\r\n bar_width = 0.9\r\n opacity = 0.4\r\n # print vals_for_bar_chart\r\n rects1 = plt.bar(index,\r\n vals_for_bar_chart,\r\n bar_width,\r\n alpha=opacity,\r\n color='b') # label='whatever'\r\n plt.xlabel('number in cluster')\r\n plt.ylabel('Count')\r\n plt.title(title+\"_barchart\")\r\n plt.legend()\r\n pylab.grid(True)\r\n ax.set_yscale('symlog')\r\n ax.set_xscale('symlog')\r\n plt.tight_layout()\r\n plt.show()\r\n pylab.savefig(file_in + \"_\" + title + '_barchart.png')\r\n plt.close()\r\n pylab.close()",
"def plot_closeness_barplots(seqidss,alis,legend,colors=None,limsy=[0,1],out=None,clusterings=None,annot=True):\n newpats = seqidss.keys()\n data=[]\n for i,alig in enumerate(alis):\n print legend[i]\n bardata={}\n seqids=[]\n for k in seqidss.keys():\n if seqidss[k][i]!=None: seqids.append(seqidss[k][i])\n seqids=list(set(seqids))\n if clusterings==None:\n pats,seq_dict=get_clustered_bins(seqids,alig)\n else:\n f=open(clusterings[i],\"r\")\n seq_dict=pickle.load(f)\n pats= seq_dict.keys()\n dfDists, dfCount = get_closeness(pats,seq_dict,isdiagonal=True)\n for npat in newpats:\n aux=False\n for col in dfDists.columns:\n if npat in col:\n if dfCount[col][col] < clim:\n bardata[npat]=(0, dfCount[col][col])\n else:\n bardata[npat]=(dfDists[col][col], dfCount[col][col])\n aux=True\n break\n if aux==False:\n bardata[npat]=(0, 0)\n data.append(bardata)\n if len(newpats) % 6 == 0: rows=len(newpats)/6\n else: rows=len(newpats)/6+1\n fig = plt.figure(figsize=(5*6, 4*rows))\n for j,pat in enumerate(data[0].keys()):\n values=[]\n for i,prot in enumerate(data):\n values.append(prot[pat][0])\n print pat,zip(legend,values)\n ax = fig.add_subplot(rows, 6, j+1)\n ax.bar(legend,values,color=colors)\n ax.set_ylabel('Conservation level',fontsize=17)\n ax.set_title('%s'%pat,fontsize=20)\n ax.set_ylim(0, 1)\n x0,x1 = ax.get_xlim()\n y0,y1 = ax.get_ylim()\n ax.xaxis.set_visible(False)\n ax.set_aspect(abs(x1-x0)/abs(y1-y0))\n plt.legend()\n if out!=None:\n fig.savefig(\"%s.png\"%(out),bbox_inches='tight')",
"def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()",
"def plots(corpus_parts, corpus):\n \"\"\"\n given the data obtained by the function percentage_hapaxes(dv_corpus, tokenized_corpus),\n the graphic for the percentage of hapaxes per partition is plotted\n \"\"\"\n h_parts = hapaxes_parts(corpus_parts)\n part_size = [x for x in range(len(h_parts))]\n \n percent_h = percentage_hapaxes(corpus_parts, corpus)\n percent_length = [i for i in range(len(percent_h))] \n \n fig, (ax1, ax2) = plt.subplots(1, 2)\n plt.setp(ax1, xticks=np.arange(0, len(part_size), 1))\n plt.setp(ax2, xticks=np.arange(0, len(percent_length), 1))\n fig.suptitle('Number (left) and percentage (right) of hapaxes in each part')\n ax1.bar(part_size, h_parts)\n ax2.bar(percent_length, percent_h) \n return plt.show()",
"def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax",
"def plot_umap_clusters(ax, df):\n labels = cluster_labels(df['cluster_id'])\n color_map = get_cluster_color_map(df['cluster_id'].values)\n\n if -1 in labels:\n df_noise = df[df['cluster_id'] < 0]\n ax.scatter(\n df_noise['umap1'].values,\n df_noise['umap2'].values,\n color=color_map[-1],\n s=2,\n label=labels[-1],\n )\n\n text_labels = []\n for cluster_id, cluster_df in df[df['cluster_id'] >= 0].groupby('cluster_id'):\n ax.scatter(\n cluster_df['umap1'].values,\n cluster_df['umap2'].values,\n color=color_map[cluster_id],\n s=2,\n label=labels[int(cluster_id)],\n )\n\n label_pos = df.groupby('cluster_id').mean()\n text_labels = [\n ax.text(label_pos.at[c, 'umap1'], label_pos.at[c, 'umap2'], c)\n for c in list(labels.keys()) if c >= 0\n ]\n adjust_text(\n text_labels, ax=ax,\n force_points=(0.1, 0.1)\n )\n\n ax.legend(\n frameon=False, markerscale=5,\n scatterpoints=1, bbox_to_anchor=(0.96, 0.85))\n ax.set_xlabel('Comp. 1')\n ax.set_ylabel('Comp. 2')\n seaborn.despine(ax=ax, offset=0, trim=True)",
"def plot_balance_class(classes):\n unique, counts = np.unique(classes, return_counts=True)\n plt.bar(unique, counts)\n plt.title('Class Frequency')\n plt.xlabel('Class')\n plt.ylabel('Frequency')\n plt.show()",
"def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)",
"def plot_multi_abundance(\n bracken_combined, plot_file, min_percent\n):\n\n nrow, ncol = 1, 2\n\n fig, ax = plt.subplots(\n nrows=nrow, ncols=ncol, figsize=(\n ncol*14, nrow*14\n )\n )\n\n data = pandas.read_csv(bracken_combined, sep='\\t', index_col='name', header=0)\n # Use percentage rather than total reads across samples\n data = data[[c for c in data.columns if 'bracken_frac' in c]]\n data.columns = [d.replace(\".bracken_frac\", \"\") for d in data.columns]\n\n if min_percent > 0:\n keep_idx = []\n for i, row in data.iterrows():\n keep_col = [True for v in row if v >= min_percent]\n if any(keep_col):\n keep_idx.append(row.name)\n data = data[data.index.isin(keep_idx)]\n\n # Separate viruses\n viruses = []\n for name in data.index.tolist():\n if 'virus' in name.lower():\n viruses.append(data[data.index == name])\n viruses = pandas.concat(viruses)\n data = data.drop(viruses.index.tolist())\n\n human = data[data.index == 'Homo sapiens']\n data = data.drop('Homo sapiens')\n print(human)\n\n REMAINING_PATHOGENS = [p for p in PATHOGENS if p in data.index.tolist()]\n pathogens = data[data.index.isin(REMAINING_PATHOGENS)].sort_index()\n data = data.drop(REMAINING_PATHOGENS)\n\n REMAINING_CONTAMINATION = [p for p in CONTAM if p in data.index.tolist()]\n contams = data[data.index.isin(REMAINING_CONTAMINATION)].sort_index()\n data = data.drop(REMAINING_CONTAMINATION)\n\n print(pathogens)\n print(contams)\n\n viruses_collapsed = collapse_taxa(viruses, suffix=\"virus\")\n\n print(viruses_collapsed)\n\n other_collapsed = collapse_taxa(data, genus=True)\n\n print(other_collapsed)\n\n combined = []\n for name, df in {\n 'Human': human, 'Pathogens': pathogens, 'Contamination': contams,\n 'Viruses': viruses_collapsed, 'Microbes': other_collapsed\n }.items():\n df['domain'] = [name for _ in range(len(df))]\n combined.append(df)\n combined = pandas.concat(combined)\n\n print(combined)\n\n panel1 = combined[combined['domain'] != 'Microbes']\n panel2 = combined[combined['domain'] == 'Microbes']\n\n panel1.reset_index(level=0, inplace=True)\n panel2.reset_index(level=0, inplace=True)\n panel1.rename(columns={'index': 'taxon'}, inplace=True)\n panel2.rename(columns={'index': 'taxon'}, inplace=True)\n\n print(panel1)\n print(panel2)\n #\n panel1_melt = panel1.melt(id_vars=['taxon', 'domain'], value_name=\"abundance\", var_name=\"sample\")\n panel2_melt = panel2.melt(id_vars=['taxon', 'domain'], value_name=\"abundance\", var_name=\"sample\")\n\n print(panel1_melt)\n print(panel2_melt)\n\n panel1_melt['abundance'] = [None if ab == 0. else ab for ab in panel1_melt['abundance']]\n panel2_melt['abundance'] = [None if ab == 0. else ab for ab in panel2_melt['abundance']]\n p1 = sns.scatterplot(\n data=panel1_melt, x=\"sample\", y=\"taxon\", hue=\"domain\",\n size=\"abundance\", legend=False, sizes=(70, 2000), ax=ax[0]\n )\n\n p2 = sns.scatterplot(\n data=panel2_melt, x=\"sample\", y=\"taxon\", hue=\"domain\", size=\"abundance\", legend=False, sizes=(50, 2000), ax=ax[1]\n )\n\n # plot grid behind markers\n # p1.grid(ls=\"dotted\", zorder=1, linewidth=0.1)\n # p2.grid(ls=\"dotted\", zorder=1, linewidth=0.1)\n # take care of long labels\n fig.autofmt_xdate()\n\n plt.tight_layout()\n p1.set_ylabel(\"\")\n p1.set_ylabel(\"\")\n p2.set_ylabel(\"\")\n p2.set_ylabel(\"\")\n fig.savefig(f'{plot_file}')",
"def compare_plot_instances(data_causal):\n col_names = data_causal.columns.values # get the columns' names\n dimension = 2 # TODO: figure out better way to organize plots by location\n\n fig = plt.figure()\n i = 1\n for cond in col_names:\n ax = fig.add_subplot(len(col_names)/dimension, dimension, i)\n df_compare = data_causal.groupby(cond)[cond].count() # displays num instances assigned to each condition\n ax = df_compare.plot(kind='bar', title=cond)\n ax.set_xlabel(cond)\n ax.set_ylabel(\"count instances\")\n i += 1\n fig.tight_layout()\n plt.show()"
] | [
"0.6847775",
"0.65582955",
"0.6480496",
"0.64798915",
"0.6471344",
"0.6433542",
"0.6422154",
"0.63602215",
"0.6318244",
"0.62704057",
"0.6259151",
"0.6216085",
"0.6214352",
"0.6189896",
"0.61803657",
"0.61696666",
"0.61290234",
"0.6117881",
"0.6092647",
"0.6063931",
"0.6029081",
"0.6022612",
"0.60105103",
"0.5986418",
"0.5978904",
"0.59783334",
"0.59411156",
"0.5923591",
"0.59166497",
"0.5911596"
] | 0.7068967 | 0 |
For a Voronoi ridge specified by i, this method processes the adjacent Voronoi cell centers, assigning the corresponding cluster label to each of them. | def forPointPair(self, i):
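#self.pairs[i] holds the indices of the cell centers adjacent to Voronoi ridge i; self.isCluster flags which of them were classified as cluster cells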
areCluster = [self.isCluster[j] for j in self.pairs[i]]
if sum(areCluster) > 1:
#If at least two neighboring cells are cluster cells, four possible cases exist: 1. none of them have been previously
#labeled and thus a new cluster label has to be defined, 2. all have been labeled with the same cluster label
#and as a result nothing is to be done, 3. only some of them have been labeled with a cluster label, which is
#then propagated to the other cells, 4. several have been assigned different cluster labels, and thus the older
#(lower) cluster label has to be propagated.
labels = [self.labels[j] for j in self.pairs[i]]
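#A label of -1 means the corresponding cell center has not been assigned to any cluster yet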
already = [j != -1 for j in labels]
if sum(already) == 0: #None of the cell centers have been assigned a cluster label
for j,p in enumerate(self.pairs[i]):
if areCluster[j]:
self.labels[p] = self.maxLabel
self.maxLabel += 1
else: #At least one of the cell centers has been assigned a cluster label
contesting = [j for j in labels if j != -1]
toAssign = min(contesting)
for j,p in enumerate(self.pairs[i]):
if areCluster[j]:
if labels[j] == -1:
self.labels[p] = toAssign
elif labels[j] != toAssign:
self.propagateLabel(toAssign, labels[j])
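#propagateLabel may merge two labels and shift the labels above the removed one, so the next free label is recomputed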
self.maxLabel = np.max(self.labels) + 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))",
"def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def plotVoronoiCell(self, cells):\n for i in cells:\n #i indexes volumes\n i = self.nonBI[i] #now i indexes vor.point_region\n\n vI = self.vor.regions[self.vor.point_region[i]]\n v = self.vor.vertices[vI, :]\n r = v\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Voronoi Cell of Particle ' + str(i))\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m]')\n ax.scatter(r[:, 0], r[:, 1], r[:, 2], s=5, alpha=0.5, label='Cell Boundaries')\n ax.scatter(self.data[i, 0], self.data[i, 1], self.data[i, 2], s=25, label='Cell Center')\n ax.set_xlim3d(np.min(self.data[:, 0]), np.max(self.data[:, 0]))\n ax.set_ylim3d(np.min(self.data[:, 1]), np.max(self.data[:, 1]))\n ax.set_zlim3d(np.min(self.data[:, 2]), np.max(self.data[:, 2]))\n # limits = np.vstack((np.array([np.max(self.data[:, 0]), np.max(self.data[:, 1]), np.max(self.data[:, 2])]), np.array([np.min(self.data[:, 0]), np.min(self.data[:, 1]), np.min(self.data[:, 2])])))\n # ax.scatter(limits[:, 0], limits[:, 1], limits[:, 2], s=1)\n ax.legend()",
"def _relocate_clusters(self, cluster_labels):\n for cluster_label in range(self.k):\n if cluster_labels[cluster_label] is not None:\n # mean of the pixels assigned to cluster\n p_sum, p_count = np.asarray(\n cluster_labels[\n cluster_label\n ]).sum(axis=0), len(cluster_labels[cluster_label])\n self._clusters[cluster_label] = p_sum / p_count",
"def create_clusters(self):\n ex = 0\n print 'Iter - Purity Gini Index'\n while ex < self.MAX_ITERATION:\n new_clusters = np.zeros(self.centroids.shape)\n distances = euclidean_distances(self.vectors, self.centroids).argmin(axis=1)\n for i in range(self.K):\n indexes = np.argwhere(distances == i)\n data = self.vectors[indexes.transpose()[0]]\n if data.shape[0] > 1:\n new_clusters[i] = (np.sum(data, axis=0) / data.shape[0])\n else:\n new_clusters[i] = np.sum(data, axis=0)\n print ex, '----', self.cal_purity()\n ex += 1\n if np.allclose(self.centroids, new_clusters, atol=self.TOLERANCE):\n break\n self.centroids = new_clusters",
"def run(self):\n for l in self.uniqueSkel:\n mask = np.arange(len(self.skel))[self.skelLabels == l]\n counts = self.findNearest(mask)\n self.memberships[l] = counts\n\n #self.memberships is an array of as many rows as skeleton labels and as many columns as Voronoi cluster labels,\n #where the i-th row shows for all skeleton points of cluster label i, how many belong to each of the Voronoi\n #cluster labels. More precisely, the j-th column of the i-th row of this array shows how many skeleton points\n #of cluster label i have a closest Voronoi cell center of label j.\n\n print('Out of ' + str(len(self.skel)) + ' skeleton points, ' + str(sum(self.memberships[:, 0])) + ' (' + str(round(sum(self.memberships[:, 0]) * 100/len(self.skel), 3)) + ' %) appear in areas classified as void areas by Voronoi')\n\n for l in self.uniqueSkel:\n members = sum(self.skelLabels == l)\n topVor = np.argsort(self.memberships[l])[::-1][:5] - 1\n counts = np.sort(self.memberships[l])[::-1][:5]\n print('For the ' + str(members) + ' skeleton points with label ' + str(l) + ': ')\n for i in range(5):\n if counts[i] > 0:\n if topVor[i] == -1:\n add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100 / members, 3)) + ' %) are not associated with a Voronoi cluster cell'\n else:\n add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100/ members, 3)) + ' %) belong to the Voronoi Cluster with label ' + str(topVor[i])\n print(add)\n\n self.plotResults()",
"def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Augmenter cette valeur augmente le nombre d'éléments dans un cluster et change les résultats\n epsilon = 2 / kms_per_radian\n # Adapter le nombre de clusters (min_sample) au nombre d'entités dans array ?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # on récupère la moyenne des coordonnées du plus gros cluster. Cette moyenne équivaut au centroide :\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"plus proche de moyenne\", closest( results, average ) )\n return closest( result_nominatim, average )",
"def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))",
"def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)",
"def voronoi_labelling(self, seed):\n import heapq\n if hasattr(seed, '__iter__') == False:\n seed = [seed]\n try:\n if (self.weights < 0).any():\n raise ValueError('some weights are non-positive')\n except:\n raise ValueError('undefined weights')\n dist, active = np.inf * np.ones(self.V), np.ones(self.V)\n label = - np.ones(self.V, np.int_)\n idx, neighb, weight = self.compact_neighb()\n dist[seed] = 0\n label[seed] = np.arange(len(seed))\n dg = list(zip(np.zeros_like(seed), seed))\n heapq.heapify(dg)\n for j in range(self.V):\n end = False\n while True:\n if len(dg) == 0:\n end = True\n break\n node = heapq.heappop(dg)\n if active[node[1]]:\n break\n if end:\n break\n dwin, win = node\n active[win] = False\n # the folllowing loop might be vectorized\n for i in range(idx[win], idx[win + 1]):\n l, newdist = neighb[i], dwin + weight[i]\n if newdist < dist[l]:\n heapq.heappush(dg, (newdist, l))\n dist[l] = newdist\n label[l] = label[win]\n return label",
"def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())",
"def buildTree(self):\n self.initClusters()\n while len(self.centroids) > 1:\n print \"Iteration %d\" % self.labelcount\n for i in self.centroids:\n \tprint self.centroids[i], i\n \n l1, l2 = self.closestClusters()\n n1, n2 = self.getNumPoints(l1, l2)\n\n self.centroids[('Cl_%d' % self.labelcount, l1, l2)] = \\\n (n1*self.centroids[l1] + n2*self.centroids[l2])/(n1 + n2)\n \n del self.centroids[l1]\n del self.centroids[l2]\n self.labelcount += 1\n\n\n return ('Cl_%d' % (self.labelcount-1), l1, l2)",
"def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelList[i]] = 1",
"def __initCluster(self):\n data_size, cluster_center = self.data_size, self.cluster_center\n self.cluster_temp = np.zeros(data_size, dtype=int)\n self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float)\n for center in cluster_center:\n self.cluster_temp[center] = center",
"def find_centroid_for_each(self):",
"def cluster_bal_iter(self):\n # moving\n for j,cluster in enumerate(self.clusters):\n cluster.move()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n # resizing\n for j,cluster in enumerate(self.clusters):\n cluster.resize()\n self.clusters_allocate_cells()\n for j,cluster in enumerate(self.clusters):\n cluster.calc()\n #print j, '\\t', cluster.center, '\\t', cluster.np, '\\t', cluster.size\n \n self.calc()",
"def _calculate_nearest_cluster(self, pixels, cluster_labels):\n\n # assign pixel (RGB) to nearest cluster label (index)\n for index, rgb in pixels:\n rgb_vector = np.tile(rgb, (self.k, 1))\n self._labels[index] = np.argmin(\n self._euclid_distance(rgb_vector, self._clusters), axis=0)\n\n if cluster_labels[self._labels[index]] is None:\n cluster_labels[self._labels[index]] = list()\n\n cluster_labels[self._labels[index]].append(rgb)\n\n return cluster_labels",
"def _assign_clusters(self):\n\n dist = np.zeros((self.k, ))\n distortion = 0\n\n for index in range(0, self.data.shape[0]):\n for i in range(0, self.k):\n dist[i] = np.linalg.norm(self.data[index] - self.centroids[i])\n\n self.assigned_clusters[index] = np.argmin(dist)\n distortion += np.min(dist)\n\n return distortion",
"def cluster(self):\n print(\"Calculating distances\")\n self.all_distances()\n\n print(\"Start making sets\")\n clusters = self.clusters\n\n # Generates a set with neighbours for each point\n for row in self.distances:\n clusters.append(set(np.where(row < self.distance_threshold)[0].tolist()))\n\n print(\"Merging sets\")\n for cluster1 in range(self.point_count):\n for cluster2 in range(self.point_count):\n if clusters[cluster2] is not None and clusters[cluster1] is not None:\n if not clusters[cluster1].isdisjoint(clusters[cluster2]) and cluster1 != cluster2:\n clusters[cluster1].update(clusters[cluster2])\n clusters[cluster2] = None\n # Deletes empty clusters\n clusters = [points for points in clusters if points is not None]\n # Sorts clusters by their size\n clusters.sort(key=len, reverse=True)\n # Builds main set\n for point_set in clusters[0:self.cluster_count_threshold]:\n self.main_cluster.update(point_set)\n\n self.main_cluster = list(self.main_cluster)\n self.clusters = clusters",
"def assign(self):\n\n for s in self.spots:\n if self.cells[s[:2]] == 0:\n label = find_nearest_region(self.cells, *s[:2])\n else:\n label = self.cells[s[:2]]\n\n s.region = label",
"def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])",
"def draw_clusters(img, p1, p2, k, label, thres, padding):\n for i in range(k):\n color = np.random.uniform(low=0, high=255, size=3)\n index = np.where(label == i)[0]\n if len(index) <= thres:\n continue\n\n # plot for one cluster\n start = p1[index]\n end = p2[index]\n img = draw_circles(img, start, color)\n img = draw_circles(img, end, color)\n img = draw_arrows(img, start, end, color)\n min_x, min_y = np.amin(end, axis=0).astype(int) - padding\n max_x, max_y = np.amax(end, axis=0).astype(int) + padding\n img = cv2.rectangle(img, (min_x, min_y), (max_x, max_y), color, 2)\n return img",
"def visualize_clustering_results(cluster_points: list, labels: list) -> None:\n\n # First, split out the point tuples by label.\n points_by_label = defaultdict(list)\n for idx, point in enumerate(cluster_points):\n points_by_label[labels[idx]].append(point)\n\n # Next, stack the points for each label into a single array.\n big_xy_list_by_label = {}\n for label, points_for_that_label in points_by_label.items():\n big_xy_list_by_label[label] = np.stack(tuple(points_for_that_label))\n\n # Compute the centroids of each point cloud for labeling.\n centroids_by_label = {}\n for label, arr in big_xy_list_by_label.items():\n length = arr.shape[0]\n sum_x = np.sum(arr[:, 0])\n sum_y = np.sum(arr[:, 1])\n centroid = sum_x / length, sum_y / length\n centroids_by_label[label] = centroid\n\n # Initialize a counter to iterate through the color map\n i = 0\n plt.rcParams.update({\"font.size\": 22, \"font.weight\": \"bold\"})\n fig, ax = plt.subplots(figsize=(20, 20))\n for label, coords in centroids_by_label.items():\n ax.scatter(\n big_xy_list_by_label[label][:, 0],\n big_xy_list_by_label[label][:, 1],\n c=COLOR_DICT[i],\n s=50,\n alpha=0.5,\n label=label,\n )\n # plt.scatter(coords[0], coords[1], c=color_dict[i], label=label, s=100, alpha=0)\n ax.annotate(label, xy=coords, textcoords=\"data\", color=\"black\")\n i += 1\n ax.legend(loc=\"best\")\n plt.show()",
"def compute_cell_location_fast(seg_img: np.ndarray, all_labels: np.ndarray) \\\n -> (nx.graph, np.ndarray):\n g = nx.Graph()\n centers = cell_center_fast(seg_img, all_labels) # was 6\n\n # Compute vertices\n for i in all_labels:\n if i != 0:\n g.add_node(i)\n\n # Compute edges\n for i in all_labels:\n if i != 0:\n for j in all_labels:\n if j != 0:\n if i != j:\n pos1 = centers[i]\n pos2 = centers[j]\n distance = np.sqrt((pos1[0]-pos2[0])**2 +\n (pos1[1]-pos2[1])**2 + (pos1[2]-pos2[2])**2)\n\n g.add_edge(i, j, weight=distance)\n return g, centers",
"def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]",
"def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;",
"def compute_cell_center(seg_img: np.ndarray, labels: np.ndarray, results: np.ndarray) \\\n -> np.ndarray:\n for label in labels:\n if label != 0:\n all_points_z, all_points_x, all_points_y = np.where(seg_img == label)\n avg_z = np.round(np.mean(all_points_z))\n avg_x = np.round(np.mean(all_points_x))\n avg_y = np.round(np.mean(all_points_y))\n results[label] = [avg_z, avg_x, avg_y]\n\n return results",
"def form_clusters(self, labelled_data, unlabelled_centroids):\n # enumerate because centroids are arrays which are unhashable,\n centroids_indices = range(len(unlabelled_centroids))\n # initialize an empty list for each centroid. The list will contain\n # all the datapoints that are closer to that centroid than to any other.\n # That list is the cluster of that centroid.\n clusters = {c: [] for c in centroids_indices}\n \n for (label, Xi) in labelled_data:\n # for each datapoint, pick the closest centroid.\n smallest_distance = float(\"inf\")\n for cj_index in centroids_indices:\n cj = unlabelled_centroids[cj_index]\n distance = np.linalg.norm(Xi - cj)\n if distance < smallest_distance:\n closest_centroid_index = cj_index\n smallest_distance = distance\n # allocate that datapoint to the cluster of that centroid.\n clusters[closest_centroid_index].append((label,Xi))\n return list(clusters.values())",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)"
] | [
"0.6171579",
"0.6096346",
"0.6085736",
"0.60224193",
"0.60157436",
"0.5906784",
"0.59040046",
"0.5876902",
"0.5802641",
"0.5690463",
"0.5554491",
"0.5509175",
"0.543474",
"0.5428747",
"0.5418654",
"0.5400812",
"0.53619266",
"0.5357413",
"0.5356873",
"0.5355492",
"0.5326791",
"0.53175026",
"0.5307307",
"0.5303562",
"0.5293198",
"0.5289179",
"0.52846754",
"0.5261245",
"0.52536666",
"0.52128583"
] | 0.6685486 | 0 |
This method solves a conflict of labels by propagating the older (lower) label to the Voronoi cells labeled with the newer label | def propagateLabel(self, l1, l2):
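#l1 and l2 are two competing cluster labels; the smaller (older) one is kept and the larger one is removed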
if l1 != l2:
winner = min(l1, l2)
loser = max(l1, l2)
loserN = 0
superiorN = 0
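#Reassign every cell carrying the newer (higher) label to the older (lower) one, and shift all labels above the removed label down by one so the numbering stays contiguous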
for i,l in enumerate(self.labels):
if l == loser:
loserN += 1
self.labels[i] = winner
if l > loser:
superiorN += 1
self.labels[i] = l - 1
# print('Loser Label is ' + str(loser) + ' . With ' + str(loserN) + ' associated cells. Winner label is ' + str(winner)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def propagate_labels(image,labels,conflict=0):\n rlabels,_ = label(image)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n oops = -(1<<30)\n for o,i in cors.T:\n if outputs[o]!=0: outputs[o] = oops\n else: outputs[o] = i\n outputs[outputs==oops] = conflict\n outputs[0] = 0\n return outputs[rlabels]",
"def heuristic2_label_OBD(n, P, label, critical=None):\n print \"trying to label \" + str(n) + \" with \" + str(label)\n nodes_labeled = []\n if ('critical' in P.node[n].keys()) and (P.node[n]['critical']==True) and (P.node[n]['OBDlabel'] != label) :\n print \"FAIL on critical and not the same label.\"\n return (False, []) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[n]['OBDlabel'] = label\n nodes_labeled.append(n) # this is a list that gets passed through recursions\n if critical == True:\n P.node[n]['critical'] = True\n # labeling part done\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if P.node[neigh]['OBDlabel'] > new_label:\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now the recursive step - labeling all these nodes\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n print \"trying perm: \" + str(perm)\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n break\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n print \"removing label of \" + str(nn)\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n print \"Win in labeling neighbors of \" + str(n)\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)\n print \"FAIL of all permutations from \" + str(n)",
"def generate_label(incidentes_viales_df):\n incidentes_viales_df.codigo_cierre.mask(incidentes_viales_df.codigo_cierre ==\n r\"(A) La unidad de atención a emergencias fue despachada, \"\n \"llegó al lugar de los hechos y confirmó la emergencia reportada\",\n 'A', inplace=True)\n incidentes_viales_df.codigo_cierre.mask(incidentes_viales_df.codigo_cierre ==\n r'(N) La unidad de atención a emergencias fue despachada, '\n 'llegó al lugar de los hechos, pero en el sitio del evento '\n 'nadie solicitó el apoyo de la unidad',\n 'N', inplace=True)\n incidentes_viales_df.codigo_cierre.mask(incidentes_viales_df.codigo_cierre ==\n r'(D) El incidente reportado se registró en dos o más '\n 'ocasiones procediendo a mantener un único reporte (afirmativo,'\n ' informativo, negativo o falso) como el identificador para el '\n 'incidente',\n 'D', inplace=True)\n incidentes_viales_df.codigo_cierre.mask(incidentes_viales_df.codigo_cierre ==\n r'(F) El operador/a o despachador/a identifican, antes de dar '\n 'respuesta a la emergencia, que ésta es falsa. O al ser '\n 'despachada una unidad de atención a emergencias en el lugar '\n 'de los hechos se percatan que el incidente no corresponde al '\n 'reportado inicialmente',\n 'F', inplace=True)\n incidentes_viales_df.codigo_cierre.mask(incidentes_viales_df.codigo_cierre ==\n r'(I) El incidente reportado es afirmativo y se añade '\n 'información adicional al evento',\n 'I', inplace=True)\n\n incidentes_viales_df['label'] = np.where(\n (incidentes_viales_df.codigo_cierre == 'F') | (incidentes_viales_df.codigo_cierre == 'N'), 1, 0)\n\n return incidentes_viales_df",
"def voronoi_labelling(self, seed):\n import heapq\n if hasattr(seed, '__iter__') == False:\n seed = [seed]\n try:\n if (self.weights < 0).any():\n raise ValueError('some weights are non-positive')\n except:\n raise ValueError('undefined weights')\n dist, active = np.inf * np.ones(self.V), np.ones(self.V)\n label = - np.ones(self.V, np.int_)\n idx, neighb, weight = self.compact_neighb()\n dist[seed] = 0\n label[seed] = np.arange(len(seed))\n dg = list(zip(np.zeros_like(seed), seed))\n heapq.heapify(dg)\n for j in range(self.V):\n end = False\n while True:\n if len(dg) == 0:\n end = True\n break\n node = heapq.heappop(dg)\n if active[node[1]]:\n break\n if end:\n break\n dwin, win = node\n active[win] = False\n # the folllowing loop might be vectorized\n for i in range(idx[win], idx[win + 1]):\n l, newdist = neighb[i], dwin + weight[i]\n if newdist < dist[l]:\n heapq.heappush(dg, (newdist, l))\n dist[l] = newdist\n label[l] = label[win]\n return label",
"def heuristic2B_label_OBD(n, P, label, critical=None):\n nodes_labeled = []\n\n flag_critical = False # if I will label more than one neighbor from now on, then the labels will be critical (not to be changed by others)\n new_label = label + 1\n \n neighbors = P.neighbors(n)\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys(): # if it has a label\n if P.node[neigh]['OBDlabel'] > new_label: # and it is higher than what I would use for labeling\n new_label = P.node[neigh]['OBDlabel']\n # we got maximum of current label or any node that neighbors have - now we label them all with that\n \n neighbors_to_label = []\n for neigh in neighbors:\n if 'OBDlabel' in P.node[neigh].keys():\n if (P.node[neigh]['OBDlabel'] >= P.node[n]['OBDlabel']) or (P.node[neigh]['OBDlabel'] == None): # now they can have it, but set to None (because of removal in failers)\n neighbors_to_label.append(neigh)\n else: # if set and smaller than mine, leave them alone\n pass\n else: # if not set, then not lower and not labelled\n neighbors_to_label.append(neigh)\n # now we have all the neighbors that need to be labeled\n \n if len(neighbors_to_label) > 1:\n flag_critical = True\n # and now labeling all these nodes\n \n for neigh in neighbors_to_label:\n if ('critical' in P.node[neigh].keys()) and (P.node[neigh]['critical']==True) and (P.node[neigh]['OBDlabel'] != new_label) :\n return (False, nodes_labeled) # being critical, we could avoid failure only if the label to set would be the same (it happens)\n else:\n P.node[neigh]['OBDlabel'] = new_label\n nodes_labeled.append(neigh) # this is a list that gets passed through recursions\n if flag_critical == True:\n P.node[neigh]['critical'] = True\n # labeling part done\n \n # and now recursive step - going into each neighbor to continue, in any order if necessary\n permutations = itertools.permutations(neighbors_to_label) # iterator : gets exhausted as we access elements\n for perm in permutations:\n this_run_success = True\n this_run_labeled = []\n for el in perm:\n (s, nl) = heuristic2B_label_OBD(el, P, new_label, flag_critical)\n this_run_labeled = this_run_labeled + nl\n if s == False:\n this_run_success = False\n if this_run_success == False:\n # then unlabel all that were labelled up to now\n for nn in this_run_labeled:\n P.node[nn]['OBDlabel'] = None\n P.node[nn]['critical'] = False\n else: # obviously success is True, we managed to label all others...\n nodes_labeled = nodes_labeled + this_run_labeled\n return (True, nodes_labeled)\n break\n # if no permutation is successful, we end up returning the last line\n return (False, nodes_labeled)",
"def splitCell(buff,index,ref_label,new_label):\n cell_before = np.copy(buff[:,:,index-1])\n cell_after = np.copy(buff[:,:,index])\n \n mask_after = cell_after ==ref_label\n \n cell_before[np.logical_not(mask_after)] = 0\n \n mask_ref_label = cell_before ==ref_label\n mask_new_label = cell_before==new_label\n \n after_sure_ref = np.logical_and(mask_ref_label,mask_after)\n after_sure_new = np.logical_and(mask_new_label,mask_after)\n after_unsure = np.logical_and(mask_after,np.logical_not(np.logical_or(after_sure_ref,after_sure_new) ) )\n\n xref,yref = np.where(after_sure_ref)\n ref_pts = np.concatenate((xref.reshape(-1,1),yref.reshape(-1,1)),axis=1)\n xnew,ynew = np.where(after_sure_new)\n new_pts = np.concatenate((xnew.reshape(-1,1),ynew.reshape(-1,1)),axis=1)\n \n labels_ref = np.ones(xref.shape[0])\n labels_new = np.zeros(xnew.shape[0])\n labels = np.concatenate((labels_ref,labels_new),axis=0)\n labels.reshape(-1,1)\n X= np.concatenate((ref_pts,new_pts),axis = 0)\n \n xu,yu = np.where(after_unsure)\n u_pts = np.concatenate((xu.reshape(-1,1),yu.reshape(-1,1)),axis=1)\n neigh = KNeighborsClassifier(n_neighbors=5)\n neigh.fit(X, labels)\n pred = neigh.predict(u_pts)\n for i in range(pred.shape[0]):\n #if pred is 1 goes to ref if 0 goes to new\n if pred[i]==1:\n after_sure_ref[u_pts[i,0],u_pts[i,1]]=True\n else:\n after_sure_new[u_pts[i,0],u_pts[i,1]]=True\n #Assigning the new values to the thing:\n buff[after_sure_ref,index] = ref_label\n buff[after_sure_new,index] = new_label",
"def tidy_restriction_labels(self,direction='up',temporary=None,underline_unique=None):\n\t\tc = self.seqframe\n\t\tif direction=='up':\n\t\t l=-15\n\t\t base_shift=0\n\t\telse:\n\t\t\tl=15\n\t\t\tbase_shift=30\n\n\t\t#clear the temporary rects and lines from previous time\n\t\tself.seqframe.delete('templabelrect')\n\t\tself.seqframe.delete('templine')\n\t\tcurrentsites=[]\n\t\t#use seperate lists depending on whether temp sites are being drawn\n\t\tif temporary:\n\t\t\tfor obj in self.temp_objs.keys():\n\t\t\t\tcurrentsites.append(obj)\n\t\telse:\n\t\t\tfor obj in self.new_seq_win_objs.keys():\n\t\t\t\tcurrentsites.append(obj)\n\t\t#for each obj, see if it overlaps nearest, and if so - move it up/down\n\t\tfor obj in currentsites:\n\t\t\tbox = c.bbox(obj)\n\t\t\tx1=box[0]\n\t\t\ty1=box[1]\n\t\t\tx2=box[2]\n\t\t\ty2=box[3]\n\t\t\toverlap=[]\n\t\t\toverlap=c.find_overlapping(x1,y1,x2,y2)\n\t\t\tif overlap:\n\t\t\t\tfor overobj in overlap:\n\t\t\t\t\tif overobj != obj:\n\t\t\t\t\t#if overlap is a text label, move it upwards until\n\t\t\t\t\t#it no longer overlaps another label\n\t\t\t\t\t\tif 'textlabel' in c.gettags(overobj):\n\t\t\t\t\t\t\tc.move(overobj, 0, l)\n\n\t\t#Now plot rectangle, lift text and then remove sites from list for next time\n\t\t#first determine if unique site from tag\n\t\tfor obj in currentsites:\n\t\t\t#first draw the lines..\n\t\t\tcoords = c.coords(obj)\n\t\t\tenzyme = c.gettags(obj)[3]\n\t\t\tsitetag = c.gettags(obj)[4]\n\t\t\tx=coords[0]\n\t\t\ty1=coords[1]\n\t\t\ty2=self.seq_row+base_shift\n\t\t\tyorg_here=y2\n\t\t\t# If we have a protein sequence then lift the line a bit\n\t\t\tif self.data.has_key('ORF_selected'):\n\t\t\t\tif direction=='up':\n\t\t\t\t\tyorg_here=(y2-15)/self.y_scale\n\t\t\t\telse:\n\t\t\t\t\tyorg_here=(y2-15)*self.y_scale\n # If a comparison sequence is loaded raise line more\n\t\t\tif self.show_comp_sequence.get()==1:\n\t\t\t\tif direction=='up':\n\t\t\t\t\tif self.maxseqlevel==0:\n\t\t\t\t\t\tyorg_here=(yorg_here-15)/self.y_scale\n\t\t\t\t\telse:\n\t\t\t\t\t\tyorg_here=yorg_here-(self.maxseqlevel*15)/self.y_scale\n \t\t\t\t\tif self.primer_displayed==1 and self.maxseqlevel>0:\n\t\t\t\t\t\tyorg_here=(yorg_here-15)/self.y_scale\n\t\t\t\telse:\n\t\t\t\t yorg_here=(yorg_here-15)*self.y_scale\n\n\t\t\tif temporary:\n\t\t\t\tline=self.seqframe.create_line(x,yorg_here-5,x,y1,fill=self.linecol,\n\t\t\t\t\t\t width=2,stipple='gray25',tag=('templine',enzyme,sitetag,direction))\n\t\t\telse:\n\t\t\t\tline=self.seqframe.create_line(x,yorg_here-5,x,y1,fill=self.linecol,\n\t\t\t\t\t\twidth=2,stipple='gray25',tag=('line',enzyme,sitetag,direction))\n\t\t\t#lower the lines behind other objects\n\t\t\tc.tag_lower(line)\n\t\t\tif 'u' in c.gettags(obj):\n\t\t\t\tfillcolour='pink'\n\t\t\telse:\n\t\t\t\tfillcolour='lightyellow'\n\t\t\tbox = c.bbox(obj)\t\t\t\n\t\t\tif temporary:\n\t\t\t\trect = c.create_rectangle(box,tag=('templabelrect',enzyme,sitetag),fill=fillcolour)\n\t\t\telse:\n\t\t\t\trect = c.create_rectangle(box,tag=('labelrect',enzyme,sitetag),fill=fillcolour)\t\t\t\t\n\t\t\tc.tag_raise(obj)",
"def update_labels(mask1, mask2):\n # Find the object in mask2 that has maximum overlap with an object in max1,\n # (as a fraction of the objects pixels in mask1)\n def get_max_overlap(mask1, mask2, label1):\n # Count overlapping pixels.\n labels, counts = np.unique(mask2[mask1 == label1], return_counts=True)\n # Sort labels by counts (ascending).\n labels_sorted = labels[np.argsort(counts)]\n counts_sorted = counts[np.argsort(counts)]\n # Select new label with maximum overlap.\n max_overlap = labels_sorted[-1]\n return max_overlap\n \n def main(mask1, mask2):\n if not (mask1.shape == mask2.shape):\n raise ValueError(\"Masks do not have the same shape.\")\n # Initialize blank mask.\n updated_mask = np.zeros(mask2.shape)\n # Go one-by-one through the labels in mask2\n for label in np.unique(mask2)[1:]:\n # Find label in mask1 with maximum overlap with nuc from mask2.\n mask1_besthit = get_max_overlap(mask2, mask1, label)\n # Find reverse: best hit for the mask1 label in mask2.\n mask2_besthit = get_max_overlap(mask1, mask2, mask1_besthit)\n # If the labels are reciprocal best hits, update label in \n # new mask to have the shape of the object in mask 2 with \n # the label propagated from mask1.\n if ((mask2_besthit == label) and (mask1_besthit != 0)):\n updated_mask[mask2 == label] = mask1_besthit\n\n return updated_mask\n return main(mask1, mask2)",
"def autolabel(rects):",
"def fix_label(df):\n df_label = df['OVERALL_DIAGNOSIS']\n\n df_label.replace({0: -1, 1: 1}, inplace=True)\n df = df.drop(['OVERALL_DIAGNOSIS'], axis=1)\n df = pd.concat([df_label, df], axis=1)\n df.columns.values[0] = \"label\"\n return df",
"def move_restriction_label(self, obj, y): \n c=self.seqframe\n oldy = c.coords(obj)[1]\n enzyme = c.gettags(obj)[3]\n site = c.gettags(obj)[4]\n \n #print enzyme, site, obj \n rects=c.find_withtag('labelrect')+c.find_withtag('templabelrect')\n lines=c.find_withtag('line')+c.find_withtag('templine')\n \n for item in rects:\n if site in c.gettags(item) and enzyme in c.gettags(item):\n rect = item \n for item in lines:\n tags=c.gettags(item)\n if site in tags and enzyme in tags:\n line = item\n x1,y1,x2,y2 = c.coords(line)\n c.delete(line)\n line=c.create_line(x1, y1,x1,y, fill=self.linecol,\n width=2,stipple='gray25',\n tag=tags)\n c.tag_lower(line)\n \n c.move(obj, 0, y-oldy)\n c.move(rect, 0, y-oldy)\n c.tag_raise(rect)\n c.tag_raise(obj) \n return",
"def findLabel(self, x, y):\n #- xLabel -\n if x in self.xPoints:\n xLabel = self.xPoints.index(self.boundary_xyCoord[k][0])\n else:\n # Find the points on the left and right of this point in xPoints\n done = 0\n k = 0\n while done == 0:\n if x > self.xPoints[k]: \n done = 1;\n leftP = (self.xPoints[k],y)\n rightP = (self.xPoints[k+1],y)\n else:\n k += 1 \n\n # Check whether the left or right point is an internal mesh point\n if leftP in self.internal_xyCoord and rightP in self.internal_xyCoord:\n # It is impossible to have both the left and right points inside a \n # grid unless we deal with a non-convex shape (thid should be \n # addressed in future developments) \n raise customError('**Error! Both the left or right points of the given point are inside the mesh') \n elif leftP in self.internal_xyCoord:\n # If the left point is inside the grid the label of the current i\n # point should be one plus the label of the point on its left\n xLabel = self.xPoints.index(leftP[0]) + 1\n elif rightP in self.internal_xyCoord:\n # If the right point is inside the grid the label of the current \n # point should be one minos the label of the point on its right\n xLabel = self.xPoints.index(rightP[0]) - 1\n else:\n raise customError('**Error! Neither the left or right points of the given point are inside the mesh') \n\n #- yLabel -\n if y in self.self.yPoints:\n yLabel = self.yPoints.index(self.boundary_xyCoord[k][0])\n else:\n # Find the points on the left and right of this point in xPoints\n done = 0\n k = 0\n while done == 0:\n if y > self.yPoints[k]: \n done = 1;\n lowerP = (x,self.yPoints[k])\n upperP = (x,self.yPoints[k+1])\n else:\n k += 1 \n\n # Check whether the lower or upper point is an internal mesh point\n if lowerP in self.internal_xyCoord and upperP in self.internal_xyCoord:\n # It is impossible to have both the lower and upper points \n # inside a grid unless we deal with a non-convex shape \n raise customError('**Error! Both the lower or upper points of the given point are inside the mesh') \n elif lowerP in self.internal_xyCoord:\n # If the lower point is inside the grid the label of the current \n # point should be one plus the label of the point on its lower\n yLabel = self.xPoints.index(lowerP[0]) + 1\n elif upperP in self.internal_xyCoord:\n # If the upper point is inside the grid the label of the current \n # point should be one minos the label of the point on its upper\n yLabel = self.xPoints.index(upperP[0]) - 1\n else:\n raise customError('**Error! Neither the lower or upper points of the given point are inside the mesh')",
"def replaceLabels(self, old, new):\n for c in self.iterCurves():\n c.update({'label': c.getAttribute('label').replace(old, new)})",
"def UpdateLabel(self) -> _n_6_t_0:",
"def _update_label(self, outer_pos, inner_pos, new_label):\n r, c = outer_pos\n ir, ic = inner_pos\n self.inner_boards[r][c][ir][ic][\"text\"] = new_label",
"def updateLabels(correspondance_list,labels_list,i,image):\n l_prev_index = len(labels_list[(i-1)%2])\n l_curr_index = len(labels_list[i%2])\n prev_index = labels_list[(i-1)%2]\n index_changes = []\n ref_image = np.copy(image)\n\n if l_prev_index==l_curr_index: #Same number of cells\n \n for x,y in correspondance_list: \n labels_list[i%2][y] = prev_index[x]\n image[ref_image==y+1] = prev_index[x]\n \n if l_curr_index > l_prev_index: # Apparition\n new_index = max(prev_index)+2\n for x,y in correspondance_list: \n if x<l_prev_index:\n labels_list[i%2][y] = prev_index[x] \n image[ref_image==y+1] = prev_index[x]\n \n else:\n labels_list[i%2][y] = new_index\n image[ref_image==y+1] = new_index\n index_changes.append(new_index)\n new_index+=1\n \n if l_curr_index < l_prev_index: # Disparition\n for x,y in correspondance_list: \n if y<l_curr_index:\n labels_list[i%2][y] = prev_index[x]\n image[ref_image==y+1] = prev_index[x]\n else:\n index_changes.append(prev_index[x])\n return index_changes",
"def test_issue_replace_labels(self):\n pass",
"def _rectified_relabel(infr, cc_subgraphs):\n # Determine which names can be reused\n from wbia.scripts import name_recitifer\n\n infr.print('grouping names for rectification', 3)\n grouped_oldnames_ = [\n list(nx.get_node_attributes(subgraph, 'name_label').values())\n for count, subgraph in enumerate(cc_subgraphs)\n ]\n # Make sure negatives dont get priority\n grouped_oldnames = [\n [n for n in group if len(group) == 1 or n > 0] for group in grouped_oldnames_\n ]\n infr.print(\n 'begin rectification of %d grouped old names' % (len(grouped_oldnames)), 2\n )\n new_labels = name_recitifer.find_consistent_labeling(\n grouped_oldnames, verbose=infr.verbose >= 3\n )\n infr.print('done rectifying new names', 2)\n new_flags = [\n not isinstance(n, int) and n.startswith('_extra_name') for n in new_labels\n ]\n\n for idx in ut.where(new_flags):\n new_labels[idx] = infr._next_nid()\n\n for idx, label in enumerate(new_labels):\n if label < 0 and len(grouped_oldnames[idx]) > 1:\n # Remove negative ids for grouped items\n new_labels[idx] = infr._next_nid()\n return new_labels",
"def relabel_variables(self, mapping, inplace=True):\n graph = self.graph\n ising_linear_ranges = self.ising_linear_ranges\n ising_quadratic_ranges = self.ising_quadratic_ranges\n\n try:\n old_labels = set(mapping.keys())\n new_labels = set(mapping.values())\n except TypeError:\n raise ValueError(\"mapping targets must be hashable objects\")\n\n for v in new_labels:\n if v in graph and v not in old_labels:\n raise ValueError(('A variable cannot be relabeled \"{}\" without also relabeling '\n \"the existing variable of the same name\").format(v))\n\n if not inplace:\n return Specification(nx.relabel_nodes(graph, mapping, copy=True), # also checks the mapping\n tuple(mapping.get(v, v) for v in self.decision_variables),\n self.feasible_configurations, # does not change\n vartype=self.vartype, # does not change\n ising_linear_ranges={mapping.get(v, v): ising_linear_ranges[v] for v in graph},\n ising_quadratic_ranges={mapping.get(v, v): {mapping.get(u, u): r\n for u, r in neighbors.items()}\n for v, neighbors in ising_quadratic_ranges.items()})\n else:\n # now we need the ising_linear_ranges and ising_quadratic_ranges\n shared = old_labels & new_labels\n\n if shared:\n # in this case we need to transform to an intermediate state\n # counter will be used to generate the intermediate labels, as an easy optimization\n # we start the counter with a high number because often variables are labeled by\n # integers starting from 0\n counter = itertools.count(2 * len(self))\n\n old_to_intermediate = {}\n intermediate_to_new = {}\n\n for old, new in mapping.items():\n if old == new:\n # we can remove self-labels\n continue\n\n if old in new_labels or new in old_labels:\n\n # try to get a new unique label\n lbl = next(counter)\n while lbl in new_labels or lbl in old_labels:\n lbl = next(counter)\n\n # add it to the mapping\n old_to_intermediate[old] = lbl\n intermediate_to_new[lbl] = new\n\n else:\n old_to_intermediate[old] = new\n # don't need to add it to intermediate_to_new because it is a self-label\n\n Specification.relabel_variables(self, old_to_intermediate, inplace=True)\n Specification.relabel_variables(self, intermediate_to_new, inplace=True)\n return self\n\n # modifies graph in place\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n # this is always a new object\n self.decision_variables = tuple(mapping.get(v, v) for v in self.decision_variables)\n\n # we can just relabel in-place without worrying about conflict\n for v in old_labels:\n if v in mapping:\n ising_linear_ranges[mapping[v]] = ising_linear_ranges[v]\n del ising_linear_ranges[v]\n\n # need to do the deeper level first\n for neighbors in ising_quadratic_ranges.values():\n for v in list(neighbors):\n if v in mapping:\n neighbors[mapping[v]] = neighbors[v]\n del neighbors[v]\n\n # now the top level\n for v in old_labels:\n if v in mapping:\n ising_quadratic_ranges[mapping[v]] = ising_quadratic_ranges[v]\n del ising_quadratic_ranges[v]\n\n return self",
"def mapping_leaves(leaves1, leaves2, label1, label2, nodes1, links1, nodes2, links2, mode, ED, nnlabel):\n \n # Varibles for labeled leaves\n nleaves1 = []\n nleaves2 = []\n nlabel = []\n \n # Variables for unlabeled leaves\n UKleaves1 = []\n UKleaves2 = []\n UKlabel1 = []\n UKlabel2 = []\n \n nleaves = np.zeros(len(leaves1))\n label = np.zeros(len(leaves1))\n UK1idx = []\n UK2idx = []\n for i in range(0, len(leaves1)):\n if label1[i] in label2 and label1[i] < MAX_NODES:\n nleaves1.append(leaves1[i])\n nlabel.append(label1[i])\n idx = label2.index(label1[i])\n nleaves[idx] = leaves1[i]\n label[idx] = nnlabel[i]\n else:\n UKleaves1.append(leaves1[i])\n UKlabel1.append(label1[i])\n UK1idx.append(i)\n if label2[i] in label1 and label2[i] < MAX_NODES:\n nleaves2.append(leaves2[i])\n else:\n UKleaves2.append(leaves2[i])\n UKlabel2.append(label2[i])\n UK2idx.append(i)\n if len(UK1idx)>0:\n # Calculated the distance matrix from unmatched leaves to matched leaves\n dist1 = get_tree_dist_between_leaves(UKleaves1, nleaves1, nodes1, links1, mode, ED)\n dist2 = get_tree_dist_between_leaves(UKleaves2, nleaves2, nodes2, links2, mode, ED)\n # Calculate resorting rule with minimum weight matching of distance matrices\n dict1 = map_nodes_leaves(dist2, dist1)\n for i in range(0, len(dict1)):\n # Update labels using resorting rule.\n nleaves[UK2idx[i]] = leaves1[UK1idx[dict1[i]]]\n label[UK2idx[i]] = nnlabel[UK1idx[dict1[i]]]\n return nleaves, label",
"def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def _refinement_random_walker(\n self,\n ds_labels,\n ds_maskROI,\n ds_mask,\n target_label):\n\n ds_labels[(ds_maskROI == False) & ds_mask] = target_label\n ds_labels[(ds_maskROI == False) & (ds_mask == False)] = -1\n\n labels = zoom(\n ds_labels,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0)\n maskROI = zoom(\n ds_maskROI,\n zoom=np.float32(\n self.size) /\n self.ds_size,\n order=0).astype(\n np.bool)\n\n # Extract labelled and unlabelled vertices\n m_unlabeled = (labels == 0) & (maskROI)\n m_foreground = (labels == target_label)\n\n unlabeled = np.ravel_multi_index(np.where(m_unlabeled), self.size)\n labeled = np.ravel_multi_index(np.where(labels != 0), self.size)\n #labeled = np.ravel_multi_index(np.where((m_foreground) | (labels > 0)), self.size)\n\n # Preparing the right handside of the equation BT xs\n B = self.L[unlabeled][:, labeled]\n mask = (labels[labels != 0]).flatten() == target_label\n fs = sparse.csr_matrix(mask).transpose()\n rhs = B * fs\n\n # Preparing the left handside of the equation Lu\n Lu = self.L[unlabeled][:, unlabeled]\n\n # Solve the linear equation Lu xu = -BT xs\n if self._pyamg_found:\n ml = ruge_stuben_solver(Lu)\n M = ml.aspreconditioner(cycle='V')\n else:\n M = None\n xu = cg(Lu, -rhs.todense(), tol=1e-3, M=M, maxiter=120)[0]\n\n probability = np.zeros(self.size, dtype=np.float32)\n probability[m_unlabeled] = xu\n probability[m_foreground] = 1\n\n return probability",
"def incorporate(self, other, label_name, jump_point_name):\n labels = []\n for ii in other.__sections:\n ii.replace_entry_point(jump_point_name)\n labels += ii.gather_labels()\n labels.remove(jump_point_name)\n labels.sort(key=len, reverse=True)\n for ii in other.__sections:\n ii.replace_labels(labels, label_name)\n self.add_sections(other.__sections)",
"def labelingLVQ(self):\n numLabels = len(np.unique(self.y))\n for i, x in enumerate(self.x):\n w = self.find_closest(x)[0]\n for nl in range(numLabels):\n if self.y[i] == nl:\n self.labels[nl, w[0], w[1]] += 1\n return self.labels",
"def process_cell(self, neighbourhood: List[Cell], old_cell: Cell) -> Cell:",
"def assign(self):\n\n for s in self.spots:\n if self.cells[s[:2]] == 0:\n label = find_nearest_region(self.cells, *s[:2])\n else:\n label = self.cells[s[:2]]\n\n s.region = label",
"def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;",
"def _add_labels(self):\n coords = self['pore.coords']\n self['pore.front'] = coords[:,0]<(0.1*self._Lx)\n self['pore.back'] = coords[:,0]>(0.9*self._Lx)\n self['pore.left'] = coords[:,1]<(0.1*self._Ly)\n self['pore.right'] = coords[:,1]>(0.9*self._Ly)\n self['pore.bottom'] = coords[:,2]<(0.1*self._Lz)\n self['pore.top'] = coords[:,2]>(0.9*self._Lz)\n bnds = self.pores(labels=['front','back','left','right','bottom','top'])\n self['pore.boundary'] = False\n self['pore.boundary'] = bnds",
"def change_vector_label(row_index, att_data, solutions_found, changed_variables, variables):\n\n original_vector = att_data.copy()\n changes = 0\n found_solution = 0\n _, error, temp = scale_input_and_detect_single(row_index, att_data)\n previous_best_error = error[row_index]\n temp = sort_temp_and_drop(row_index, temp)\n prev_col_name = None\n num_changes_without_optimizations = 0\n last_optimization = 0\n newBest = att_data.copy()\n optimized = False\n changed_variables[row_index] = variables[max_concealable_variables]\n while changes < budget and (changes - last_optimization) < patience and not(found_solution):\n col_name = choose_column(row_index, temp, prev_col_name, num_changes_without_optimizations,\n changed_variables, max_concealable_variables)\n prev_col_name = col_name\n if debug:\n print('______________________________')\n print(col_name)\n print('______________________________')\n\n values = np.arange(\n normal_op_ranges[col_name]['min'], normal_op_ranges[col_name]['max']+0.1, normal_op_ranges[col_name]['step'])\n # print(values)\n att_data = att_data.append(\n [att_data] * (len(values)), ignore_index=True)\n att_data = att_data[:-1] # delete eccessive lenght\n # substitute column values usign normal operations\n att_data[col_name] = values\n att_data, error = scale_input_and_detect(row_index, att_data)\n if error < previous_best_error:\n if debug:\n print(error, previous_best_error)\n previous_best_error = error\n newBest = att_data.copy()\n last_optimization = changes\n num_changes_without_optimizations = 0\n optimized = True\n try:\n if not(col_name) in changed_variables[row_index]:\n changed_variables[row_index].append(col_name)\n except:\n changed_variables[row_index] = [col_name]\n else:\n optimized = False\n\n if error < theta:\n solutions_found = solutions_found + 1\n found_solution = 1\n print('Found solution number: ' + str(solutions_found))\n\n if optimized == False:\n num_changes_without_optimizations = num_changes_without_optimizations + 1\n\n att_data = newBest.copy()\n _, error, temp = scale_input_and_detect_single(\n row_index, att_data)\n temp = sort_temp_and_drop(row_index, temp)\n changes = changes + 1\n if debug:\n print(temp)\n print('--__--__--')\n print(changes)\n print('--__--__--')\n compute_mutation_factor(original_vector, att_data.copy())\n\n return newBest.copy(), solutions_found"
] | [
"0.62960845",
"0.5878045",
"0.5808923",
"0.57926816",
"0.5774841",
"0.56972456",
"0.5681934",
"0.56174916",
"0.5592107",
"0.5577403",
"0.5546397",
"0.5535639",
"0.5447317",
"0.5445591",
"0.5424166",
"0.5400711",
"0.5394687",
"0.53925383",
"0.53905594",
"0.53837645",
"0.5355904",
"0.5348101",
"0.5318508",
"0.5251459",
"0.5248637",
"0.52416563",
"0.5223394",
"0.5211978",
"0.5197126",
"0.5177091"
] | 0.65136176 | 0 |
Main method of the class, in charge of examining the skeleton cluster labels and presenting the results | def run(self):
for l in self.uniqueSkel:
mask = np.arange(len(self.skel))[self.skelLabels == l]
counts = self.findNearest(mask)
self.memberships[l] = counts
#self.memberships is an array with as many rows as there are skeleton labels and as many columns as there are
#Voronoi cluster labels. The i-th row shows, for all skeleton points of cluster label i, how many of them belong
#to each of the Voronoi cluster labels. More precisely, the j-th column of the i-th row of this array shows how
#many skeleton points of cluster label i have a closest Voronoi cell center of label j.
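#Illustrative, hypothetical example (not taken from any actual run): with two skeleton labels and with
#Voronoi labels 0 (void), 1 and 2, self.memberships could end up as
#    [[ 5, 40,  3],
#     [ 9,  2, 31]]
#i.e. 40 skeleton points of skeleton label 0 have their nearest Voronoi cell center in the Voronoi
#cluster labelled 1, while 5 of them lie closest to a void (label 0) cell center.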
print('Out of ' + str(len(self.skel)) + ' skeleton points, ' + str(sum(self.memberships[:, 0])) + ' (' + str(round(sum(self.memberships[:, 0]) * 100/len(self.skel), 3)) + ' %) appear in areas classified as void areas by Voronoi')
for l in self.uniqueSkel:
members = sum(self.skelLabels == l)
topVor = np.argsort(self.memberships[l])[::-1][:5] - 1
counts = np.sort(self.memberships[l])[::-1][:5]
print('For the ' + str(members) + ' skeleton points with label ' + str(l) + ': ')
for i in range(5):
if counts[i] > 0:
if topVor[i] == -1:
add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100 / members, 3)) + ' %) are not associated with a Voronoi cluster cell'
else:
add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100/ members, 3)) + ' %) belong to the Voronoi Cluster with label ' + str(topVor[i])
print(add)
self.plotResults() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\n dist = \"Euclidean\"\n path = \"\"\n k_v = 2\n error = []\n k_vals = []\n\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--path\":\n path = sys.argv[i+1]\n if sys.argv[i] == \"--k\":\n k_v = int(sys.argv[i+1])\n if sys.argv[i] == \"[--distance Manhattan]\":\n dist = \"Manhattan\"\n if sys.argv[i] == \"[--distance Minkowski]\":\n dist = \"Minkowski\"\n\n\n training_data = create_data(path)\n\n for k in range(2,10):\n k_vals.append(k)\n if k>2:\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n trained_data, centroids = get_clusters(training_data, k, dist)\n error.append(rms(trained_data, dist))\n plot_error(k_vals, error)\n\n for i in range(len(training_data)):\n training_data[i].remove(training_data[i][-1])\n\n trained_data, centroids = get_clusters(training_data, k_v, dist)\n\n test_clusters(trained_data, centroids)",
"def main():\n parser = argparse.ArgumentParser(description=\"Wrapper of the scikit-learn AgglomerativeClustering method. \", formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('--config', required=False, help='Configuration file')\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_dataset_path', required=True, help='Path to the input dataset. Accepted formats: csv.')\n required_args.add_argument('--output_results_path', required=True, help='Path to the clustered dataset. Accepted formats: csv.')\n parser.add_argument('--output_plot_path', required=False, help='Path to the clustering plot. Accepted formats: png.')\n\n args = parser.parse_args()\n args.config = args.config or \"{}\"\n properties = settings.ConfReader(config=args.config).get_prop_dic()\n\n # Specific call of each building block\n agglomerative_clustering(input_dataset_path=args.input_dataset_path,\n output_results_path=args.output_results_path,\n output_plot_path=args.output_plot_path,\n properties=properties)",
"def main():\n\n# The following codes loads the data set into a 2D np array called data\n\twith open('complete_data.csv') as features_file:\n\t\tcsv_reader = csv.DictReader(features_file, delimiter = ',')\n\t\tdata = []\n\t\tcounter = 0\n\t\tfor row in csv_reader:\n\t\t\tprint(\"csv_reader row:\", row)\n\t\t\t# if(counter == 20):\n\t\t\t# \tbreak\n\t\t\tcounter+=1\n\t\t\tcleaned_row = []\n\t\t\tcleaned_row.append(row['track'])\n\t\t\tcleaned_row.append(row['loudness'])\n\t\t\tcleaned_row.append(row['score'])\n\t\t\tdata.append(np.array(cleaned_row))\n\t\tdata = random.sample(list(data), 30)\n\t\tdata = np.array(data)\n\n\n\tX = []\n\tY = []\n\tcounter = 0\n\tfor row in data:\n\t\t# if(counter == 10):\n\t\t# \tbreak\n\t\t# counter+=1\n\t\tY.append(row[0])\n\t\tl = [float(i) for i in row[1:]]\n\t\tX.append(l)\n\tX = np.array(X)\n\tY = np.array(Y)\n\n\tcentroid_indices2,centroids2 = sk_learn_cluster(X,Y,3)\n\n\tplot_word_clusters(data, centroids2, centroid_indices2 )",
"def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")",
"def main():\n nlp = spacy.load(\"en_core_web_sm\")\n\n notebook_dir = os.getcwd()\n situ_df = pd.read_csv('data/interim/calltaker_situation.csv', \n keep_default_na = False, \n converters = {'sop': eval})\n doc_term_bow, corpus, dictionary = get_dct_dtmatrix(nlp, situ_df['sop'])\n tfidf_situ = TfidfModel(doc_term_bow)\n tfidf_mtx = bow2tfidf(doc_term_bow, tfidf_situ)\n km_190 = KMeans(n_clusters = 190, random_state = 2020).fit(tfidf_mtx)\n\n situ_topics_kmeans_tfidf = situ_df.copy()\n situ_topics_kmeans_tfidf['cluster'] = km_190.labels_\n situ_topics_kmeans_tfidf = situ_topics_kmeans_tfidf.sort_values(by = ['cluster', 'type', 'juri'], ignore_index = True)\n situ_topics_kmeans_tfidf['situ_lst'] = situ_topics_kmeans_tfidf['situation'].apply(lambda x: [x])\n situ_topics_kmeans_tfidf.to_csv('data/interim/situ_topics_kmeans_tfidf.csv', index = False)",
"def main():\n logfile = setup_log(os.path.join(os.environ['hel'], 'logs',\n 'thresh_cluster_fsl'))\n logfile.info('Threshold and cluster.')\n logfile.info('Doing the wgc PairedTres data. \\\n This is the main result for the difference between \\\n View1 and View2 in weighted global connectivity')\n outdir = os.path.join(os.environ['hel'], 'graph_analyses',\n 'randomise_global_connectivity')\n\n os.chdir(outdir)\n prefx = 'wgc_PairedTres_n10000'\n corrctd_p = '{}_clustere_corrp_tstat2.nii.gz'.format(\n prefx)\n stat = '{}_tstat2.nii.gz'.format(prefx)\n outfilename = '{}_thresh_clustere_corrp_tstat2'.format(\n prefx)\n fsl_maths(logfile, corrctd_p, stat, outfilename)\n clust_in = '{}.nii.gz'.format(outfilename)\n clst_indx = '{}_cluster_index'.format(outfilename)\n lmax_f = '{}_lmax.txt'.format(outfilename)\n clst_sz = '{}_cluster_size'.format(outfilename)\n logfile.info('Now doing cluster for wgc.')\n cluster(logfile, clust_in, clst_indx, lmax_f, clst_sz)",
"def main(argv):\n dataset_filename = argv[0]\n clusters_filename = dataset_filename + \".clusters.json\"\n output_filename = dataset_filename + \".output.json\"\n log_file = dataset_filename + \".log\"\n\n logger, handler = initialize_logger(log_file)\n logger.info('Start: Version 1.0.1')\n logger.debug('Logger initialized')\n logger.debug('sys.argv: %r', sys.argv)\n\n logger.debug('Loading dataset')\n dataset = load_dataset(dataset_filename)\n logger.info('Dataset loaded')\n\n logger.info('Trying to load clusters from %s', clusters_filename)\n clusters = None\n try:\n clusters = json.load(open(clusters_filename, 'r'))\n except FileNotFoundError:\n logger.warning('Clusters data file not found')\n except json.decoder.JSONDecodeError:\n logger.warning('File broken. Not Json Decodable')\n\n if not clusters:\n logger.debug('Clustering data points')\n clusters = clustering(dataset, logger)\n logger.debug(\n 'Dumping clusters data into json file: %s', clusters_filename)\n json.dump(clusters, open(clusters_filename, 'w'))\n logger.info('Data points clustered')\n\n logger.debug('Calculating meta-feature indicators')\n features = meta_features.meta_features(clusters)\n logger.debug(\n 'Dumping meta-feature indicators into json file: %s',\n clusters_filename)\n json.dump(features, open(output_filename, 'w'))\n logger.info('Meta-feature indicators calculated')\n\n logger.info('Completed')\n logger.removeHandler(handler)",
"def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists '\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)",
"def main():\r\n mvip, user, user_pass, mvip_node = get_inputs()\r\n payload = build_payload()\r\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\r\n response_json = connect_cluster(headers, url, payload)\r\n paired_vols = get_replication_status(response_json)\r\n payload = get_vol_stats(paired_vols)\r\n response_json = connect_cluster(headers, url, payload)\r\n parse_volume_stats(paired_vols, response_json)",
"def main():\n\n # Handling arguments\n args = get_args()\n all_clusters = args.all_clusters\n all_datacenters = args.all_datacenters\n all_hosts = args.all_hosts\n clusters = []\n if args.clusters:\n clusters = args.clusters\n debug = args.debug\n allow_fqdn = args.allow_fqdn\n datacenters = []\n if args.datacenters:\n datacenters = args.datacenters\n hosts = []\n if args.hosts:\n hosts = args.hosts\n host_configure_agent = args.host_configure_agent\n hosts_file = None\n if args.hosts_file:\n hosts_file = args.hosts_file\n hv_username = None\n if args.hv_username:\n hv_username = args.hv_username\n hv_password = None\n if args.hv_password:\n hv_password = args.hv_password\n hv_management_network = None\n if args.hv_management_network:\n hv_management_network = args.hv_management_network\n hv_data_network = None\n if args.hv_data_network:\n hv_data_network = args.hv_data_network\n hv_vm_network = None\n if args.hv_vm_network:\n hv_vm_network = args.hv_vm_network\n hv_mc_network = None\n if args.hv_mc_network:\n hv_mc_network = args.hv_mc_network\n log_file = None\n if args.logfile:\n log_file = args.logfile\n nuage_enterprise = args.nuage_enterprise\n nuage_host = args.nuage_host\n nuage_port = args.nuage_port\n nuage_password = None\n if args.nuage_password:\n nuage_password = args.nuage_password\n nuage_username = args.nuage_username\n nuage_vrs_ovf = None\n if args.nuage_vrs_ovf:\n nuage_vrs_ovf = args.nuage_vrs_ovf\n nosslcheck = args.nosslcheck\n verbose = args.verbose\n vcenter_host = args.vcenter_host\n vcenter_name = vcenter_host\n if args.vcenter_name:\n vcenter_name = args.vcenter_name\n vcenter_https_port = args.vcenter_https_port\n vcenter_http_port = args.vcenter_http_port\n vcenter_password = None\n if args.vcenter_password:\n vcenter_password = args.vcenter_password\n vcenter_username = args.vcenter_username\n\n # Logging settings\n if debug:\n log_level = logging.DEBUG\n elif verbose:\n log_level = logging.INFO\n else:\n log_level = logging.WARNING\n\n logging.basicConfig(filename=log_file, format='%(asctime)s %(levelname)s %(message)s', level=log_level)\n logger = logging.getLogger(__name__)\n\n # Input checking\n if not all_datacenters and len(datacenters) < 1:\n logger.critical('Not all datacenters have to be present in the Nuage Deployment tool (--all-datacenters option NOT enabled), but also no datacenters specified (at least one --datacenter)')\n return 1\n if not all_clusters and len(clusters) < 1:\n logger.critical('Not all clusters have to be present in the Nuage Deployment tool (--all-clusters option NOT enabled), but also no clusters specified (at least one --cluster)')\n return 1\n if not all_hosts and len(hosts) < 1 and not hosts_file:\n logger.critical('Not all hosts have to be present in the Nuage Deployment tool (--all-hosts option NOT enabled), but also no hosts specified (at least one --host or specify a file with the host information via --hosts-file)')\n return 1\n if all_datacenters and len(datacenters) > 0:\n logger.warning('You enabled all datacenters and added individual datacenter options, --all-datacenters takes precendence and overwrites the specified datacenters.')\n datacenters = []\n if all_clusters and len(clusters) > 0:\n logger.warning('You enabled all clusters and added individual cluster options, --all-clusters takes precendence and overwrites the specified clusters.')\n clusters = []\n if all_hosts and len(hosts) > 0 and not hosts_file:\n logger.warning('You enabled all hosts and added individual hosts options, --all-hosts takes 
precendence and overwrites the specified hosts.')\n hosts = []\n elif all_hosts and len(hosts) < 1 and hosts_file:\n logger.warning('You enabled all hosts and provided a hosts file, the hosts file takes precendence over the --all-hosts flag and this flag will be ignored.')\n all_hosts = False\n elif not all_hosts and len(hosts) > 0 and hosts_file:\n logger.warning('You specified host with the --host argument and provided a hosts file, the hosts file takes precendence over the --host paramerters and these will be ignored.')\n hosts = []\n\n # CSV Handling\n hosts_list = None\n if hosts_file:\n hosts_list = {}\n # CSV fields:\n # VM Name, Resource Pool, Folder, MAC Address, Post Script\n logger.debug('Parsing csv %s' % hosts_file)\n\n if not os.path.isfile(hosts_file):\n logger.critical('CSV file %s does not exist, exiting' % hosts_file)\n return 1\n\n with open(hosts_file, 'rb') as hostlist:\n hosts_list_raw = csv.reader(hostlist, delimiter=',', quotechar='\"')\n for row in hosts_list_raw:\n logger.debug('Found CSV row: %s' % ','.join(row))\n # Adding IP to the hosts variable so it can also be used in further handling if it's a valid IP\n if allow_fqdn or ip_address_is_valid(row[0]):\n hosts_list[row[0]] = row\n hosts.append(row[0])\n else:\n logger.warning('Found an invalid IP %s in the hosts file and FQDNs are not allowed, skipping line' % row[0])\n\n # Getting user password for Nuage connection\n if nuage_password is None:\n logger.debug('No command line Nuage password received, requesting Nuage password from user')\n nuage_password = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (nuage_host, nuage_username))\n\n # Getting user password for vCenter connection\n if vcenter_password is None:\n logger.debug('No command line vCenter password received, requesting vCenter password from user')\n vcenter_password = getpass.getpass(prompt='Enter password for vCenter host %s for user %s: ' % (vcenter_host, vcenter_username))\n\n # Getting user password for hosts\n if hv_password is None:\n logger.debug('No command line Host password received, requesting Host password from user')\n hv_password = getpass.getpass(prompt='Enter password for the hosts inside vCenter %s for user %s: ' % (vcenter_host, hv_username))\n\n try:\n vc = None\n nc = None\n\n # Connecting to Nuage\n try:\n logger.info('Connecting to Nuage server %s:%s with username %s' % (nuage_host, nuage_port, nuage_username))\n nc = vsdk.NUVSDSession(username=nuage_username, password=nuage_password, enterprise=nuage_enterprise, api_url=\"https://%s:%s\" % (nuage_host, nuage_port))\n nc.start()\n except IOError:\n pass\n\n if not nc or not nc.is_current_session():\n logger.error('Could not connect to Nuage host %s with user %s and specified password' % (nuage_host, nuage_username))\n return 1\n\n # Connecting to vCenter\n try:\n logger.info('Connecting to vCenter server %s:%s with username %s' % (vcenter_host, vcenter_https_port, vcenter_username))\n if nosslcheck:\n vc = SmartConnectNoSSL(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n else:\n vc = SmartConnect(host=vcenter_host, user=vcenter_username, pwd=vcenter_password, port=int(vcenter_https_port))\n\n except IOError:\n pass\n\n if not vc:\n logger.error('Could not connect to vCenter host %s with user %s and specified password' % (vcenter_host, vcenter_username))\n return 1\n\n logger.debug('Registering vCenter disconnect at exit')\n atexit.register(Disconnect, vc)\n\n logger.info('Connected to both Nuage & 
vCenter servers')\n\n # Check if the vCenter exists in Nuage vCenter Deployment Tool\n nuage_vcenter = None\n logger.debug('Checking if vCenter %s is already present in Nuage vCenter Deployment Tool' % vcenter_name)\n for nvc in nc.user.vcenters.get():\n if nvc.ip_address == vcenter_host:\n logger.debug('Found vCenter %s, not recreating' % vcenter_name)\n nuage_vcenter = nvc\n break\n\n # If th vCenter does not exist in Nuage vCenter Deployment Tool, create it\n if not nuage_vcenter:\n logger.debug('vCenter %s with IP %s not found in the Nuage vCenter Deployment Tool, creating' % (vcenter_name, vcenter_host))\n nuage_vcenter = vsdk.NUVCenter(name=vcenter_name, ip_address=vcenter_host, user_name=vcenter_username, password=vcenter_password, http_port=vcenter_http_port, https_port=vcenter_https_port, ovf_url=nuage_vrs_ovf)\n nc.user.create_child(nuage_vcenter)\n logger.info('Created vCenter %s in the Nuage vCenter Deployment Tool' % vcenter_name)\n\n # Datacenter Handling\n # Gathering all Datacenters inside the vCenter\n logger.debug('Gathering all Datacenters from vCenter')\n content = vc.content\n obj_view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datacenter], True)\n vc_dc_list = obj_view.view\n obj_view.Destroy()\n\n # Gathering all Datacenters inside the Nuage vCenter\n logger.debug('Gathering all Datacenter from the Nuage vCenter entry')\n nc_dc_list = nuage_vcenter.vcenter_data_centers.get()\n\n # Parsing all datacenters\n for vc_dc in vc_dc_list:\n if all_datacenters or vc_dc.name in datacenters:\n logger.debug('vCenter Datacenter %s is in list that has to be present in the Nuage vCenter Deployment Tool, checking if it already exists.' % vc_dc.name)\n handle_vdt_datacenter(logger=logger, nc=nc, vc=vc, nuage_vcenter=nuage_vcenter, vc_dc=vc_dc, nc_dc_list=nc_dc_list, vcenter_name=vcenter_name, all_clusters=all_clusters, all_hosts=all_hosts, clusters=clusters, hosts=hosts, hosts_list=hosts_list, hv_username=hv_username, hv_password=hv_password, hv_management_network=hv_management_network, hv_data_network=hv_data_network, hv_vm_network=hv_vm_network, hv_mc_network=hv_mc_network, host_configure_agent=host_configure_agent, allow_fqdn=allow_fqdn)\n\n logger.info('Completed all tasks.')\n return 0\n\n except vmodl.MethodFault as e:\n logger.critical('Caught vmodl fault: %s' % e.msg)\n return 1\n except Exception as e:\n logger.critical('Caught exception: %s' % str(e))\n return 1",
"def main():\n parser = mkOptionParser()\n options, args= parser.parse_args()\n\n if len(args) != 8:\n parser.error(\"Incorrect number of arguments\")\n\n\n# inFN = args[0]\n inFile = args[0]\n outFN = args[1]\n global sampleDown\n sampleDown = int(args[2])\n global chromosome\n chromosome = args[3]\n global lowerMAF\n lowerMAF = float(args[4])\n global upperMAF\n upperMAF =float( args[5])\n global window\n window=int(args[6])\n global jump\n jump = int(args[7])\n# if inFN == '-':\n# inFile = sys.stdin\n# else:\n# inFile = open(inFN, 'r')\n\n if outFN == '-':\n outFile = sys.stdout\n else:\n outFile = open(outFN, 'w')\n\n\n \n clusterHaplotypes(inFile, outFile, sampleDown, chromosome, lowerMAF, upperMAF, window, jump)",
"def main(args):\r\n # Parse inputs\r\n parser = _argparse()\r\n argp = parser.parse_args(args[1:])\r\n # Read training data\r\n sparse_training = sp.sparse.load_npz( argp.training_data )\r\n training_data = np.array( sparse_training.todense() )\r\n training_labels = np.array(pd.read_csv(argp.training_labels,index_col = 0 ))\r\n\r\n # Train classifier \r\n classifier = train_classifier.train_LR( training_data, training_labels, num_genes = argp.num_genes, pct_var = argp.pca_variance/100, save_results = False )\r\n\r\n training_predictions, training_probabilities = predict_cell_types( training_data, classifier )\r\n confusion_mat = pd.DataFrame(confusion_matrix( training_labels, training_predictions ),\r\n index = np.unique(training_labels), columns = np.unique(training_labels)).to_csv(\r\n \"results/classification/confusion_matrix.csv\")\r\n # Make predictions for all cells\r\n sparse_scRNA_expression = sp.sparse.load_npz( argp.test_data )\r\n scRNA_expression = np.array( sparse_scRNA_expression.todense() )\r\n predicted_cell_types, prediction_probabilities = predict_cell_types( scRNA_expression, classifier ) \r\n \r\n sequencing_metrics = pd.read_csv(\"data/combined/processed_metrics.csv\",index_col = 0)\r\n # Save outputs\r\n pd.DataFrame(prediction_probabilities, \r\n index = sequencing_metrics.index.values,\r\n columns = np.unique(predicted_cell_types) ).to_csv(\"results/classification/prediction_probabilities.csv\")\r\n pd.DataFrame(predicted_cell_types, \r\n index = sequencing_metrics.index.values,\r\n columns = [\"predicted_cell_type\"] ).to_csv(\"results/classification/predicted_cell_types.csv\")",
"def run4cluster(self):\n folderPath = os.path.join(self.folderpath, 'final/clusterData')\n foldernames = [i for i in os.listdir(os.path.join(\n self.rootpath, folderPath)) if os.path.isdir(os.path.join(self.rootpath, folderPath, i))]\n for foldername in foldernames:\n # if foldername != '1':\n # continue\n print(foldername)\n folderFullPath = os.path.join(folderPath, foldername)\n print(folderFullPath)\n print(\"Running code for {}\".format(folderFullPath))\n print(\"Running getTopicPmi.py ...\")\n # subprocess.call(args)\n try:\n self.getTopicPmi(folderFullPath, 1)\n except Exception as e:\n print(\"ERROR!!!\")\n print(e)\n\n print(\"Running extractSVOs ...\")\n self.extractSVOs(folderFullPath)\n print(\"Running getQuery ...\")\n self.getQuery(folderFullPath)\n print(\"Running getSimilarityStatements2Tweets ...\")\n self.getSimilarityStatements2Tweets(folderFullPath)\n print(\"Runnig getSnippets ...\")\n self.getSnippets(folderFullPath)\n print(\"Running getCorpus4Classification ...\")\n self.getCorpus4Classification(folderFullPath, 'cluster')",
"def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels",
"def test_main_on_cluster(self):\r\n\r\n command = \" \".join([\"denoiser.py\",\r\n \"--force\", \"-o\", self.test_dir, \"-c\", \"-n\", \"2\",\r\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME,\r\n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME])\r\n\r\n result = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT).stdout.read()\r\n self.result_dir = self.test_dir\r\n\r\n observed = \"\".join(list(open(self.result_dir + \"centroids.fasta\")))\r\n self.assertEqual(observed, self.expected)",
"def main():\n\n trainData = os.getcwd() + '/data/traindata.txt'\n trainLabels = os.getcwd() + '/data/trainlabels.txt'\n\n #testData = os.getcwd() + '/data/traindata.txt'\n #testLabels = os.getcwd() + '/data/trainlabels.txt'\n\n testData = os.getcwd() + '/data/testdata.txt'\n testLabels = os.getcwd() + '/data/testlabels.txt'\n\n #trainData = os.getcwd() + '/data/toyData.txt'\n #trainLabels = os.getcwd() + '/data/toyLabel.txt'\n #testData = os.getcwd() +'/data/toyTestData.txt'\n #testLabels = os.getcwd() + '/data/toyTestLabel.txt'\n\n #print(trainData, trainLabels)\n myClassifier = NBClassifier.new(NBClassifier.MODE_BERNOULI)\n myClassifier.setTrainData(trainData, trainLabels)\n #print(myClassifier)\n\n #singleTestData = ['Chinese', 'Chinese', 'Chinese', 'Tokyo', 'Japan']\n #prediction = myClassifier.predict(singleTestData)\n #print(f'{singleTestData} >>> {prediction}')\n predictions = myClassifier.predictSet(testData)\n accuracy = myClassifier.reportAccuracy(testLabels)\n\n #print(predictions)\n print(accuracy)",
"def run(self):\n for i,p in enumerate(self.pairs):\n self.forPointPair(i)\n if i % 100000 == 0:\n print('Percentage Processed: ' + str(round(i * 100 / len(self.pairs), 3)) + '. Existing Cluster Labels: ', len(np.unique(self.labels)))",
"def main():\r\n args = Parameters().parse()\r\n # #\r\n # args.method = 'student_res18_pre'\r\n args.method = 'student_esp_d'\r\n args.dataset = 'camvid_light'\r\n args.data_list = \"/ssd/yifan/SegNet/CamVid/test.txt\"\r\n args.data_dir = \"/ssd/yifan/\"\r\n args.num_classes = 11\r\n # args.method='psp_dsn_floor'\r\n args.restore_from = \"./checkpoint/Camvid/ESP/base_57.8.pth\"\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/ESPNet/train/0.4results_enc_01_enc_2_8/model_298.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifacd n/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER864/CS_scenes_40000.pth\"\r\n # args.restore_from = \"/teamscratch/msravcshare/v-yifan/sd_pytorch0.5/checkpoint/snapshots_psp_dsn_floor_1e-2_40000_TEACHER5121024_esp/CS_scenes_40000.pth\"\r\n # args.data_list = '/teamscratch/msravcshare/v-yifan/deeplab_v3/dataset/list/cityscapes/train.lst'\r\n args.batch_size = 1\r\n print(\"Input arguments:\")\r\n for key, val in vars(args).items():\r\n print(\"{:16} {}\".format(key, val))\r\n\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n\r\n print(args)\r\n output_path = args.output_path\r\n if not os.path.exists(output_path):\r\n os.makedirs(output_path)\r\n # args.method='psp_dsn'\r\n deeplab = get_segmentation_model(args.method, num_classes=args.num_classes)\r\n\r\n ignore_label = 255\r\n id_to_trainid = {-1: ignore_label, 0: ignore_label, 1: ignore_label, 2: ignore_label,\r\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\r\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\r\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\r\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\r\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\r\n\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\r\n # args.restore_from=\"/teamscratch/msravcshare/v-yifan/sd_pytorch0.3/checkpoint/snapshots_resnet_psp_dsn_1e-4_5e-4_8_20000_DSN_0.4_769light/CS_scenes_20000.pth\"\r\n # if 'dense' in args.method:\r\n #\r\n if args.restore_from is not None:\r\n saved_state_dict = torch.load(args.restore_from)\r\n c_keys = saved_state_dict.keys()\r\n for i in c_keys:\r\n flag = i.split('.')[0]\r\n if 'module' in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n deeplab.load_state_dict(saved_state_dict)\r\n if 'module' not in flag:\r\n deeplab = nn.DataParallel(deeplab)\r\n # if 'dense' not in args.method:\r\n # deeplab = nn.DataParallel(deeplab)\r\n model = deeplab\r\n model.eval()\r\n model.cuda()\r\n # args.dataset='cityscapes_light'\r\n testloader = data.DataLoader(get_segmentation_dataset(args.dataset, root=args.data_dir, list_path=args.data_list,\r\n crop_size=(360, 480), mean=IMG_MEAN, scale=False,\r\n mirror=False),\r\n batch_size=args.batch_size, shuffle=False, pin_memory=True)\r\n\r\n data_list = []\r\n confusion_matrix = np.zeros((args.num_classes, args.num_classes))\r\n\r\n palette = get_palette(20)\r\n\r\n image_id = 0\r\n for index, batch in enumerate(testloader):\r\n if index % 100 == 0:\r\n print('%d processd' % (index))\r\n if args.side:\r\n image, label, _, size, name = batch\r\n elif 'sd' in args.dataset:\r\n _, image, label, size, name = batch\r\n else:\r\n image, label, size, name = batch\r\n # print('image name: {}'.format(name))\r\n size = size[0].numpy()\r\n output = predict_esp(model, image)\r\n # seg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\r\n result = np.asarray(np.argmax(output, axis=3), 
dtype=np.uint8)\r\n # result=cv2.resize(result, (1024, 1024), interpolation=cv2.INTER_NEAREST)\r\n m_seg_pred = ma.masked_array(result, mask=torch.eq(label, 255))\r\n ma.set_fill_value(m_seg_pred, 20)\r\n seg_pred = m_seg_pred\r\n\r\n for i in range(image.size(0)):\r\n image_id += 1\r\n print('%d th segmentation map generated ...' % (image_id))\r\n args.store_output = 'True'\r\n output_path = './esp_camvid_base/'\r\n if not os.path.exists(output_path):\r\n os.mkdir(output_path)\r\n if args.store_output == 'True':\r\n # print('a')\r\n output_im = PILImage.fromarray(seg_pred[i])\r\n output_im.putpalette(palette)\r\n output_im.save(output_path + '/' + name[i] + '.png')\r\n\r\n seg_gt = np.asarray(label.numpy()[:, :size[0], :size[1]], dtype=np.int)\r\n ignore_index = seg_gt != 255\r\n seg_gt = seg_gt[ignore_index]\r\n seg_pred = seg_pred[ignore_index]\r\n confusion_matrix += get_confusion_matrix(seg_gt, seg_pred, args.num_classes)\r\n\r\n pos = confusion_matrix.sum(1)\r\n res = confusion_matrix.sum(0)\r\n tp = np.diag(confusion_matrix)\r\n\r\n IU_array = (tp / np.maximum(1.0, pos + res - tp))\r\n mean_IU = IU_array.mean()\r\n\r\n print({'meanIU': mean_IU, 'IU_array': IU_array})\r\n\r\n print(\"confusion matrix\\n\")\r\n print(confusion_matrix)",
"def _load_cluster(self):",
"def main():\n rs = redshift(config_file=CONFIG_FILENAME)\n \n # check if cluster already available\n try:\n clust_avail = check_available(rs)\n except rs_client.exceptions.ClusterNotFoundFault:\n clust_avail = False\n\n # if cluster not available, create it\n if not clust_avail:\n create_cluster(rs) \n \n print(f'Cluster is available. Cluster information: \\n{rs.get_cluster_info()}')",
"def build(self, verbose=True):\n # initially: one cluster\n self.labels = np.zeros((self.n_pts, ), dtype=int)\n self.int_paths = np.zeros((self.n_pts, ), dtype=int)\n self.n_clusters = 1\n\n # create the root and add it to a FIFO queue of nodes to process\n root = SpectralNode(\n np.arange(self.n_pts), 0, name=\"1\") # '1' by convention\n to_split = PriorityQueue()\n to_split.push(root)\n\n # recursively split\n #nrecs = 0\n while len(to_split) > 0:\n # get the node with highest priority\n node = to_split.pop()\n left, right = self.split(node)\n\n # push to the priority queue\n if node.has_children:\n # node was split: push the children\n to_split.push(left)\n to_split.push(right)\n else:\n # node is a leaf: update the cluster tree paths for the concerned points\n self.int_paths[node.ids] = int(node.name, 2)\n # Note: outliers (not in node.ids) have default '0' path\n\n # to save all partial labelings, do\n #nrecs += 1\n #np.save('labels_%04d_split_%s.npy' % (nrecs, node.name), self.labels)\n\n if verbose:\n self._print_split_infos(node, left, right, len(to_split))\n\n # check we don't have a too small number of leaves\n assert self.n_clusters >= self.min_leaves, \\\n \"BUG: not enough clusters {0}\".format(self.n_clusters)",
"def main():\n\n config = None\n\n try:\n args = get_args()\n config = process_config(args.config)\n raise RuntimeError(\"Missing or invalid arguments\")\n except Exception as e:\n logging.error(\"Failed\", exc_info=e)\n\n print(\"Create the data generator.\")\n # data_loader = MnistDataLoader(config=config)\n data_loader = IrisDataLoader(config=config)\n train_data = data_loader.get_train_data()\n test_data = data_loader.get_test_data()\n\n print(\"Build the model\")\n # cnn_model = ConvModel(config=config).build_model()\n cnn_model = ANNModel(config=config).build_model()\n\n print(\"Load the best weights\")\n cnn_model.load_weights(\"experiments/{}/{}/checkpoints/{}-weights.best.hdf5\".format(\n config.evaluation.date, config.exp.name, config.exp.name))\n\n print(\"Evaluate the model\")\n print(\"Training Metrics\")\n evaluate(model=cnn_model, data=train_data)\n print(\"Testing Metrics\")\n evaluate(model=cnn_model, data=test_data)\n\n # print(\"Visualize loss and accuracy for Training and Validation data\")\n # plot_history(config=config)\n\n # print(\"Plotting ROC Curve\")\n # plot_roc(model=cnn_model, data=test_data)\n\n print(\"Classifcation Accuracy Report\")\n classification_accuracy_report(model=cnn_model, data=test_data)",
"def main(verbose=True):\n if verbose: \n print(\"\\n---------------\")\n printCommonSNPCounts()\n print(\"---------------\")\n \n print(\"Charles River\")\n print(\"---------------\") \n getCommonSNPIndices(\"C\", save=True)\n print(\"---------------\")\n \n print(\"Harlan River\")\n getCommonSNPIndices(\"H\", save=True)\n print(\"---------------\")\n else:\n getCommonSNPIndices(\"C\", save=True)\n getCommonSNPIndices(\"H\", save=True)",
"def test_main_on_cluster(self):\n\n command = \" \".join( [\"%s/denoiser.py\" % get_qiime_scripts_dir(),\n \"--force\",\"-o\", self.test_dir, \"-c\", \"-n\", \"2\",\n \"-i\", \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME, \n \"-f\", \"%s/qiime/support_files/denoiser/TestData/test_set_seqs.fna\" % PROJECT_HOME] )\n\n result = Popen(command,shell=True,universal_newlines=True,\\\n stdout=PIPE,stderr=STDOUT).stdout.read()\n self.result_dir = self.test_dir\n\n observed = \"\".join(list(open(self.result_dir+ \"centroids.fasta\")))\n self.assertEqual(observed, self.expected)",
"def main(oct_pos):\n print(\"-\" * 20 + \" main started\")\n for ip_port in session_array.keys():\n ip = ip_port.split(\":\")[0]\n ip_array.append(ip)\n iqn = session_array[ip_port]\n host = iqn.split(\":\")[1]\n\n print(\"+\" + \"-\"*123 + \"+\")\n # Determine if the host and the cluster are in the same host network or are connecting via a routed network\n for ip_comp in ip_array:\n determine_oct = int(ip_comp.split(\".\")[oct_pos])\n #print(block_start, determine_oct, block_end)\n if determine_oct >= block_start and determine_oct <= block_end:\n #print(\"+\" + \"-\"*123 + \"+\")\n prettyPrint(\"Pass, IP is in the right subnet\", ip_comp, host, 120)\n #prettyPrint(\"Hostname for IP\", host, 80)\n \n else:\n #print(\"+\" + \"-\"*123 + \"+\")\n prettyPrint(\"Fail, IP is not in the right subnet\", ip_comp, host, 120)\n #prettyPrint(\"Hostname for IP\", host, 80)\n #print(\"+\" + \"-\"*123 + \"+\")\n\n print(\"+\" + \"-\"*123 + \"+\")",
"def main(dataset=None, min_dens=1 * 10 ** -6, eps=0.0001, h=7):\n if dataset is None:\n dataset = pd.read_csv(\"iris.txt\", header=None, names=[\"x1\", \"x2\", \"x3\", \"x4\", \"label\"])\n print(f\"Inputs mindensity:{min_dens}\\teps:{eps}\\th:{h}\")\n cluster_map, cluster_center = denclue(np.array(dataset.iloc[:, [0, 1, 2, 3]]), min_dens, eps, h)\n dataset[\"cluster\"] = -1\n for c, points in cluster_map.items():\n dataset.at[points, \"cluster\"] = c\n print(\"Attractor:\", cluster_center[c])\n print(\"Points in cluster:\", points)\n print(\"Size of each cluster\")\n print(\"Clusters Assigned by Algorithm:\", \"\\n\", dataset.groupby(by=\"cluster\").count()['x1'])\n print(\"Original Clusters:\", \"\\n\", dataset.groupby(by=\"label\").count()['x1'])\n print(\"Purity:\", calculate_purity(dataset, len(cluster_map)))",
"def main():\n\n dir_path =r'/Users/dustin/CS/projects/ship_detector/data/ships-in-satellite-imagery/shipsnet/'\n\n data_array, label_array = read_images(dir_path)\n\n array_info(data_array, label_array)\n\n image_info(data_array[0,:], plot_image=False)\n\n split_ratios = [0.8, 0.1, 0.1] #splitting the dataset into 80% train, 10% dev, 10% test\n\n X_train, X_dev, X_test, Y_train, Y_dev, Y_test = dataset_split(data_array, label_array, split_ratios)",
"def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # ================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = 
\"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))",
"def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)",
"def main():\n if sys.argv[1] == \"start\":\n start_cluster(sys.argv[2], sys.argv[3], int(sys.argv[4]),\n int(sys.argv[5]), sys.argv[6], sys.argv[7],\n int(sys.argv[8]))\n elif sys.argv[1] == \"stop\":\n stop_cluster()\n else:\n print 'Unknown Option'"
] | [
"0.68848646",
"0.6841816",
"0.65161455",
"0.6477813",
"0.6471169",
"0.6439347",
"0.6436151",
"0.64239186",
"0.63982856",
"0.63937217",
"0.6327966",
"0.632562",
"0.6311031",
"0.6308998",
"0.62794244",
"0.6248479",
"0.62176025",
"0.62134105",
"0.6181992",
"0.61715055",
"0.6161926",
"0.61579186",
"0.6141384",
"0.61399263",
"0.61354005",
"0.61267996",
"0.6124936",
"0.6123534",
"0.60953534",
"0.60719675"
] | 0.68589205 | 1 |
For a list i of indexes of skeleton point positions, this method finds the closest Voronoi cell center to each skeleton point and, based on this, counts how many of the skeleton points belong to each Voronoi label. Note that memberships is a vector whose ith element shows how many of the skeleton positions have a closest Voronoi cell of label i. | def findNearest(self, i):
skel = self.skel[i, :]
closest = self.nbrs.kneighbors(skel, return_distance=False)
memberships = np.zeros(len(self.uniqueVor))
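#memberships[j] counts how many of the selected skeleton points have their nearest Voronoi cell center
#carrying Voronoi label j; label 0 appears to denote centers outside any Voronoi cluster (void regions),
#which is why those points are also flagged via self.isCorrect below.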
for j, c in enumerate(closest):
c = c[0]
nearLabel = self.vorLabels[c]
memberships[nearLabel] += 1
if nearLabel == 0:
self.isCorrect[i[j]] = 0
return memberships | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n for l in self.uniqueSkel:\n mask = np.arange(len(self.skel))[self.skelLabels == l]\n counts = self.findNearest(mask)\n self.memberships[l] = counts\n\n #self.memberships is an array of as many rows as skeleton labels and as many columns as Voronoi cluster labels,\n #where the i-th row shows for all skeleton points of cluster label i, how many belong to each of the Voronoi\n #cluster labels. More precisely, the j-th column of the i-th row of this array shows how many skeleton points\n #of cluster label i have a closest Voronoi cell center of label j.\n\n print('Out of ' + str(len(self.skel)) + ' skeleton points, ' + str(sum(self.memberships[:, 0])) + ' (' + str(round(sum(self.memberships[:, 0]) * 100/len(self.skel), 3)) + ' %) appear in areas classified as void areas by Voronoi')\n\n for l in self.uniqueSkel:\n members = sum(self.skelLabels == l)\n topVor = np.argsort(self.memberships[l])[::-1][:5] - 1\n counts = np.sort(self.memberships[l])[::-1][:5]\n print('For the ' + str(members) + ' skeleton points with label ' + str(l) + ': ')\n for i in range(5):\n if counts[i] > 0:\n if topVor[i] == -1:\n add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100 / members, 3)) + ' %) are not associated with a Voronoi cluster cell'\n else:\n add = ' ' + str(counts[i]) + ' ( ' + str(round(counts[i] * 100/ members, 3)) + ' %) belong to the Voronoi Cluster with label ' + str(topVor[i])\n print(add)\n\n self.plotResults()",
"def voronoi_labelling(self, seed):\n import heapq\n if hasattr(seed, '__iter__') == False:\n seed = [seed]\n try:\n if (self.weights < 0).any():\n raise ValueError('some weights are non-positive')\n except:\n raise ValueError('undefined weights')\n dist, active = np.inf * np.ones(self.V), np.ones(self.V)\n label = - np.ones(self.V, np.int_)\n idx, neighb, weight = self.compact_neighb()\n dist[seed] = 0\n label[seed] = np.arange(len(seed))\n dg = list(zip(np.zeros_like(seed), seed))\n heapq.heapify(dg)\n for j in range(self.V):\n end = False\n while True:\n if len(dg) == 0:\n end = True\n break\n node = heapq.heappop(dg)\n if active[node[1]]:\n break\n if end:\n break\n dwin, win = node\n active[win] = False\n # the folllowing loop might be vectorized\n for i in range(idx[win], idx[win + 1]):\n l, newdist = neighb[i], dwin + weight[i]\n if newdist < dist[l]:\n heapq.heappush(dg, (newdist, l))\n dist[l] = newdist\n label[l] = label[win]\n return label",
"def label_simplex(grid, simplex, thresh):\n coords = [grid[:,x] for x in simplex]\n dist = squareform(pdist(coords,'euclidean'))\n adjacency = dist<thresh\n adjacency = adjacency.astype(int) \n graph = csr_matrix(adjacency)\n n_components, labels = connected_components(csgraph=graph, directed=False, return_labels=True)\n\n return n_components",
"def _count_subset_neighbors(v, X):\n return len(set(v.neighbors).intersection(X))",
"def numNeighbors(minesSet, row_index, cols_index, num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines",
"def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))",
"def vertexMemberships(matchingMatrix,R=180):\n\n labels = np.arange(1,R+1)\n\n inds = matchingMatrix!=0;\n mm = np.zeros((matchingMatrix.shape))\n mm[inds] = 1;\n\n idMatrix = mm * labels\n \n labelVerts = {}.fromkeys(list(labels))\n \n for L in labels:\n \n tempColumn = idMatrix[:,L-1]\n inds = np.where(tempColumn == L)[0]\n \n labelVerts[L] = inds\n \n return labelVerts",
"def find_centroid_for_each(self):",
"def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count",
"def test_count_neighbors(self):\n m, n = 5, 5\n k, p = 0.2, 0.7\n agents = [ConwayAgent(ii, ii & 0x1 == 1) for ii in range(m * n)]\n C = ConwayModel(m, n, k, p, agents)\n\n to_count = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])\n expected = np.array([[1, 1, 2], [2, 3, 1], [0, 2, 1]])\n result = C.count_neighbors(to_count)\n self.assertTrue(np.all(expected == result))",
"def test_make_neighbors(position):\n\n def result_row(i, size):\n return [i] + [i + 1] * (size - 2) + [i]\n\n size = position.size\n neigh_counts = [0] * (size ** 2)\n first_row = result_row(2, size)\n last_row = result_row(2, size)\n middle_row = result_row(3, size)\n desired_result = first_row + (middle_row) * (size - 2) + last_row\n\n for c, neighs in go.make_neighbors(size=size):\n for pt in list(neighs):\n neigh_counts[pt] += 1\n\n assert desired_result == neigh_counts",
"def footprint_corner_indices():",
"def get_labelPositions(y_list, x_list):\n n_labels = len(y_list)\n\n # GET BORDER POINTS\n x_min, x_max = get_min_max(x_list)\n x_mid = (x_max - x_min) / 2\n\n y_min, y_max = get_min_max(y_list)\n y_mid = (y_max - y_min) / 2\n # Border points\n bp1 = np.array(list(product([x_min, x_max, x_mid], \n [y_min, y_max, y_mid])))[:-1]\n\n # Top right points\n # bp2 = np.array(list(product([0., 1.0, 0.75], \n # [0., 1.0, 0.75])))[:-1]\n\n # Bottom right points\n # bp3 = np.array(list(product([0., 1.0, 0.25], \n # [0., 1.0, 0.25])))[:-1] \n #border_points = np.vstack([bp1, bp2, bp3])\n border_points = np.vstack([bp1])\n n_border = border_points.shape[0]\n\n # Initialize placeholders\n ref_points = np.zeros((n_border + n_labels, 2))\n\n label_positions = np.zeros((n_labels, 2))\n label_indices = np.zeros(n_labels, int)\n\n \n \n ref_points[:n_border] = border_points\n\n for i in range(n_labels):\n # GET POSITIONS\n n_points = x_list[i].size\n xy_points = np.zeros((n_points, 2))\n\n xy_points[:, 0] = x_list[i]\n xy_points[:, 1] = y_list[i]\n \n # GET REF POINTS\n dist = get_pairwise_distances(xy_points, ref_points[:n_border + i])\n\n # GET MINIMUM DISTANCES\n min_dist = dist.min(axis=1)\n\n # GET MAXIMUM MINIMUM DISTANCE\n label_index = np.argmax(min_dist)\n label_pos = xy_points[label_index]\n\n ref_points[n_border + i] = label_pos\n label_positions[i] = label_pos\n label_indices[i] = label_index\n\n return label_positions, label_indices",
"def count_alive_neighbors(self, status):\n kernel = np.array(\n [[1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n count = convolve2d(status, kernel, mode='same', boundary=\"wrap\")\n return count",
"def getSkeletonPoints(self):\n self.SkeletonPoints = []\n for s in self.Intersections:\n self.SkeletonPoints.append(s.centroid)",
"def calc_bpti_centroid(traj_list):\n sums = np.zeros(shape=(5, 1653))\n cnts = [0 for i in range(5)]\n label = getLabelList()\n for n, traj in enumerate(prdist):\n for i in range(0, len(traj), 40):\n try:\n idx = (n*400) + (i // 1000)\n state = label[idx]\n # Exclude any near transition frames\n if idx < 3 or idx > 4121:\n continue\n if state == label[idx-2] == label[idx-1] == label[idx+1] == label[idx+2]:\n sums[state] += traj[i]\n cnts[state] += 1\n except IndexError as err:\n pass # ignore idx errors due to tail end of DEShaw data\n cent = [sums[i] / cnts[i] for i in range(5)]\n return (np.array(cent))",
"def ray_label_simplex(grid, simplex, thresh):\n coords = [grid[:,x] for x in simplex]\n dist = squareform(pdist(coords,'euclidean'))\n adjacency = dist<thresh\n adjacency = adjacency.astype(int) \n graph = csr_matrix(adjacency)\n n_components, labels = connected_components(csgraph=graph, directed=False, return_labels=True)\n\n return n_components",
"def houses(self):\n num = 0\n points = 0\n # TODO: add pattern matching\n if \"s\" in self.__as_str:\n num += 1\n if \"f\" in self.__as_str:\n num += 1\n if \"1\" in self.__as_str or \"2\" in self.__as_str or \"3\" in self.__as_str or \"4\" in self.__as_str:\n num += 1\n if \"o\" in self.__as_str:\n num += 1\n if \"p\" in self.__as_str:\n num += 1\n for i in range(4):\n for j in range(4):\n if self.as_list[i][j] == 'h':\n if 'f' in self.neighbours(i, j):\n points += 1\n else:\n points += num\n return points",
"def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;",
"def testHClusters(cntsDf, members, cols=None, min_count=5):\n\n if cols is None:\n cols = cntsDf.columns\n\n tot = cntsDf.sum()\n Ncells = tot.sum()\n uCDR3 = list(cntsDf.index)\n\n results = []\n\n for cid, m in members.items():\n notM = [i for i in range(cntsDf.shape[0]) if not i in m]\n obs = np.concatenate((np.sum(cntsDf[cols].values[m, :], axis=0, keepdims=True),\n np.sum(cntsDf[cols].values[notM, :], axis=0, keepdims=True)), axis=0)\n if np.sum(obs, axis=1)[0] > min_count:\n \"\"\"Inner product of the marginal totals along both axes, divided by total cells\"\"\"\n expect = np.dot(np.sum(obs, keepdims=True, axis=1),\n np.sum(obs, keepdims=True, axis=0)) / Ncells\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n chi2 = (obs - expect)**2 / expect\n\n sum_chi2 = np.sum(chi2)\n\n degf = len(cols) - 1\n pvalue = 1 - stats.chi2.cdf(sum_chi2, degf)\n results.append({'cid':cid,\n 'chi2':sum_chi2,\n 'pvalue':pvalue,\n 'observed':tuple(obs[0, :]),\n 'observed_prop':(obs / np.sum(obs, axis=0))[0, :],\n 'expected':tuple(expect[0, :]),\n 'expected_prop':(expect / np.sum(obs, axis=0))[0, :],\n 'members':tuple(m),\n 'labels':cols})\n else:\n results.append({'cid':cid,\n 'chi2':np.nan,\n 'pvalue':np.nan,\n 'observed':tuple(obs[0, :]),\n 'observed_prop': (obs / np.sum(obs, axis=0))[0, :],\n 'expected':(np.nan, )*len(cols),\n 'expected_prop': (np.nan, )*len(cols),\n 'members':tuple(m),\n 'labels':cols})\n resDf = pd.DataFrame(results)\n\n if 'adjustwithin' in sys.modules:\n resDf.loc[:, 'FWER-pvalue'] = adjustnonnan(resDf['pvalue'], method='holm')\n resDf.loc[:, 'FDR-qvalue'] = adjustnonnan(resDf['pvalue'], method='fdr_bh')\n return resDf.set_index('cid')",
"def count_pairs(clusters_list, cluster_labels):\n algorithm_pairs = 0\n intersecting_pairs = 0\n for points_in_cluster in clusters_list:\n algorithm_pairs += (len(points_in_cluster)**2 - len(points_in_cluster)) / 2\n for pair in itertools.combinations(points_in_cluster, 2):\n if cluster_labels[pair[0]] == cluster_labels[pair[1]]:\n intersecting_pairs += 1\n return algorithm_pairs, intersecting_pairs",
"def count_backbone_contacts(fuzzball_path):\n print(fuzzball_path)\n\n fuzzball = prody.loadAtoms(fuzzball_path)\n fuzzball_hv = fuzzball.getHierView()\n ligand = fuzzball_hv['X', 1]\n ligand_resname = ligand.getResname()\n\n constraints = Generate_Constraints(ligand_resname)\n\n motif_count = 0\n bb_contacts = 0\n\n for index, motif in enumerate(fuzzball_hv.iterResidues(), start=1):\n if index != 1:\n constraint_dict = constraints.determine_constraint_atoms(motif, ligand)\n if constraint_dict is False: continue\n if constraint_dict['residue']['atom_names'][0] in ['C', 'CA', 'O', 'N']:\n bb_contacts += 1\n motif_count += 1\n\n return {'motifs': motif_count,\n 'bb_contacts': bb_contacts,\n 'fuzzball': fuzzball_path}",
"def count_neighbor_mines(self, i, j):\n n_neighbor_mines = -1\n if not self.mines[i, j]:\n n_neighbor_mines = np.count_nonzero(\n self.mines[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])\n return n_neighbor_mines",
"def label_centroids_heuristically(self, centroids: np.ndarray):\n\n cluster_centroids_labels = [(\"\", {}) for c in centroids]\n\n centre_point = centroids[0]\n heuristic_centroids = np.array(\n [\n centre_point + [-30, 30],\n centre_point + [30, 30],\n centre_point + [0, -48.125],\n ]\n )\n heuristic_centroid_labels = [\n ConstJoint.LEFT_EYE,\n ConstJoint.RIGHT_EYE,\n ConstJoint.MOUTH,\n ]\n labeled = [False for c in centroids]\n used_label = [False for c in heuristic_centroids]\n while self.__are_labels_matched_with_centroids(cluster_centroids_labels, \"\"):\n min_dist_square = math.inf\n min_centroid = 0\n min_cluster = 0\n current_cluster = {}\n for i, c in enumerate(centroids):\n if labeled[i]:\n continue\n for j, cl in enumerate(heuristic_centroids):\n if used_label[j]:\n continue\n diff = c - cl\n dist_square = diff.dot(diff)\n\n if dist_square < min_dist_square:\n min_centroid = i\n current_cluster = c\n min_cluster = j\n min_dist_square = dist_square\n\n cluster_centroids_labels[min_centroid] = (\n heuristic_centroid_labels[min_cluster],\n current_cluster,\n )\n labeled[min_centroid] = True\n used_label[min_cluster] = True\n\n return cluster_centroids_labels",
"def findClosetCentroids(X, centroids):\n\tm, n = X.shape\n\tK = centroids.shape[0]\n\tidx = np.zeros(m) # m\n\n\tfor i in range(m):\n\t\ttemp = np.tile(X[i, :], K).reshape(centroids.shape)\n\t\tidx[i] = np.argmin(np.sum((centroids - temp) ** 2, axis=1))\n\treturn idx",
"def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count",
"def contains ( self, pos ):\n \n inds = in_hull(pos[:2,:].T, array(self.edges).reshape(-1,2), \\\n border = self.include_border ,tol = self.abs_tol)\n \n # if none inside, take nearest\n if ~inds.any() and self.default_nearest:\n dr2 = array(self.edges).reshape(-1,2).mean(0)\n inds[argmin(dr2)] = True\n \n return inds",
"def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")",
"def count_neighbor_flags(self, i, j):\n return np.count_nonzero(self.flags[(i-1 if i > 0 else 0):i+2, (j-1 if j > 0 else 0):j+2])",
"def identify_leaflets_cluster(self,pts,vec,topologize_time_limit=30,max_count_asymmetry=0.05):\n\t\timport scipy\n\t\timport sklearn\n\t\timport sklearn.neighbors\n\t\timport sklearn.cluster\n\t\tnlipids = len(pts)\n\t\t#---time limit on the topologize function which joins broken bilayers e.g. a saddle that crosses PBCs\n\t\ttry:\n\t\t\twith time_limit(topologize_time_limit): \n\t\t\t\twrapper = topologize(pts,vec,\n\t\t\t\t\t**({'tol':self.topologize_tolerance} if self.topologize_tolerance else {}))\n\t\texcept TimeoutException: \n\t\t\tstatus('topologize failed to join the bilayer. '\n\t\t\t\t'if it is broken over PBCs e.g. a saddle, this is a serious error which may go undetected. '\n\t\t\t\t'make sure you always inspect the topology later.',tag='error')\n\t\t\twrapper = np.zeros((len(pts),3))\n\t\tfindframe = pts + wrapper*np.array(vec)\n\t\t#---ensure that all points are in the box\n\t\tfindframe += vec*(findframe<0) - vec*(findframe>vec)\n\t\t#---previous calculation of connectivity was done manually\n\t\tif False:\n\t\t\t#---conservative cutoff gets lots of nearby points\n\t\t\tcutoff = 10.0\n\t\t\tcutoff_short = 2.0\n\t\t\t#---make a K-D tree from the points\n\t\t\ttree = scipy.spatial.ckdtree.cKDTree(findframe,boxsize=np.concatenate((vec,vec))+0.*eps)\n\t\t\t#---find the nearest reference points for each instantaneous point\n\t\t\tclose,nns = tree.query(findframe,distance_upper_bound=cutoff,k=20)\n\t\t\t#---construct the neighbor list\n\t\t\tsubjects = np.where(np.all((close<cutoff,close>0),axis=0))\n\t\t\t#---get the pairs of neighbors\n\t\t\tsubjects,neighbors = subjects[0],nns[subjects]\n\t\t\tpds = np.ones((nlipids,nlipids))*0.0\n\t\t\tpds[tuple((np.arange(nlipids),np.arange(nlipids)))] = 0.0\n\t\t\tnears = np.where(np.all((close>0,close<=cutoff_short),axis=0))\n\t\t\tpds[tuple((nears[0],nns[nears]))] = 1.0#close[nears]\n\t\t\tpds[tuple((nns[nears],nears[0]))] = 1.0#close[nears]\n\t\tconnectivity = sklearn.neighbors.kneighbors_graph(findframe,\n\t\t\tn_neighbors=self.cluster_neighbors,include_self=False)\n\t\tward = sklearn.cluster.AgglomerativeClustering(n_clusters=2,\n\t\t\tconnectivity=connectivity,linkage='complete').fit(findframe)\n\t\timono = ward.labels_\n\t\tif np.mean(imono)==0.5: \n\t\t\tstatus('[STATUS] perfect split is %0.5f'%np.mean(imono))\n\t\telif (np.all(np.array(imono)==0) or np.all(np.array(imono)==1) or \n\t\t\tnp.abs(np.mean(imono)-0.5)>=max_count_asymmetry):\n\t\t\tstatus('[STATUS] split is %0.5f'%np.mean(imono))\n\t\t\tstatus('[STATUS] one side has %d'%np.sum(imono))\n\t\t\tstatus('[WARNING] leaflets were not distinguished')\n\t\t\traise Exception('[ERROR] failed to identify leaflets. '\n\t\t\t\t'DEVELOPMENT NOTE!? use legacy or a different cutoff?')\n\t\telse: status('[STATUS] some lipids might be flipped %d %.5f'%(np.sum(imono),np.mean(imono)))\n\t\treturn np.array(imono)"
] | [
"0.70501083",
"0.573867",
"0.5303514",
"0.5237649",
"0.5178863",
"0.5108157",
"0.5104332",
"0.50592184",
"0.50462115",
"0.5019821",
"0.5018306",
"0.5014337",
"0.5011559",
"0.49469694",
"0.49384215",
"0.49318594",
"0.49196318",
"0.49063683",
"0.4904543",
"0.4899196",
"0.48954192",
"0.48678064",
"0.48651925",
"0.4841963",
"0.48381466",
"0.48316404",
"0.4824644",
"0.48231936",
"0.48150507",
"0.48101792"
] | 0.63530916 | 1 |
Perform n Bernoulli trials with success probability p and return number of successes. | def perform_bernoulli_trials(n, p):
# Initialize number of successes: n_success
n_success = 0
# Perform trials
for i in range(n):
# Choose random number between zero and one: random_number
random_number = np.random.random()
# If less than p, it's a success so add one to n_success
if random_number < p:
n_success += 1
return n_success | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number< p:\n n_success += 1\n\n return n_success",
"def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0",
"def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans",
"def Bernoulli(p, succ=1, fail=0, symbol=None):\n\n return BernoulliPSpace(p, succ, fail, symbol).value",
"def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total",
"def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n",
"def is_prime(n, number_of_tests=5):\n passes = 0\n prime = True #assume prime\n for i in xrange(number_of_tests):\n passes += 1\n random_int = random.randint(2, n-1)\n test = pow(random_int, n-1, n)\n if test != 1:\n prime = False\n break\n if prime:\n return 0\n else:\n return passes",
"def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)",
"def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1",
"def simulate_rerolling(p: float, n: int) -> int:\n\n counter = 0\n new_n = n\n while new_n > 0:\n for _ in range(new_n):\n ran = random.random()\n if ran < p:\n new_n -= 1\n counter += 1\n return counter",
"def bernoulli(n):\n\n x, res, s, c = Rat(0), Rat(0), Rat(0), Rat(-1)\n for k in range(1, n+2):\n c *= 1 - Rat(n + 2)/k\n s += x**n\n x += 1\n res += c*s/k\n return res",
"def test_probability_of_all_successes():\n\n assert(probability_of_all_successes(1/2,1,2) == 0.25)\n assert(are_close(probability_of_all_successes(1/6,1,2), 1/36, 0.001))\n assert(are_close(probability_of_all_successes(1/2,2,2), 7/16, 0.001))",
"def bernoulli(p):\n bern = rn.binomial(1,p)\n return bern",
"def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials",
"def bernoulli_num(n):\n return mp.bernoulli(n)",
"def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult",
"def binomialTest(k, n, p = 0.5, exact = False):\n\tassert(k <= n)\n\tassert(k >= 0 and n > 0)\n\tn = int(n)\n\tk = int(k)\n\tp_value = 1.0\n\n\t# Trivial cases where p = 0 or p = 1\n\tif p == 0.0: # Must then have k = 0\n\t\tif k > 0:\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 1.0\n\tif p == 1.0: # Must then have k = n\n\t\tif k <= n:\n\t\t\treturn 1.0\n\n\tif k == 0:\n\t\t# Probability of at least zero successes is 1\n\t\tp_value = 1.0\n\telif k == n:\n\t\t# Probability of all successes\n\t\tp_value = p**n\n\telse:\n\t\tif not exact and n*p > 30 and n*(1-p) > 30:\n\t\t\t# Use normal approximation\n\t\t\tmu = n*p\n\t\t\tsd = math.sqrt(n*p*(1-p))\n\t\t\tz = (k-mu)/sd\n\t\t\tif z < 0.0:\n\t\t\t\tp_value = 1-Prob_Z(z)\n\t\t\telse:\n\t\t\t\tp_value = Prob_Z(z)\n\t\telse:\n\t\t\tp_value = p**n # The last term in the sum\n\t\t\tfor j in range(k,n):\n\t\t\t\t# Compute logarithm of (n choose j) p^j (1-p)^ (n-j), the\n\t\t\t\t# binomial probability. Use logarithm to avoid overflow\n\t\t\t\t# problems with potentially enormous factorials.\n\t\t\t\tlog_p = logChoose(n,j) + j*math.log(p) + (n-j)*math.log(1-p)\n\t\t\t\tp_value += math.exp(log_p)\n\t\t\tif p_value > 1.0:\n\t\t\t\tp_value = 1.0\n\treturn p_value",
"def prior(n=10):\r\n p = []\r\n trials = 0\r\n acc = 0\r\n while acc < n:\r\n trials += 1\r\n r = np.random.rand(2) * np.array([4, 2]) + np.array([-2, -1])\r\n # print(\"r: \", r)\r\n if r[1] + r[0] >= -1 and r[1] - r[0] >= -1:\r\n p.append(r)\r\n acc += 1\r\n # print(\"trials: \", trials, \", acc: \", acc)\r\n return p",
"def game1(n):\r\n\twin=0\r\n\tfor i in range(n):\r\n\t\tif game(1)==1:\r\n\t\t\twin+=1\r\n\tprob1=win/n\r\n\treturn prob1",
"def bin_cdf(n, p, x):\n\n # p C (bin_dist) ** 0 ) *(1-bin_dist)** p\n\n # n = (p)=20\n # x = x = 1 = r\n # nCr = n! / r!(n-r)\n\n \n\n\n\n\n\n\n\n\n def bin_dist(n, p, x):\n \"\"\"\n Given n number of trials, p the probability of success,\n what is the probability of having x successes?\n\n Your function should raise a ValueError if x is higher\n than n.\n\n If you need to compute combinations, you can import the\n function \"comb\" from the package \"scipy.special\"\n\n :param n: number of trials (int)\n :param p: probability of success\n :param x: number of successes (int)\n :return: probability of having x successes\n :rtype: float\n :raise ValueError: if x > n\n \"\"\"\n def factorial(x):\n if x >= 0:\n \n factorial = 1\n\n for i in range(1, x + 1):\n factorial = float(factorial * i)\n # print(f' The factorial of {x} is {factorial}') \n return factorial\n\n else:\n raise ValueError(\"Sorry x cannot be a negative number\")\n\n def combination(n, r):\n \"\"\"\n Given n total number of items,\n what is the number of possible ways\n to choose r items from it?\n\n :param n: total number of items (integer)\n :param r: number of items to arrange (int)\n :return: number of combinations\n :rtype: integer\n \"\"\"\n\n \n\n \n numerator = factorial(n)\n denominator = factorial(r)\n subtracted_answer = factorial(n-r)\n \n\n answer = numerator/(denominator * subtracted_answer)\n print(answer)\n return answer \n\n # from scipy.special import comb\n if x > n:\n raise ValueError(\"Error, x must be less than n\")\n else:\n\n\n prob_success = float((combination(n, x)) * ((p**x)*((1-p)**(n-x))))\n\n print(prob_success)\n return prob_success \n \n # an= 1-bin_dist(n,p,x)\n # print(f'word{an}')\n # n= 12\n # p=0.25\n # # x=0??\n # ((n!)/ (x!*(n-x)!)) * (p**x) * (1-p)**(n-x)\n sum_prob = []\n for i in range(x+1):\n print(i)\n prob = bin_dist(n,p,x=i)\n sum_prob.append(prob)\n print(sum_prob)\n total =sum(sum_prob)\n print(total)",
"def prob(throw, n, d=6, type='classical'):\n count = 0\n table = throw_table(n, d, type)\n for t in table:\n if sum(t) == throw:\n count += 1\n \n return float(count)/len(table)",
"def prbs(m, n):\n return np.array(np.random.rand(m, n) > 0.5, dtype=np.int) - 0.5",
"def Ballie_PSW_test(n, max_trivial_trials=100):\n for i in range(max_trivial_trials):\n if primes[i] == n:\n return True\n if n % primes[i] == 0:\n return False\n if primes[i] ** 2 >= n:\n return True\n if not fermat_strong_test(n, 2):\n return False\n if not lucas_selfridge_test(n):\n return False\n return True",
"def is_probable_prime( n, trials = 5 ):\n\n if n in smallprimeset: return True\n\n if n < 2: return False\n # special case 2\n if n == 2:\n return True\n # ensure n is odd\n if n % 2 == 0:\n return False\n # write n-1 as 2**s * d\n # repeatedly try to divide n-1 by 2\n s = 0\n d = n - 1\n while True:\n quotient, remainder = divmod( d, 2 )\n if remainder == 1:\n break\n s += 1\n d = quotient\n assert( ( 2 ** s ) * d == n - 1 )\n\n # test the base a to see whether it is a witness for the compositeness of n\n def try_composite( a ):\n if pow( a, d, n ) == 1:\n return False\n for i in range( s ):\n if pow( a, 2 ** i * d, n ) == n - 1:\n return False\n return True # n is definitely composite\n\n for i in range( trials ):\n a = random.randrange( 2, n )\n if try_composite( a ):\n return False\n\n return True # no base tested showed n as composite",
"def isprime(n):\n if n!=int(n):\n return False\n n=int(n)\n #Miller-Rabin test for prime\n if n==0 or n==1 or n==4 or n==6 or n==8 or n==9:\n return False\n\n if n==2 or n==3 or n==5 or n==7:\n return True\n s = 0\n d = n-1\n while d%2==0:\n d>>=1\n s+=1\n assert(2**s * d == n-1)\n\n def trial_composite(a):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True\n\n for i in range(8):#number of trials\n a = random.randrange(2, n)\n if trial_composite(a):\n return False\n\n return True",
"def Binomial(n, p, succ=1, fail=0, symbol=None):\n\n return BinomialPSpace(n, p, succ, fail, symbol).value"
] | [
"0.8913371",
"0.89072156",
"0.8900664",
"0.7544249",
"0.75013113",
"0.7363845",
"0.7102742",
"0.6818586",
"0.6704421",
"0.6702102",
"0.6523535",
"0.6489954",
"0.6467495",
"0.6451158",
"0.6448554",
"0.63664526",
"0.6310604",
"0.6278471",
"0.6261832",
"0.62180156",
"0.615847",
"0.6149887",
"0.6014968",
"0.5989105",
"0.5911164",
"0.5899697",
"0.58803844",
"0.5879385",
"0.5869965",
"0.58381236"
] | 0.8910949 | 1 |
Test get_posts without ids | def test_get_posts_missing_ids(client):
response = client.simulate_get('/page/get_records')
assert response.status_code == 400 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_posts_fail(self):\n response = self.client.get(reverse('posts:post-list'))\n self.assertEqual(response.status_code, 200)",
"def testGetNonExistantPost(self):\n response = self.client.get(\"/api/posts/1\", headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data)\n self.assertEqual(data[\"message\"], \"Could not find post with id 1\")",
"def test_not_author_delete_post(self):\n self.client.login(username=\"Bill\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/', {\"next\": \"\"})\n self.assertNotEqual(list(Post.objects.filter(id=1)), [])",
"def test_no_error_if_filter_post_by_wrong_user_id(api_client, user_id):\n r = api_client.get(path=f\"/users/{user_id}/posts\")\n assert r.status_code == 200",
"def test_api_can_get_a_post(self):\n post = Post.objects.get()\n response = self.client.get(\n '/posts/',\n kwargs={'pk':post.id},\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, post)",
"def test_get_post_by_id(api_client):\n post_id = 10\n r = api_client.get(path=f\"/posts/{post_id}\").json()\n assert r[\"id\"] == post_id",
"def assertNoRepeatGuids(context, posts):\n guids = [p['guid'] for p in posts]\n context.assertTrue(len(set(guids)) == len(posts), \"Some guids repeated\")",
"def test_get_all_posts(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.client.get(\n reverse(\"post-list-create\")\n )\n # fetch the data from db\n expected = Post.objects.all()\n serialized = PostSerializerSchema(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def _filter_posts(posts):\n\n return filter(_filter_post, posts)",
"def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])",
"def test_api_can_get_a_post(self):\n post = Post.objects.get()\n response = self.client.get(\n reverse('details', kwargs={'pk': post.id}), format=\"json\")\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertContains(response, post)",
"def test_view_posts(self):\n url = reverse('blog_api:listcreate')\n response = self.client.get(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_author_delete_post(self):\n self.client.login(username=\"John\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/')\n self.assertEqual(list(Post.objects.filter(id=1)), [])",
"def load_posts(post_ids, current_user_id=None):\r\n logging.warn(\"Ids===={}\".format(post_ids))\r\n\r\n # If list is not used, or any call that trigger __iter__ will end up with the query syntax\r\n # rather than the data itself.\r\n #posts_query = Post.objects.filter(id__in=post_ids).limit(100).allow_filtering()\r\n #post_counters = list(PostCounter.objects.filter(id__in=post_ids).limit(100).allow_filtering())\r\n\r\n post_objects = []\r\n # ok ,\r\n for post_id in post_ids:\r\n p = Post.objects.get(id=post_id)\r\n\r\n try:\r\n pc = PostCounter.objects.get(id=post_id) #filter(lambda x: x.id == post.id, post_counters)\r\n stats = pc._as_dict()\r\n del stats['id']\r\n p.__dict__['statistics'] = stats\r\n except DoesNotExist, dne:\r\n pass\r\n\r\n if current_user_id is not None:\r\n try:\r\n pv = PostVote.objects.get(post_id=post_id, user_id=current_user_id)\r\n p.__dict__['upvoted'] = True\r\n except DoesNotExist, dne:\r\n pass\r\n post_objects.append(p)\r\n\r\n return post_objects",
"def test_posts_page(self):\n\n with self.client:\n result = self.client.get('/posts')\n self.assertEqual(result.status_code, 200)\n self.assertIn(b'<h5 class=\"card-title title-preview\">test 1 title</h5>', result.data)",
"def filter_posts(request):\n if request.is_ajax():\n id_user = int(request.POST.get('id_user'))\n if id_user>0:\n return render(request, \"posts_list.html\",\n {\"posts\": Post.objects.filter(\n author_id=id_user).order_by('date_pub'),\n })\n else:\n return render(request, \"posts_list.html\",\n {\"posts\": Post.objects.all().order_by('date_pub'),\n })\n return None",
"def test_post_creation_unauthenticated(self):\n url = reverse('post-list', args=[self.topic1.url_name])\n payload = {\n 'author': self.user1.id,\n 'title': 'Creating a post while being unauthenticated',\n 'content': 'Rich content 5',\n }\n response = self.client.post(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n new_post = Post.objects.filter(\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content'),\n topic=self.topic1\n )\n self.assertFalse(new_post.exists())",
"def test_no_op(self):\n request = RequestFactory().get('/?tags=')\n qs = MockQuerySet()\n filter = TestFilterSet(request.GET, qs)\n self.assertNotIn('tags__slug__in', filter.qs.filters)",
"def test_posts(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/posts?page=2', 'fixture_post.json')\n\n response = self.resource.posts('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)",
"def test_return_list_of_posts(self):\n self.create_new_user()\n self.create_new_posts()\n response = self.c.get('/wall/',\n content_type=\"application/json\")\n\n assert 200 == response.status_code\n assert 2 == len(response.json()['data']['posts'])\n assert response.json()['data']['posts'][0]['message'].startswith('All animals are equal')\n assert response.json()['data']['posts'][1]['message'].startswith('War is peace')",
"def test_deleting_post(self):\n\n delete_post(1)\n post = Post.query.get(1)\n self.assertEqual(post, None)",
"def test_api_can_delete_post(self):\n post = Post.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': post.id}),\n format='json',\n follow=True)\n\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_api_can_delete_post(self):\n post = Post.objects.get()\n response = self.client.delete(\n reverse('details', kwargs={'pk': post.id}),\n format='json',\n follow=True)\n\n self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)",
"def test_home_view_with_a_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Published')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )",
"def test_empty_posts(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assertTemplateNotUsed(response, \"snippets/carousel.html\")",
"def test_removePost(self):\n\t\tself.client.force_authenticate(user = User.objects.get(id=3))\n\n\t\tpost5 = Post.objects.create(author=User.objects.get(id=3), \n\t\t\ttext=\"Mahmut is best computer geek I have ever met\",\n\t\t\tgroup=Group.objects.get(id=3))\n\n\t\turl = \"/posts/4/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\t\turl = \"/posts/3/\"\n\t\tresponse = self.client.delete(url, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_get_post_list_logged_out(self):\n url = reverse('post-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects",
"def test_home_view_with_two_published_posts(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post 1',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Published Post 2',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post 2>', '<Post: Published Post 1>']\n )",
"def test_deleting_patient_posts(self):\n\n data = {\"post\": 1}\n result = self.client.post(\"/delete-post\", data=data)\n post = Post.query.get(1)\n\n self.assertEqual(result.status_code, 200)\n self.assertIsNone(post)"
] | [
"0.676717",
"0.65408266",
"0.6508336",
"0.64182526",
"0.63817656",
"0.6342567",
"0.617842",
"0.61667824",
"0.6114686",
"0.6045117",
"0.5996226",
"0.5942765",
"0.59425503",
"0.5932324",
"0.5903278",
"0.58965063",
"0.5880125",
"0.5872758",
"0.5825675",
"0.577318",
"0.576698",
"0.57662946",
"0.57662946",
"0.57406086",
"0.5735323",
"0.5730913",
"0.57251436",
"0.57196236",
"0.56959844",
"0.5689359"
] | 0.65633947 | 1 |
Test create_record with empty post body | def test_create_record_empty(client):
response = client.simulate_post('/page/create_record')
assert response.status_code == 400 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_record(self):\n body = [RecordModel()]\n response = self.client.open(\n '//records/create',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_create_record(self):\n pass",
"def test_create_empty_payload(self):\n response = self.client.post('/exercises/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_non_effective(self):\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(self.valid_payload),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_empty_data(self, client):\n url = reverse('users:create')\n response = client.post(url)\n assert response.status_code == 200\n assert 'This field is required.' in str(response.content)",
"def test_create_empty_payload(self):\n response = self.client.post('/routines/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_empty_optionals(self):\n data = self.valid_payload\n data[\"telephone\"] = \"\"\n data[\"cellphone\"] = \"\"\n data[\"activity_description\"] = \"\"\n data[\"about\"] = \"\"\n data[\"institute\"] = \"\"\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_control_create_with_empty_field(self, field):\n request = self.prepare_control_request_body()\n request[field] = None\n\n response = self.api.post(all_models.Control, data=request)\n\n self.assert400(response)",
"def test_empty_optionals(self):\n data = self.valid_payload\n # data[\"telephone\"] = \"\"\n # data[\"cellphone\"] = \"\"\n data[\"activity_description\"] = \"\"\n # data[\"about\"] = \"\"\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_create(self):\n pass",
"def test_create_risk_with_empty_field(self, field):\n risk_body = self.generate_risk_body()\n risk_body[field] = None\n\n response = self.api.post(all_models.Risk, data=risk_body)\n\n self.assert400(response)",
"def test_create_model_without_target(test_client, dataset):\n response = test_client.post('/create',\n data={\"file\": dataset},\n content_type=\"multipart/form-data\")\n assert response.status_code == 400",
"def test_api_can_create_a_post(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_create_a_post(self):\n self.login_client('test_user', 'testing')\n # hit the API endpoint\n response = self.make_a_request(\n kind=\"post\",\n data=self.valid_data\n )\n self.assertEqual(response.data, self.valid_data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # test with invalid data\n response = self.make_a_request(\n kind=\"post\",\n data=self.invalid_data\n )\n self.assertEqual(\n response.data[\"message\"],\n \"Both title and body are required to add a song\"\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_no_optionals(self):\n data = self.valid_payload\n del data[\"telephone\"]\n del data[\"cellphone\"]\n del data[\"activity_description\"]\n del data[\"about\"]\n del data[\"institute\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_create_new_user_blank_fields(self):\n self.maxDiff = None\n data = {\n 'email': '',\n 'password': '',\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n content = {\n 'email': ['This field may not be blank.'],\n 'password': ['This field may not be blank.'],\n }\n self.assertEqual(json.loads(response.content), content)",
"def test_case_empty(self):\n data = {\"numbers\": \"\"}\n response = self.client.post(\"/api/hi\", data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_busines_non_effective(self):\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(self.valid_payload),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_create_invalid(self):\n url = '/api/users/'\n data = {}\n username = str(uuid1())[:8]\n # Response should be status 400 where essential parameters are missing.\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['EmailAddress'] = '{}@dbca.wa.gov.au'.format(username)\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['DisplayName'] = 'Doe, John'\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['SamAccountName'] = username\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201) # Now valid.",
"def test_items_create_empty_user(patch_mongo):\n item = {\n \"content\": \"lorem ipsum\",\n \"priority\": \"high\",\n \"status\": \"backlog\",\n \"users\": [],\n }\n\n response = client.post(\"/item\", json=item)\n assert response.status_code == status.HTTP_400_BAD_REQUEST",
"def test_create_user_missing_data(self):\n data = {\"firstname\": \"John\"}\n res = self.post(url=\"/users\", data=data)\n self.assertException(res, exc.DataIsMissing)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)",
"def test_no_optionals(self):\n data = self.valid_payload\n # del data[\"telephone\"]\n # del data[\"cellphone\"]\n del data[\"activity_description\"]\n # del data[\"about\"]\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)",
"def test_empty(self):\n # No data\n self.client.post(self.new_url, {})\n self.assertEqual(Snippet.objects.count(), 0)\n\n data = self.valid_form_data()\n\n # No content\n data['content'] = ''\n self.client.post(self.new_url, data)\n self.assertEqual(Snippet.objects.count(), 0)\n\n # Just some spaces\n data['content'] = ' '\n self.client.post(self.new_url, data)\n self.assertEqual(Snippet.objects.count(), 0)\n\n # Linebreaks or tabs only are not valid either\n data['content'] = '\\n\\t '\n self.client.post(self.new_url, data)\n self.assertEqual(Snippet.objects.count(), 0)",
"def test_post_no_content(self):\n response = self.post(title='foo')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_model_without_file(test_client, dataset):\n response = test_client.post('/create?target=Species',\n data={\"file\": \"No file\"},\n content_type=\"multipart/form-data\")\n assert response.status_code == 400",
"def test_perform_create(self):\n data = {\n 'name': 'Jane Joe',\n 'crm': 1234,\n 'email': 'jane@joe.com',\n 'phone': '+55998754128'\n }\n response = self.unath_client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.post(reverse('doctor-list'), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_create_serializer_with_no_notes(self):\n end_time = get_utc_now()\n start_time = get_time_relative_units_ago(end_time, hours=8)\n\n # empty string with notes here will still fail if it's broken -- this is with an RequestFactory\n # to mimic how data is sent, i think APIClient serializes something\n post_data = {\"start_time\": start_time, \"end_time\": end_time, \"notes\": \"\"}\n\n context = create_api_request_context(self.url, self.user_1, post_data)\n\n serializer = SleepLogCreateUpdateSerializer(data=post_data, context=context)\n valid = serializer.is_valid()\n\n self.assertTrue(valid, serializer.errors)",
"def test_create_dyn():\n # create an id.\n # the_id = str(uuid.uuid1())\n the_id = 'from-test-dyndb'\n\n # create a row of data\n row = {'id': the_id, 'company': 'test company',\n 'title': 'CEO', 'type': '1',\n 'location': 'Shambhala', 'snippet': 'This is a test.',\n 'salary': '$100', 'source': 'LinkedIn',\n 'the_type': 'Some type', 'link': 'my link',\n 'updated': '2021-01-01 00:00:00'}\n\n # create the record and get the bool.\n success = dyn_crud.create_record(row)\n\n # run the test.\n assert True if success else False",
"def test_create_empty_user(patch_mongo):\n user = {\n \"name\": \"\"\n }\n\n response = client.put(\"/user\", json=user)\n assert response.status_code == status.HTTP_400_BAD_REQUEST",
"def test_data_object_post(self):\n pass"
] | [
"0.7912478",
"0.77811354",
"0.7402375",
"0.7200082",
"0.7187702",
"0.7172909",
"0.6987341",
"0.6975383",
"0.68595964",
"0.67589927",
"0.67496216",
"0.67295873",
"0.6715952",
"0.67064273",
"0.6694563",
"0.66786766",
"0.66364396",
"0.6629828",
"0.6628264",
"0.662336",
"0.66098005",
"0.66036314",
"0.65643615",
"0.65620065",
"0.6548578",
"0.6528579",
"0.6528229",
"0.65163106",
"0.65042794",
"0.6493933"
] | 0.83715445 | 0 |
line: original line of setupRace( racename....; pos: index of the open parenthesis; isRuntimeRace: True if the race has been created from the interface (not loaded from file); return True if successfully created, False otherwise, or an exception is raised | def newRace(self, line, pos, isRuntimeRace=False):
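# Extract the comma-separated parameters between the given position and the closing parenthesis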
c = line.find(")")
if c == -1:
params = []
else:
params = line[pos:c].split(",")
# --SetupRace has several parameters:
# -- racename to show on screen (no spaces nor underscore)
# -- array of checkpoints positions
# -- is loop ? true or false (in lowercase, please)
# -- Print Altitude of the next checkpoint on screen ? true or false (in lowercase, please)
r = raceClass(self)
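# Fill in the race fields from the parsed parameters; malformed input is caught below and aborts creation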
try:
r.name = params[0][params[0].find("'") + 1 :params[0].rfind("'")]
r.pointsVar[0] = params[1].replace(" ", "")
r.isLoop = params[2] == "true"
r.showAltitude = params[3] == "true"
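# For races loaded from file, the checkpoint positions array still has to be resolved and parsed from the script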
if not isRuntimeRace:
self._getRange(r.pointsVar)
self._parseArray(r.pointsVar, r, r.points)
except Exception:
del r
return False
else:
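# Parsing succeeded: register the race; only runtime-created races mark the data as modified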
self.races.append(r)
self.modified = isRuntimeRace
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutatraj(self,line):\n line=line.strip().split()\n no=line[1];mutares=line[2]\n typelines=open(self.mainpath+'/A-R.dat','r').readlines() \n mutapath=os.path.join(self.mainpath,no+mutares)\n if not os.path.exists(mutapath):os.makedirs(mutapath)\n rosettapdb=os.path.join(mutapath,'rospdbs')\n rosettamuta=os.path.join(mutapath,'rosmutapdbs')\n amberpdb=os.path.join(mutapath,'ambpdbs')\n minpdb=os.path.join(mutapath,'minpdbs')\n if not os.path.exists(rosettapdb):os.makedirs(rosettapdb)\n if not os.path.exists(rosettamuta):os.makedirs(rosettamuta)\n if not os.path.exists(amberpdb):os.makedirs(amberpdb)\n if not os.path.exists(minpdb):os.makedirs(minpdb)\n res=''\n res+='parm '+self.mainpath+'/'+str(self.top)+'\\n';res+='trajin '+self.mainpath+'/'+self.crd+' 1 '+str(self.mutaframes)+'\\n'\n res+='trajout snap.pdb multi chainid A\\n'\n f=open(mutapath+'/traj.in','w')\n f.write(res)\n f.close()\n os.system('cpptraj < '+no+mutares+'/traj.in')\n res=''\n res+='NATRO'+'\\n';res+='start'+'\\n'\n res+=' '+str(no)+' A '+' PIKAA '+str(mutares)+'\\n'\n resfile=mutapath+'/resfile.in'\n f=open(resfile,'w')\n f.write(res)\n f.close()\n inputpdbs=self._findfile('./','snap.pdb')\n print(inputpdbs)\n for i in range(len(inputpdbs)):\n AtoR(self.mainpath,rosettapdb,inputpdbs[i],typelines)\n inputf=os.path.join(rosettapdb,inputpdbs[i])\n fileno=re.sub(\"\\D\",\"\",inputpdbs[i])\n cmd='fixbb.default.linuxgccrelease -in:file:s '+inputf+' -out:path:all '+rosettamuta+' -resfile '+resfile+' -out:suffix .'+str(fileno)+' -overwrite -ex1:level 2 -ex2:level 2 -ex3:level 0 -ex4:level 0' \n os.system(cmd)\n RtoA(rosettamuta,amberpdb,inputpdbs[i],typelines)\n self.minimize(amberpdb,inputpdbs[i],minpdb)\n self.calint(no,mutares,'wild')\n self.calint(no,mutares,'muta')",
"def new_line(script_l, character, episode):\n if up.check(\"characters\", character):\n char_id = up.giveId(\"characters\", character)\n else:\n up.insertCharacter(character)\n char_id = up.giveId(\"characters\", character)\n if up.check(\"episodes\", episode):\n ep_id = up.giveId(\"episodes\", episode)\n else:\n up.insertEpisode(episode)\n ep_id = up.giveId(\"episodes\", episode)\n if up.check(\"script\", script_l) and up.check(\"characters\", character) and up.check(\"episodes\", episode):\n return \"line exists\"\n else:\n engine.execute(f\"\"\"\n INSERT INTO script (script_l, characters_char_id, episodes_ep_id) VALUES\n (\"{script_l}\", \"{char_id}\", \"{ep_id}\");\n \"\"\")\n return f\"successfully loaded: {character},{script_l},{episode}\"",
"def is_line(self): \n return False",
"def insertLine(row):\n if check(\"script\", row[\"dialogue\"]) and check(\"characters\", row[\"character\"]) and check(\"episodes\", row[\"episode\"]):\n return \"line exists\"\n else:\n if check(\"characters\", row[\"character\"]):\n char_id = giveId(\"characters\", row[\"character\"])\n else:\n insertCharacter(row[\"character\"])\n char_id = giveId(\"characters\", row[\"character\"])\n \n if check(\"episodes\", row[\"episode\"]):\n ep_id = giveId(\"episodes\", row[\"episode\"])\n else:\n insertEpisode(row[\"episode\"])\n ep_id = giveId(\"episodes\", row[\"episode\"])\n #meme optional insert somehow\n #meme_id = 0\n engine.execute(f\"\"\"\n INSERT INTO script (line_n, script_l, characters_char_id, episodes_ep_id) VALUES\n (\"{row['line']}\", \"{row['dialogue']}\", \"{char_id}\", \"{ep_id}\");\n \"\"\")",
"def check_line(self):\n if not self.hosts and not self.line:\n self.msg(\"There is no line here. You can create one with +line/createline.\")\n return\n return True",
"def test_first_line_amiramesh(self):\n self.assertEqual(self.header.designation.filetype, 'AmiraMesh')",
"def read_gro(strucC,in_gro,coordupdate=False,set_chaintoresidue=False,verbose=False,debug = False):\n # atomicpy functions\n\n \n\n try:\n with open(in_gro,'r') as F:\n Lines = F.readlines()\n F.close()\n except IOError:\n print \" Specified .gro file \",in_gro,\" does not exisit \"\n sys.exit(\"Invalid file \")\n\n # Check to see if a previous read has occured\n pt_update = False \n n_pt = int( Lines[1])\n if( len(strucC.ptclC) > 0 ):\n pt_update = True\n # Check of conistent number of atoms\n pt_cnt = len(strucC.ptclC)\n if( pt_cnt != n_pt):\n print \" Current structure has %d atoms and %s has %d\"%(pt_cnt,in_gro,n_pt)\n sys.exit(\" Inconsistent number of atoms \" )\n #\n # Read in .gro file\n #\n line_cnt = 0\n ptcl_cnt = 0 \n for line in Lines :\n line_cnt = line_cnt + 1\n if( line_cnt > 2 and len(line) >= 44 and ptcl_cnt < n_pt): # skip header\n # Set particle i \n ptcl_cnt += 1 \n #\n residue_i = int(line[0:5].strip())\n resname_i = line[5:10].strip() \n g = line[10:15].strip()\n particle_i = int(line[15:20].strip())\n x = units.convert_nm_angstroms( float( line[20:28] ))\n y = units.convert_nm_angstroms( float(line[28:36]))\n z = units.convert_nm_angstroms( float(line[36:44]))\n #r_i = numpy.array( [float(x)*10,float(y)*10,float(z)*10] )\n r_i = [x,y,z]\n if(debug):\n print \" particle \",ptcl_cnt,g,r_i\n if( pt_update ):\n pt_i = strucC.ptclC[ptcl_cnt]\n else:\n pt_i = Particle( )\n pt_i.position = r_i\n if( not coordupdate ):\n add_dict = pt_i.tagsDict\n add_dict[\"residue\"] = int(residue_i)\n add_dict[\"resname\"] = resname_i\n add_dict[\"label\"] = str(g)\n # set as defualts \n add_dict[\"fftype\"] = \"??\"\n add_dict[\"qgroup\"] = 1\n if( set_chaintoresidue ):\n add_dict[\"chain\"] = int(residue_i)\n else:\n add_dict[\"chain\"] = 1 \n pt_i.setTagsDict(add_dict)\n\n if( not pt_update ):\n strucC.ptclC.put(pt_i)\n # \n # Get lattice vector from last line\n #\n line = Lines[-1]\n col = line.split()\n n_vec = int( len(col))\n if( n_vec == 3 ):\n strucC.latvec[0][0] = units.convert_nm_angstroms(float( col[0] ) )\n strucC.latvec[1][1] = units.convert_nm_angstroms(float( col[1] ) )\n strucC.latvec[2][2] = units.convert_nm_angstroms(float( col[2] ) )\n if( n_vec == 9 ):\n strucC.latvec[0][0] = units.convert_nm_angstroms(float( col[0] ) )\n strucC.latvec[1][1] = units.convert_nm_angstroms(float( col[1] ) )\n strucC.latvec[2][2] = units.convert_nm_angstroms(float( col[2] ) )\n strucC.latvec[0][1] = units.convert_nm_angstroms(float( col[3] ) )\n strucC.latvec[0][2] = units.convert_nm_angstroms(float( col[4] ) )\n strucC.latvec[1][0] = units.convert_nm_angstroms(float( col[5] ) )\n strucC.latvec[1][2] = units.convert_nm_angstroms(float( col[6] ) )\n strucC.latvec[2][0] = units.convert_nm_angstroms(float( col[7] ) )\n strucC.latvec[2][1] = units.convert_nm_angstroms(float( col[8] ) )\n\n if( debug ):\n print \" Box size \",strucC.latvec[0][0],strucC.latvec[1][1],strucC.latvec[2][2],\" angstorms \"",
"def read_gro(strucC,in_gro,coordupdate=False,set_chaintoresidue=False,verbose=False,debug = False):\n # atomicpy functions\n\n \n\n try:\n with open(in_gro,'r') as F:\n Lines = F.readlines()\n F.close()\n except IOError:\n print \" Specified .gro file \",in_gro,\" does not exisit \"\n sys.exit(\"Invalid file \")\n\n # Check to see if a previous read has occured\n pt_update = False \n n_pt = int( Lines[1])\n if( len(strucC.ptclC) > 0 ):\n pt_update = True\n # Check of conistent number of atoms\n pt_cnt = len(strucC.ptclC)\n if( pt_cnt != n_pt):\n print \" Current structure has %d atoms and %s has %d\"%(pt_cnt,in_gro,n_pt)\n sys.exit(\" Inconsistent number of atoms \" )\n #\n # Read in .gro file\n #\n line_cnt = 0\n ptcl_cnt = 0 \n for line in Lines :\n line_cnt = line_cnt + 1\n if( line_cnt > 2 and len(line) >= 44 and ptcl_cnt < n_pt): # skip header\n # Set particle i \n ptcl_cnt += 1 \n #\n residue_i = int(line[0:5].strip())\n resname_i = line[5:10].strip() \n g = line[10:15].strip()\n particle_i = int(line[15:20].strip())\n x = units.convert_nm_angstroms( float( line[20:28] ))\n y = units.convert_nm_angstroms( float(line[28:36]))\n z = units.convert_nm_angstroms( float(line[36:44]))\n #r_i = numpy.array( [float(x)*10,float(y)*10,float(z)*10] )\n r_i = [x,y,z]\n if(debug):\n print \" particle \",ptcl_cnt,g,r_i\n if( pt_update ):\n pt_i = strucC.ptclC[ptcl_cnt]\n else:\n pt_i = Particle( )\n pt_i.position = r_i\n if( not coordupdate ):\n add_dict = pt_i.tagsDict\n add_dict[\"residue\"] = int(residue_i)\n add_dict[\"resname\"] = resname_i\n add_dict[\"label\"] = str(g)\n # set as defualts \n add_dict[\"fftype\"] = \"??\"\n add_dict[\"qgroup\"] = 1\n if( set_chaintoresidue ):\n add_dict[\"chain\"] = int(residue_i)\n else:\n add_dict[\"chain\"] = 1 \n pt_i.setTagsDict(add_dict)\n\n if( not pt_update ):\n strucC.ptclC.put(pt_i)\n # \n # Get lattice vector from last line\n #\n line = Lines[-1]\n col = line.split()\n n_vec = int( len(col))\n if( n_vec == 3 ):\n strucC.latvec[0][0] = units.convert_nm_angstroms(float( col[0] ) )\n strucC.latvec[1][1] = units.convert_nm_angstroms(float( col[1] ) )\n strucC.latvec[2][2] = units.convert_nm_angstroms(float( col[2] ) )\n if( n_vec == 9 ):\n strucC.latvec[0][0] = units.convert_nm_angstroms(float( col[0] ) )\n strucC.latvec[1][1] = units.convert_nm_angstroms(float( col[1] ) )\n strucC.latvec[2][2] = units.convert_nm_angstroms(float( col[2] ) )\n strucC.latvec[0][1] = units.convert_nm_angstroms(float( col[3] ) )\n strucC.latvec[0][2] = units.convert_nm_angstroms(float( col[4] ) )\n strucC.latvec[1][0] = units.convert_nm_angstroms(float( col[5] ) )\n strucC.latvec[1][2] = units.convert_nm_angstroms(float( col[6] ) )\n strucC.latvec[2][0] = units.convert_nm_angstroms(float( col[7] ) )\n strucC.latvec[2][1] = units.convert_nm_angstroms(float( col[8] ) )\n\n if( debug ):\n print \" Box size \",strucC.latvec[0][0],strucC.latvec[1][1],strucC.latvec[2][2],\" angstorms \"",
"def is_line(self):\n return False",
"def is_line(self):\n return True",
"def is_line(self):\n return True",
"def line_exists():\n global _current_line\n return _current_line is not None",
"async def line_to_obj(raw_line: bytearray, ref: Ref) -> Optional[ObjectRec]:\n # secondary_update = None\n if raw_line[0:1] == b\"0\":\n return None\n\n if raw_line[0:1] == b'-':\n rec = ref.obj_store[int(raw_line[1:], 16)]\n rec.alive = 0\n await mark_dead(rec.id)\n\n if 'Weapon' in rec.Type:\n impacted = await determine_contact(rec, type='impacted', ref=ref)\n if impacted:\n rec.impacted = impacted[0]\n rec.impacted_dist = impacted[1]\n sql = create_impact_stmt()\n vals = (ref.session_id, rec.parent, rec.impacted, rec.id,\n ref.time_offset, rec.impacted_dist)\n await DB.execute(sql, *vals)\n return rec\n\n comma = raw_line.find(b',')\n rec_id = int(raw_line[0:comma], 16)\n try:\n rec = ref.obj_store[rec_id]\n rec.update_last_seen(ref.time_offset)\n rec.updates += 1\n\n except KeyError:\n # Object not yet seen...create new record...\n rec = ObjectRec(id_=rec_id,\n session_id=ref.session_id,\n first_seen=ref.time_offset,\n last_seen=ref.time_offset)\n ref.obj_store[rec_id] = rec\n\n while True:\n last_comma = comma + 1\n comma = raw_line.find(b',', last_comma)\n if comma == -1:\n break\n\n chunk = raw_line[last_comma:comma]\n eq_loc = chunk.find(b\"=\")\n key = chunk[0:eq_loc]\n val = chunk[eq_loc + 1:]\n\n if key == b\"T\":\n i = 0\n pipe_pos_end = -1\n while i < COORD_KEY_LEN:\n pipe_pos_start = pipe_pos_end + 1\n pipe_pos_end = chunk[eq_loc + 1:].find(b'|', pipe_pos_start)\n if pipe_pos_start == -1:\n break\n\n coord = chunk[eq_loc + 1:][pipe_pos_start:pipe_pos_end]\n if coord != b'':\n c_key = COORD_KEYS[i]\n if c_key == \"lat\":\n rec.lat = float(coord) + ref.lat\n elif c_key == \"lon\":\n rec.lon = float(coord) + ref.lon\n else:\n rec.update_val(c_key, float(coord))\n i += 1\n else:\n rec.update_val(\n key.decode('UTF-8') if key != b'Group' else 'grp', val.decode('UTF-8'))\n\n rec.compute_velocity(ref.time_since_last)\n\n if rec.updates == 1 and rec.should_have_parent():\n parent_info = await determine_contact(rec, type='parent', ref=ref)\n if parent_info:\n rec.parent = parent_info[0]\n rec.parent_dist = parent_info[1]\n\n return rec",
"def run_unknown(self, line):\n pass",
"def is_line_busy(self) -> bool:",
"def _test_line(\n self, line, manager_data=None\n ): # pylint: disable=too-many-branches # pragma: no cover\n\n if PyFunceble.CONFIGURATION[\"db_type\"] == \"json\" and manager_data is not None:\n autocontinue = AutoContinue(self.file, parent_process=False)\n inactive_db = InactiveDB(self.file)\n mining = Mining(self.file)\n else:\n # We use the previously initiated autocontinue instance.\n autocontinue = self.autocontinue\n\n # We use the previously initiated inactive database instance.\n inactive_db = self.inactive_db\n\n # We use the previously initiated mining instance.\n mining = self.mining\n\n # We remove cariage from the given line.\n line = line.strip()\n\n if not line or line[0] == \"#\":\n # We line is a comment line.\n\n # We return None, there is nothing to test.\n return None\n\n if Regex(line, self.regex_ignore, escape=False, return_data=False).match():\n # The line match our list of elemenet\n # to ignore.\n\n # We return None, there is nothing to test.\n return None\n\n # We format the line, it's the last\n # rush before starting to filter and test.\n subject = self._format_line(line)\n\n if (\n not PyFunceble.CONFIGURATION[\"local\"]\n and PyFunceble.Check(subject).is_reserved_ipv4()\n ):\n # * We are not testing for local components.\n # and\n # * The subject is a reserved IPv4.\n\n # We return None, there is nothing to test.\n return None\n\n if PyFunceble.CONFIGURATION[\"filter\"]:\n # We have to filter.\n\n if Regex(\n subject, PyFunceble.CONFIGURATION[\"filter\"], return_data=False\n ).match():\n # The line match the given filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n else:\n # The line does not match the given filter.\n\n # We return None.\n return None\n else:\n # We do not have to filter.\n\n # We get the status of the current line.\n status = self.__process_test(subject)\n\n # We add the line into the auto continue database.\n autocontinue.add(subject, status)\n\n if status.lower() in self.list_of_up_statuses:\n # The status is in the list of UP status.\n\n # We mine if necessary.\n mining.mine(subject, self.file_type)\n\n if subject in inactive_db:\n # The subject is in the inactive database.\n\n # We generate the suspicous file.\n Generate(\n subject, \"file_domain\", PyFunceble.STATUS[\"official\"][\"up\"]\n ).analytic_file(\"suspicious\")\n\n # And we remove the current subject from\n # the inactive database.\n inactive_db.remove(subject)\n else:\n # The status is not in the list of UP status.\n\n # We add the current subject into the\n # inactive database.\n inactive_db.add(subject, status)\n\n if (\n self.complements_test_started\n and PyFunceble.CONFIGURATION[\"db_type\"] == \"json\"\n ):\n # We started the test of the complements.\n\n if \"complements\" in autocontinue.database:\n # The complement index is present.\n\n while subject in autocontinue.database[\"complements\"]:\n # We loop untill the line is not present into the\n # database.\n\n # We remove the currently tested element.\n autocontinue.database[\"complements\"].remove(subject)\n\n # We save the current state.\n autocontinue.save()\n\n if manager_data is None:\n # We are not in a multiprocess environment.\n\n # We update the counters\n autocontinue.update_counters()\n\n # We process the autosaving if it is necessary.\n self.autosave.process(test_completed=False)\n elif PyFunceble.CONFIGURATION[\"db_type\"] == \"json\":\n # We are in a multiprocess environment.\n\n # We save everything we initiated into the server process\n 
manager_data.append(\n {\n \"autocontinue\": autocontinue.database,\n \"inactive_db\": inactive_db.database,\n \"mining\": mining.database,\n }\n )\n\n # We return None.\n return None",
"def line_frame_number_unique(self, line):\n hash_code = get_parent_hash(line)\n if hash_code == \"p\":\n return True\n\n # this is a pythonic way of doing\n if self.find_list_for_new_line(line) is None:\n return True\n\n return False",
"def update(self, line):",
"def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n \"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)",
"def check_inst(obsid):\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'filetype=evt1\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline >' + zspace \n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n dfile = ''\n for ent in data:\n mc = re.search('hrcf', ent)\n if mc is not None:\n dfile = ent\n\n if dfile == '':\n return 'na'\n\n flist = pyfits.open(dfile)\n try:\n inst = flist[0].header['DETNAM']\n except:\n inst = flist[1].header['DETNAM']\n flist.close()\n\n mc = re.search('HRC-I', inst)\n if mc is not None:\n inst = 'i'\n else:\n inst = 's'\n\n mcf.rm_file(dfile)\n\n return inst",
"def check_record(idline,nclline,sepline,qualiline):\n return check_idline(idline) and check_sepline(sepline)",
"def forward(cargo):\n # If intersection is reached change state\n line_follower.run_forward()\n #print(\"Running forward\")\n\n # return\n new_state = \"follow\"\n txt = \"follow line..\"\n return (new_state, txt)",
"def event11515372():\n header(11515372, 1)\n skip_if_event_flag_off(4, EVENT.JareelDead)\n chr.disable(CHR.AbyssalPrinceJareel)\n chr.kill(CHR.AbyssalPrinceJareel, False)\n chr.disable_backread(CHR.AbyssalPrinceJareel)\n end()\n chr.disable_ai(CHR.AbyssalPrinceJareel)\n if_event_flag_on(1, 11515373)\n if_player_inside_region(1, REGION.JareelArena)\n if_condition_true(0, 1)\n chr.enable_ai(CHR.AbyssalPrinceJareel)\n boss.enable_boss_health_bar(CHR.AbyssalPrinceJareel, TEXT.AbyssalPrinceJareelBossName)",
"def __init__(self):\n #State constants\n self.STATE_WAITING = 0# default state\n self.STATE_PLAY = 1# lasts until end of row, then reverts to waiting\n self.STATE_MARK = 2# marks sub-trajectory\n self.STATE_STOP = 3# program cleanup before exit\n self.STATE_NEXT = 4# reverts to waiting after next sample row is loaded.\n self.STATE_PREV = 5# reverts to waiting after previous sample row is loaded.\n self.state = self.STATE_WAITING # initial program state\n\n #Color constants\n self.BLACK = (0,0,0)\n self.WHITE = (255,255,255)\n self.RED = (255,0,0)\n self.GRAY = (211,211,211)\n self.GREEN = (0,255,0)\n\n # MPO info\n self.mpo_color = 0 # red (0) if in normal trajectory section, white(1) if in sub-trajectory.\n self.first_index = True # indicates if the first index of sub-trajectory has been chosen.\n self.first = 0 # index of first sub-trajectory point\n self.second = 0 # index of second sub-trajectory point\n self.sub_index_list = [] # format is: [(number of data row, start index in row, end index in row),...]\n\n # Landmark info\n self.num_lndmrks = 0# number of landmarks in the file\n self.bg_img_path = '' # background image path\n self.PATTERN_INFO_ROW = 0 # index of information about pattern\n self.LANDMARK_VERTEX_ROW = 1 # index of first row containing landmark vertices\n self.BG_IMAGE_INDEX = 0 # index in row where background image path is stored.\n self.LNDMRK_NUM_INDEX = 3 # index in row where number of landmarks is specified\n\n # File handling\n self.file_name = '' # path of file name\n self.ifile = '' # file to read\n self.ofile = '' #file to write\n self.reader = '' # csv reader object\n self.writer = '' # csv writer object\n self.bg = False # flag if file has a bg image.\n\n # Row info\n self.row = '' # current row from file\n self.row_length = 0 # length of current row \n self.tot_rows = 0 # total amount of rows in file\n self.data_start_row = 0 # row number where path data starts\n self.data_row_num = 0 # number of current data row\n self.paths = [] # holds mpo path data\n self.row_index = 0 # holds current mpo position\n self.tot_data_rows = 0\n\n # Pygame info\n self.screen_width = 600 # default values\n self.screen_height = 600 # default values\n self.SCREEN_SIZE = self.screen_width, self.screen_height \n self.WIDTH_INDEX = 1 # index in pattern info row that specifies window width\n self.HEIGHT_INDEX = 2 # index in pattern info row that specifies window height",
"def is_new_snp(self,seqid,pos,allele):\r\n self.seqid = seqid\r\n self.source = \"gff3_manager\"\r\n self.type = \"SNP\"\r\n self.start = pos\r\n self.end = pos\r\n self.score = \".\"\r\n self.strand = \"+\"\r\n self.phase = \".\"\r\n self.attributes.id = seqid+\"_\"+str(pos)\r\n self.attributes.note = \"new sequence variant found after sequencing\"\r\n self.attributes.allele = allele\r\n self.attributes.active = True\r\n self.attributes.discovered = True\r\n self.attributes.validated = False",
"def get_line_type(line):\n\n line_type = None\n if line.find('Train') != -1:\n line_type = 'train'\n elif line.find('Test') != -1:\n line_type = 'test'\n return line_type",
"def test_create_experiment_race_condition_broken(self, monkeypatch):\n with OrionState(experiments=[config]) as cfg:\n parent = create_experiment(config[\"name\"])\n child = create_experiment(\n config[\"name\"],\n space={\"y\": \"uniform(0, 10)\"},\n branching={\"enable\": True},\n storage=cfg.storage_config,\n )\n\n def insert_race_condition(self, query):\n is_auto_version_query = query == {\n \"name\": config[\"name\"],\n \"refers.parent_id\": parent.id,\n }\n if is_auto_version_query:\n data = [child.configuration]\n # The query returns no other child, never!\n else:\n data = [parent.configuration]\n\n insert_race_condition.count += int(is_auto_version_query)\n\n return data\n\n insert_race_condition.count = 0\n\n monkeypatch.setattr(\n setup_storage().__class__, \"fetch_experiments\", insert_race_condition\n )\n\n with pytest.raises(RaceCondition) as exc:\n create_experiment(\n config[\"name\"],\n space={\"y\": \"uniform(0, 10)\"},\n branching={\"enable\": True},\n )\n\n assert insert_race_condition.count == 2\n assert \"There was a race condition during branching and new version\" in str(\n exc.value\n )",
"def __check_external_code__(self, line, name):\n line, _ = self.find_vars_in_str(line)\n words = line.split()\n self.E_str = f\"check_{name}_command\"\n\n corr_syn = f\"The correct syntax for running a bit of {name} code is:\\n\\n\"\n corr_syn += f\" {name} \" + \" {\\n\\n ...\\n\\n }\"\n\n # Check the braces are opened and closed properly\n if self.file_ltxt[self.line_num+1] != \"{\":\n self.print_error(f\"You must open a bracket for the {name} command\"+\"\\n\\n\"\n + corr_syn)\n\n # Get the filetxt after the command\n rest_filetxt = '\\n'.join(self.file_ltxt[self.line_num:])\n if gen_parse.get_bracket_close(rest_filetxt, \"{\", \"}\") == -1:\n self.print_error(f\"You must close a brace in the {name} command.\"+\"\\n\\n\"\n + corr_syn)\n\n # Find where the little script ends\n brack_num, new_lines = 1, []\n for end_line, new_line in enumerate(self.file_ltxt[self.line_num+2:]):\n if new_line == '{': brack_num += 1\n elif new_line == '}': brack_num -= 1\n\n if brack_num > 0: new_lines.append(new_line)\n elif brack_num == 0: break\n\n end_line += self.line_num + 2\n\n return end_line",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success",
"def checkSuccess():\n try:\n relin = np.genfromtxt(\"{}reliability.in\".format(basedir), dtype=str)\n # Accurate argument order\n args = [str(\"\\'\"+probpath+\"\\'\"), str(\"\\'\"+obpath+\"\\'\"),\n str(\"\\'\"+outfile+\"\\'\"),\n fcsthr, str(\"\\'\"+variable+\"\\'\"),\n rthresh, sixhour, nbrhd, rbox_bounds[0],\n rbox_bounds[1], rbox_bounds[2], rbox_bounds[3]]\n success = True # Assume success initially\n # Ensure that each argument was placed into the proper line of the\n # reliability input file\n for ind, line in enumerate(relin):\n # If an argument doesn't line up with the rel in arg, set False\n print(str(args[ind]).replace('\\\\', ''), line)\n if (str(args[ind]).replace('\\\\', '') != line):\n success = False\n print(success)\n except:\n success = False\n return success"
] | [
"0.5446975",
"0.5230643",
"0.5115488",
"0.50672674",
"0.50217557",
"0.48601362",
"0.4847687",
"0.4847687",
"0.48320773",
"0.48207554",
"0.48207554",
"0.48164436",
"0.4721421",
"0.46992648",
"0.4686111",
"0.46842042",
"0.46652928",
"0.4639667",
"0.4631652",
"0.45743117",
"0.45695606",
"0.4559103",
"0.45348722",
"0.45189258",
"0.4500967",
"0.448638",
"0.4477088",
"0.4471809",
"0.44675982",
"0.44675982"
] | 0.7775612 | 0 |
return a list with race names | def getRaceList(self):
l = []
for r in self.races:
l.append(r.name)
return l | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def race(self, instance):\r\n return '/'.join([raza.name for raza in instance.user.profile.race.all()])",
"def names(self) -> list[str]:",
"def getNames(self) -> List[unicode]:\n ...",
"def get_seq_names(self) -> List[str]:\n return [seq.Name.lower() for seq in self.Sequencers]",
"def donor_names():\n names = list()\n for name in donor_db:\n names = names + [name[0]]\n return names",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def names(self) -> List:\n ...",
"def names(cls) -> List[str]:",
"def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)",
"def Student_names(l:list)->list:\n result=[]\n for s in l:\n result.append(s.name)\n return result",
"def name(self):\n return [o.name for o in self.obs]",
"def teammates_player_names(self):\n return [p.name for p in self.teammates]",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def get_room_names(soup: bs4.BeautifulSoup) -> Iterable[str]:\n return set(x.string for x in soup.Lecture.find_all(\"RaumBez\"))",
"def get_names(r):\n names = []\n for name in r[\"results\"]:\n x = name[\"name\"]\n name.append(x)\n return name",
"def name_get(self):\n res = []\n for employee in self:\n name = employee.name\n name = ' '.join([name or '', employee.middle_name or '', employee.last_name or ''])\n res.append((employee.id, name))\n return res",
"def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names",
"def get_nice_names(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[1])\n return result",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")",
"def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames",
"def return_names(self):\n return self.__name_list",
"def get_rnames(self):\n for row in self._get_references_node():\n yield row['name']",
"def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]",
"def getResiduesByName(self, resn):\n\n\t\treslist = []\n\t\tfor chn in self.chain:\n\t\t\tfor res in chn.residue:\n\t\t\t\tif res.name == resn:\n\t\t\t\t\treslist.append(res)\n\n\t\treturn reslist",
"def namelist(self):\n return []",
"def currentAntennaNames(carmaOnly=False) :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n cname = i.carmaAntennaName\n tname = i.typedAntennaName\n if (carmaOnly) :\n names = i.carmaAntennaName\n else :\n names = \"%s(%s)\" %(cname,tname)\n namelist.append(names)\n return namelist",
"def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist",
"def donor_names(self):\n return [donor.name for donor in self.donors]"
] | [
"0.72669965",
"0.65117115",
"0.63962364",
"0.6379379",
"0.6349073",
"0.6317211",
"0.61734116",
"0.615667",
"0.6151759",
"0.6090637",
"0.6047119",
"0.6046609",
"0.60214084",
"0.5969723",
"0.59672964",
"0.5934459",
"0.5914037",
"0.58994067",
"0.587215",
"0.587215",
"0.58706605",
"0.5866512",
"0.5852108",
"0.5838008",
"0.5826312",
"0.5825605",
"0.5825467",
"0.5824818",
"0.5806762",
"0.5782141"
] | 0.7996814 | 0 |
get a list to put in the interface; irace is the integer index of the races list | def getCheckpointList(self, irace):
r = []
if irace >= len(self.races):
return r # new race
# msg = "Toolkit Error: trying to get a checkpoint from race that doesn't exists (bad index %d, actual length %d " % (irace, len(self.races))
# raise showedError(msg)
therace = self.races[irace]
if len(therace.points) > 0:
for i in range(len(therace.points)):
line = " %.3d - %s " % (i, therace.points[i]['gate'])
r.append(line)
self.raceIndex = irace
therace.showCheckpoints(True)
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getRaceList(self):\n\t\tl = []\n\t\tfor r in self.races:\n\t\t\tl.append(r.name)\n\t\treturn l",
"def enrollment_list(loc:List[CrimeStatistics])->List[int]:\n # return [] #stub\n # template from List[CrimeStatistics]\n # enrollments is all enrollment values seen so far\n enrollments = [] # type: List[int]\n \n for cs in loc:\n enrollments.append(cs.enrollment)\n return enrollments",
"def construct_seq(ind_i):\n track_i = track_list[ind_i]\n select_indices_i = track_i.sample_rois()\n seq_roi_list = [track_i.roi_list[i] for i in select_indices_i]\n return seq_roi_list",
"def get_xc_races_by_year(self, year):\n try:\n results = self.__get_results('CALL GetXcRacesByYear({0})'.format(year))\n return [XcRace(result).get_json() for result in results] if results else []\n except Exception as e:\n logging.exception(e)\n raise",
"def reader(list, index_list):\r\n\tnewlist = []\r\n\tfor i in index_list:\r\n\t\tnewlist.append(list[i])\r\n\treturn newlist",
"def extrage_litere(cuvant):\n icao_litere = []\n for litera in cuvant:\n icao_litere.append(ICAO[litera])\n return icao_litere",
"def get_species_list(index: int, list_species: List[str]) -> List[str]:\n species = []\n for sp in list_species:\n species.extend([sp] * index)\n return species",
"def test_get_races(self):\n self.populate_database()\n response = self.client.get(\"/api/elections/1/races\",\n headers=[(\"Accept\", \"application/json\")])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n races = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(len(races), 1)\n self.assertEqual(races[0][\"id\"], 1)",
"def get_playable_race_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/playable-race/index', region, **filters)",
"def index():\n return list()",
"def get_match_list(summoner, begin_index=-1, begin_time=0, end_time=0, champions=None, ranked_queues=None, seasons=None):\n if(ranked_queues and isinstance(ranked_queues, list)):\n for queue in ranked_queues:\n if queue not in cassiopeia.type.core.common.ranked_queues:\n raise ValueError(\"{queue} is not a ranked queue\".format(queue=queue))\n elif(ranked_queues):\n if ranked_queues not in cassiopeia.type.core.common.ranked_queues:\n raise ValueError(\"{queue} is not a ranked queue\".format(queue=ranked_queues))\n\n # Convert core types to API-ready types\n if(isinstance(begin_time, datetime.datetime)):\n epoch = datetime.datetime.utcfromtimestamp(0)\n delta = begin_time - epoch\n begin_time = int(delta.total_seconds() * 1000)\n if(isinstance(end_time, datetime.datetime)):\n epoch = datetime.datetime.utcfromtimestamp(0)\n delta = end_time - epoch\n end_time = int(delta.total_seconds() * 1000)\n\n champion_ids = [champion.id for champion in champions] if isinstance(champions, list) else champions.id if champions else None\n queues = [queue.value for queue in ranked_queues] if isinstance(ranked_queues, list) else ranked_queues.value if ranked_queues else None\n seasons = [season.value for season in seasons] if isinstance(seasons, list) else seasons.value if seasons else None\n\n history = cassiopeia.dto.matchlistapi.get_match_list(summoner.id, begin_index, begin_time, end_time, champion_ids, queues, seasons)\n\n # Load required data if loading policy is eager\n if(cassiopeia.core.requests.load_policy is cassiopeia.type.core.common.LoadPolicy.eager):\n cassiopeia.riotapi.get_champions() if history.champion_ids else None\n\n return [cassiopeia.type.core.matchlist.MatchReference(ref) for ref in history.matches]",
"def list():",
"def list():",
"def build_olympic_id_list(self):\n\n # Get the URL where all the games are and parse it.\n all_games_url = u'http://www.olympic.org/olympic-games'\n request = requests.get(all_games_url)\n soup = BeautifulSoup(request.content)\n\n # Find the list of links.\n link_list = soup.select('.iocRiaContent')[0].select('li span a')\n\n # Loop through that list.\n for link in link_list:\n\n # For each Olympic game, get the URL of the detail page.\n olympic_url = link.attrs['href']\n\n # Request it and parse it.\n request = requests.get(olympic_url)\n soup = BeautifulSoup(request.content)\n\n # Grab the olympic_css DOM object.\n # Parse out the ID from the href.\n olympic_id = soup.select(self.olympic_css)[0].attrs['href']\n olympic_id = olympic_id.split('games=')[1].split('&')[0]\n\n # Try to append this to the olympic_ids list.\n # Fail if it's not an integer for some reason.\n try:\n olympic_id = int(olympic_id)\n self.olympic_ids.append(olympic_id)\n except ValueError:\n print u'%s is not an integer.' % olympic_id",
"def roster(self) -> list:\n return [student\n for grade in sorted(self.students)\n for student in self.students[grade]]",
"def GetSpectraFromIndexList(all_wl,all_spectra,idx_list):\n NBSPEC=len(all_spectra)\n \n \n all_wl_sel=[]\n all_spectra_sel=[]\n \n for idx in np.arange(0,NBSPEC):\n if idx in idx_list:\n all_wl_sel.append(all_wl[idx])\n all_spectra_sel.append(all_spectra[idx])\n return all_wl_sel,all_spectra_sel",
"def create_start_list(conn, race_date, file_name=\"\"):\n start_lists_dir = Path(\"startlists\")\n start_lists_dir.mkdir(exist_ok=True)\n webscorer_headers = ('Bib', 'First name', 'Last name', 'Team name', 'Age', 'Gender', 'Distance')\n startlist = conn.execute(\"\"\"SELECT registration_id, first_name, last_name, club, dob, registrations.gender, race_genders.gender\n FROM registrations\n LEFT JOIN race_genders USING(registration_id)\n \"\"\").fetchall()\n \n if not file_name.strip():\n file_name = \"startlist{}.csv\".format(race_date.strftime('%Y%m%d'))\n elif not file_name.strip().endswith(\".csv\"):\n file_name = \"{}.csv\".format(file_name)\n outfile = start_lists_dir / file_name\n\n if len(startlist) > 0:\n for i,entry in enumerate(startlist):\n startlist[i] = list(entry)\n\n # Alter non-binary genders\n gender = startlist[i].pop()\n if gender != None:\n if startlist[i][5].lower() == \"non-binary\":\n startlist[i][5] = gender\n else:\n print(\"WARN: Conflicting genders for registration_id={}\".format(startlist[i][0]))\n print(\"\\t Please check that entries in race_genders only match registrations with 'Non-binary' genders.\")\n elif startlist[i][5].lower() == \"non-binary\":\n print(\"WARN: No race gender specified for registration_id={}\".format(startlist[i][0]))\n print(\"\\t Please add an entry to race_genders and recreate this startlist\")\n \n # Replace DoB with age at time of race\n startlist[i][4] = years_between(startlist[i][4], race_date)\n \n # Race distance\n startlist[i].append('5km')\n \n # We don't know what distance each runner will undertake ahead of the\n # event. We just need to include each distance at least once.\n startlist[0][-1] = '10km'\n else:\n print(\"WARN: the database was empty when creating a start list\")\n\n with open(outfile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(webscorer_headers)\n writer.writerows(startlist)\n print(\"New startlist located at: {}\".format(outfile))",
"def race(self, instance):\r\n return '/'.join([raza.name for raza in instance.user.profile.race.all()])",
"def get_chakra(self, chakra_list):\n for c in chakra_list:\n if c.raga == self.name:\n self.chakra = c.name\n self.chakra_num = c.num\n break",
"def appendRobot(self, cnx):\n nbJoueur= len(self.lstRob)\n #name = 'Joueur ' + str(nbJoueur+1)\n name = str(nbJoueur+1)\n newRob= Robot( cnx, name)\n pos = random.choice(self._couloirs) # Trouve une position au hasard dans les cases de type couloirs\n self._couloirs.remove(pos) # Enleve cette case des cases possibles pour les robots\n newRob.pos= pos\n robInList = self.lstRob.append(newRob) # Ajoute le robot dans le labyrinthe\n robInList = self.lstRob[len(self.lstRob)-1] #Reference sur le robot créé\n return robInList # A toute fin utile reference du robot créé dans la liste",
"def set_index_list(index_a):\n indexes = list(range(len(game_data.data)))\n if index_a != -1:\n indexes.pop(index_a)\n return indexes",
"def get_from_list(self,list_,index):\r\n\r\n\r\n try:\r\n return list_[self._index_to_int(index)]\r\n except IndexError:\r\n self._index_error(list_,index)",
"def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa",
"def __getitem__(self, index):\n return self._games[index]",
"def getLigandResIds(ligchemid:str, struct: Structure)->List[Residue]:\n \"\"\"*ligchemids are of type https://www.rcsb.org/ligand/IDS\"\"\"\n ligandResidues: List[Residue] = list(filter(lambda x: x.get_resname() == ligchemid, list( struct.get_residues() )))\n return ligandResidues",
"def get_ROIs(self, base):\n locs3d = self.locs3d\n #print loc3d\n base_locs = locs3d[base]\n ROI_dic = dict((i, [Id]) for i,Id in enumerate(base))\n for i, loc in enumerate(locs3d):\n if i not in base:\n dist = np.sqrt(np.sum((base_locs - loc)**2, 1))\n min_i = np.argmin(dist)\n ROI_dic[min_i].append(i)\n out = ROI_dic.values()\n return out",
"def get_items_to_index(self):\n\t\treturn []",
"def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]",
"def __getitem__(self, index):\n return self.chromosome_list[index]",
"def get_canidate_list(self, *name, **params):\n\n if \"row\" in params:\n row = params[\"row\"]\n positions = [ (row, col) for col in range(9) ]\n\n elif \"col\" in params:\n col = params[\"col\"]\n positions = [ (row, col) for row in range(9) ]\n\n elif \"cell\" in params:\n row,col = self.calc_group_corner(params[\"cell\"])\n positions = [ (i, j) for i in range(row, row + 3) for j in range(col, col + 3) ]\n\n else:\n positions = [ (row, col) for row in range(9) for col in range(9) ]\n\n return dict([ (position, self.get_canidates(cell=position)) for position in positions ])"
] | [
"0.6523289",
"0.54145765",
"0.5309539",
"0.5160551",
"0.51471996",
"0.51424783",
"0.512308",
"0.5070747",
"0.50332165",
"0.5015577",
"0.4969441",
"0.49606994",
"0.49606994",
"0.4927924",
"0.4920787",
"0.49111506",
"0.49075723",
"0.49041528",
"0.48502666",
"0.4835571",
"0.48252138",
"0.48235422",
"0.48212218",
"0.47988248",
"0.47746116",
"0.47633475",
"0.47631806",
"0.4744663",
"0.47441566",
"0.47352424"
] | 0.5926581 | 1 |
show or hide checkpoints of this race | def showCheckpoints(self, value=True):
try:
self.owner.showingcheckpoints = True
for i in range(len(self.points)):
if self.points[i]['entry'] is None and value:
self.points[i]['entry'] = self.owner._ogreWin.addGeneralObject(self.points[i]['gate'] + '.odef',
self.points[i]['pos'].asTuple,
self.points[i]['rot'].asTuple)
#set up callback, just a pointer to the method
elif not self.points[i]['entry'] is None:
self.points[i]['entry'].visible = value
finally:
self.owner.showingcheckpoints = False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view():\n\n checkpoint_ini = parse_checkpoint_ini()\n run_start = checkpoint_ini[\"refget_ena_checkpoint\"][\"run_start\"]\n msg = \"the ena-refget-scheduler is currently configured to run, starting \" \\\n + \"from \" + run_start\n print(msg)",
"def show(self) -> None:\n thr_is_alive = self._spin_thread and self._spin_thread.is_alive()\n if self._hide_spin is None:\n raise RuntimeError(\"hide_spin is None\")\n\n if thr_is_alive and self._hide_spin.is_set():\n with self._stdout_lock:\n # clear the hidden spinner flag\n self._hide_spin.clear()\n # clear the current line so the spinner is not appended to it\n self._clear_line()",
"def checkpoint():",
"def hide(self):\n \n self.save_scores()\n super().hide()",
"def visible(self, show):",
"def show( self ):\n if self.visible == 1:#ohnheiser hack and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def show(self):\n if not self.init_run and self.flow_auto_update:\n self.run_all()\n self.init_run = True\n self.flow_view.show()",
"def show_task2(self):\n self._show_task(self.controller.EXPECTED)",
"def hide(self) -> None:\n thr_is_alive = self._spin_thread and self._spin_thread.is_alive()\n if self._hide_spin is None:\n raise RuntimeError(\"hide_spin is None\")\n\n if thr_is_alive and not self._hide_spin.is_set():\n with self._stdout_lock:\n # set the hidden spinner flag\n self._hide_spin.set()\n self._clear_line()\n\n # flush the stdout buffer so the current line\n # can be rewritten to\n sys.stdout.flush()",
"def toggle_viz(self):\n\n return self.hide() if self.viz else self.show()",
"def displaysuspicions(self):\n raise NotImplementedError()",
"def show(self):\n if self.visible == 1 and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def should_show():",
"def show_task1(self):\n self._show_task(self.controller.CURRENT)",
"def show_box(self):\n self.permanent_show = not self.permanent_show",
"def display_decision(self):\n logger.debug(u\"{} Decision\".format(self.joueur))\n debut = datetime.now()\n self.currentperiod.EXPERIENCE_NOM_COURT_decision = yield(self.remote.callRemote(\n \"display_decision\"))\n self.currentperiod.EXPERIENCE_NOM_COURT_decisiontime = (datetime.now() - debut).seconds\n self.joueur.info(u\"{}\".format(self.currentperiod.EXPERIENCE_NOM_COURT_decision))\n self.joueur.remove_waitmode()",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def getCheckpointList(self, irace):\n\t\tr = []\n\t\tif irace >= len(self.races):\n\t\t\treturn r # new race\n#\t\t\tmsg = \"Toolkit Error: trying to get a checkpoint from race that doesn't exists (bad index %d, actual length %d \" % (irace, len(self.races))\n#\t\t\traise showedError(msg)\n\t\ttherace = self.races[irace]\n\t\tif len(therace.points) > 0:\n\t\t\tfor i in range(len(therace.points)):\n\t\t\t\tline = \" %.3d - %s \" % (i, therace.points[i]['gate'])\n\t\t\t\tr.append(line)\n\t\t\tself.raceIndex = irace\n\t\t\ttherace.showCheckpoints(True)\n\t\treturn r",
"def show_state(self):\n print \"I don't know how to show_state.\"",
"def _showhide_logs(self, widget):\n\n\t\tif self.main.view_logs.get_active():\n\t\t\tself.logger.notebook.show()\n\n\t\telse:\n\t\t\tself.logger.notebook.hide()",
"def toggled(self, b):\n self.group.setVisible(b)\n\n for line in (self.rLine, self.gLine, self.bLine):\n line.setVisible(b)\n\n self.parent.image.timeLine.setVisible(not b)",
"def hidden():\n return False",
"def show_status():\n\n pass",
"def show_state(self):\n print(\"I don't know how to show_state.\")",
"def display_data(self, condition):\r\n if condition:\r\n self.data_pause = False\r\n else:\r\n self.data_pause = True",
"def showInvisibles(self: Self, event: Event = None) -> None:\n c = self\n showInvisiblesHelper(c, True)",
"def is_hidden():\n return False",
"def is_hidden():\n return False",
"def event_beforehide(self):\n logging.warning('beforehide undefined')"
] | [
"0.5596374",
"0.55433595",
"0.5335721",
"0.53062624",
"0.52888876",
"0.52786565",
"0.5234352",
"0.5203127",
"0.5177826",
"0.51289123",
"0.51103914",
"0.5096311",
"0.5087622",
"0.5080283",
"0.5055638",
"0.50398296",
"0.5034587",
"0.5034587",
"0.5012699",
"0.5010719",
"0.49965394",
"0.49809256",
"0.49754655",
"0.49704355",
"0.49613523",
"0.49409997",
"0.49404743",
"0.49385026",
"0.49385026",
"0.49223027"
] | 0.61171097 | 0 |
Subclass this method in a platform module to configure the DMD. This method should return a reference to the DMD's platform interface method which will receive the frame data. | def configure_dmd(self):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_dmd(self) -> \"DmdPlatformInterface\":\n raise NotImplementedError",
"def configure_rgb_dmd(self, name: str) -> \"DmdPlatformInterface\":\n raise NotImplementedError",
"def __init__(self, dataFrame):\n self.dataFrame = dataFrame",
"def __init__(self):\n super().__init__()\n self.dmdParams = {} # dmd settings container\n self.printTag = 'DMD' # print tag\n self._dynamicHandling = True # This ROM is able to manage the time-series on its own. No need for special treatment outside\n self.pivotParameterID = None # pivot parameter\n # variables filled up in the training stages\n self._amplitudes = {} # {'target1': vector of amplitudes,'target2':vector of amplitudes, etc.}\n self._eigs = {} # {'target1': vector of eigenvalues,'target2':vector of eigenvalues, etc.}\n self._modes = {} # {'target1': matrix of dynamic modes,'target2':matrix of dynamic modes, etc.}\n self.__Atilde = {} # {'target1': matrix of lowrank operator from the SVD,'target2':matrix of lowrank operator from the SVD, etc.}\n self.pivotValues = None # pivot values (e.g. time)\n self.KDTreeFinder = None # kdtree weighting model\n self.timeScales = {} # time-scales (training and dmd). {'training' and 'dmd':{t0:float,'dt':float,'intervals':int}}\n self.featureVals = None # feature values",
"def expose_data(self):\r\n return _ExposedFarmData(self._platforms, self._awaiting, self._channels)",
"def configure(self):\n\n self.platform.configure()",
"def __init__(self):\n self.hmd = None\n self.vr_render_models = None\n self.render_width = 0\n self.render_height = 0",
"def __init__(self):\r\n super(DataTarget, self).__init__()",
"def configure_rgb_dmd(self):\n raise NotImplementedError",
"def from_hdf(self, hdf=None, group_name=None):\n super(ParameterMaster, self).from_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.iteration_frame = pandas.DataFrame(hdf5_input[\"dataframe\"])",
"def __call__(self):\n\n # create dataframes of relevant sections from the INP\n for ix, sect in enumerate(self.config['inp_sections']):\n if ix == 0:\n df = create_dataframeINP(self.inp.path, sect, comment_cols=False)\n else:\n df_other = create_dataframeINP(self.inp.path, sect, comment_cols=False)\n df = df.join(df_other)\n\n if self.rpt:\n for rpt_sect in self.config['rpt_sections']:\n df = df.join(create_dataframeRPT(self.rpt.path, rpt_sect))\n\n # add conduit coordinates\n xys = df.apply(lambda r: get_link_coords(r, self.inp.coordinates, self.inp.vertices), axis=1)\n df = df.assign(coords=xys.map(lambda x: x[0]))\n\n # make inlet/outlet node IDs string type\n df.InletNode = df.InletNode.astype(str)\n df.OutletNode = df.OutletNode.astype(str)\n\n return df",
"def _ensure_dframe(self):\n if self.dframe is None:\n self.dframe = self.dataset.dframe()",
"def initDataFrame(self,referenceID, content):\r\n # Strip any colons in the mac address\r\n self.referenceID = referenceID\r\n\r\n # Set the frame content\r\n self.content = str(content)\r\n\r\n # Set the content length\r\n self.contentLength = len(self.content)\r\n\r\n # Set the correct frame message type\r\n self.mesgType = MULTIPLEXER_DATA_FORWARD",
"def setDataFrame(self, dataFrame, copyDataFrame=False):\n if not isinstance(dataFrame, pandas.core.frame.DataFrame):\n raise TypeError(\"not of type pandas.core.frame.DataFrame\")\n\n self.layoutAboutToBeChanged.emit()\n if copyDataFrame:\n self._dataFrame = dataFrame.copy()\n else:\n self._dataFrame = dataFrame\n\n self._columnDtypeModel = ColumnDtypeModel(dataFrame)\n self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)\n self._columnDtypeModel.changeFailed.connect(\n lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)\n )\n self.layoutChanged.emit()\n self.dataChanged.emit()\n self.dataFrameChanged.emit()",
"def __init__(self, epics_only=False, *args, **kwargs):\n self._kwargs = {}\n self._detectors = {}\n self._det_list = [] \n self._det_aliases = {}\n self._psplots = {}\n self._event_functions = {}\n self._source_attrs = []\n self._evt_time_last = (0,0)\n self.ievent = 0\n self._reloadOnLoadRun = False\n self._reloadOnNextEvent = False\n self.psana_cfg_dict = {}\n self._default_module_path = ''\n\n# self._user_attrs = {}\n# self._histograms = {}\n \n for key in kwargs:\n self._kwargs[key] = kwargs[key] \n if key in self._exp_defaults:\n setattr(self,key,kwargs[key])\n print 'setting ',key, kwargs[key]\n\n self._device_config = read_device_config(**kwargs)\n self._device_sets = self._device_config['device_sets'] \n self._device_types = self._device_config['device_types'] \n\n for det in self._device_sets:\n if 'det' in self._device_sets[det]:\n if ('detName' in self._device_sets[det]['det'] or\n 'typeName' in self._device_sets[det]['det']):\n self._det_list.append(det)\n if 'det_key' in self._device_sets[det]['det']:\n det_key = self._device_sets[det]['det']['det_key']\n self._det_aliases[det_key] = det \n else:\n pass\n \n# if 'pvs' in self._device_sets[det]:\n# for attr in self._device_sets[det]['pvs']:\n# pvbase = self._device_sets[det]['pvs'][attr]['base']\n# alias = '_'.join([det,attr])\n# self.add_pv(pvbase, alias)\n\n self.set_exp_defaults(**kwargs)\n if not self._kwargs.get('noload'):\n self.data_source = self.get_data_source(**kwargs)\n print 'Data Source = ', self.data_source\n else:\n self.data_source = None\n\n if not self.data_source:\n self._kwargs['noload'] = True\n else:\n kwargs['run'] = self.run\n\n# if self._kwargs.get('noload') or self.live:\n# if self._kwargs.get('epics_live'):\n# self.set_kwargs(ami=True)\n \n if self._kwargs.get('ami'):\n print 'loading ami'\n self.load_ami(**kwargs)\n\n if not self._kwargs.get('noload'):\n print 'loading run'\n self.load_run(*args, **kwargs)\n self._no_epicsStore = False\n \n print 'Instrument = ', self.instrument\n\n if self._kwargs.get('epics_live'): # and self._kwargs.get('epics_file'):\n print 'loading epics'\n self.load_epicsLive(**kwargs)\n\n if self.ds and self.live:\n self.next_event()\n \n if self.ds and self._reloadOnNextEvent:\n self.next_event()\n \n if not self.ds:\n self._no_epicsStore = True\n self._no_evtData = True\n for det in self._device_sets:\n if 'pvs' in self._device_sets[det]:\n print 'Adding epics ',det\n self.add_detector(det)",
"def from_platform(self):\n project_name = self.platform_params['project_name']\n project_id = self.platform_params['project_id']\n dataset_name = self.platform_params['dataset_name']\n dataset_id = self.platform_params['dataset_id']\n item_filepath = self.platform_params['item_filepath']\n item_id = self.platform_params['item_id']\n\n # load remote item\n if dataset_id is None:\n self.project = dl.projects.get(project_name=project_name, project_id=project_id)\n if self.project is None:\n raise ValueError('Project doesnt exists. name: %s, id: %s' % (project_name, project_id))\n self.dataset = self.project.datasets.get(dataset_name=dataset_name, dataset_id=dataset_id)\n else:\n self.dataset = dl.datasets.get(dataset_id=dataset_id)\n if self.dataset is None:\n raise ValueError('Dataset doesnt exists. name: %s, id: %s' % (dataset_name, dataset_id))\n self.item = self.dataset.items.get(filepath=item_filepath, item_id=item_id)\n if self.item is None:\n raise ValueError('Item doesnt exists. name: %s, id: %s' % (item_filepath, item_id))\n self.labels = {label.tag: label.rgb for label in self.dataset.labels}\n _, ext = os.path.splitext(self.item.filename[1:])\n video_filename = os.path.join(self.dataset.__get_local_path__(), self.item.filename[1:])\n if not os.path.isdir(os.path.dirname(video_filename)):\n os.makedirs(os.path.dirname(video_filename))\n if not os.path.isfile(video_filename):\n self.item.download(local_path=os.path.dirname(video_filename), to_items_folder=False)\n self.video_source = video_filename\n self.video_annotations = self.item.annotations.list()",
"def __init__(self, dataframe):\n self._dataframe = dataframe \n self._data_grouped_by_manufacturer = self._group_by_manufacturer()\n self._data_agg_by_mean_value = self._agg_by_mean()\n self._formatted_data = self._format_data()",
"def setup(self):\r\n ScriptedLoadableModuleWidget.setup(self)\r\n\r\n # Load widget from .ui file (created by Qt Designer).\r\n # Additional widgets can be instantiated manually and added to self.layout.\r\n uiWidget = slicer.util.loadUI(self.resourcePath('UI/RecordHerniaData.ui'))\r\n self.layout.addWidget(uiWidget)\r\n self.ui = slicer.util.childWidgetVariables(uiWidget)\r\n\r\n # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's\r\n # \"mrmlSceneChanged(vtkMRMLScene*)\" signal in is connected to each MRML widget's.\r\n # \"setMRMLScene(vtkMRMLScene*)\" slot.\r\n uiWidget.setMRMLScene(slicer.mrmlScene)\r\n\r\n # Create logic class. Logic implements all computations that should be possible to run\r\n # in batch mode, without a graphical user interface.\r\n self.logic = TMSRecordDataModuleLogic()\r\n self.recordingStarted = False\r\n self.camerasStarted = False\r\n self.moduleDir = os.path.dirname(slicer.modules.tmsrecorddatamodule.path)\r\n self.logic.setupScene()\r\n\r\n # Buttons\r\n self.ui.StartStopRecordingButton.connect('clicked(bool)', self.onStartStopRecordingClicked)\r\n self.ui.startCamerasButton.connect('clicked(bool)',self.onStartStopCamerasClicked)",
"def __init__(self, df):\n self.df = df",
"def __init__(self, *args, **kwargs):\n ignore_version = kwargs.pop('ignore_version', False)\n\n super(Hdf5, self).__init__(*args, **kwargs)\n\n # If True, always translate __getitem__ requests according to the\n # schema, even if __getitem__ requests a dataset that exists\n self.always_translate = False\n\n self._version = self.attrs.get('version')\n if isinstance(self._version, bytes):\n self._version = self._version.decode()\n self._timesteps = {}\n\n # Connect the schema map to this object\n if self._version in SCHEMA:\n self.schema = SCHEMA[self._version]\n elif self._version is None:\n self.schema = {}\n elif not ignore_version:\n raise KeyError(\"Unknown schema version %s\" % self._version)\n\n # Connect the schema dataset providers to this object\n if self._version in SCHEMA_DATASET_PROVIDERS:\n self.dataset_providers = SCHEMA_DATASET_PROVIDERS[self._version]\n else:\n self.dataset_providers = {}",
"def __init__(self, cfg):\n super(DKInfluxDB, self).__init__(cfg, 'influxdb')",
"def setup(self, ds):\n pass",
"def __init__(self, platform_name, sensor_key, data_service):\n super().__init__(platform_name, sensor_key, data_service)\n\n self._attributes = {}",
"def __init__(self, parent): \n \n self.parent = parent\n \n self.custom_channel_name = _qstring(parent.rhd)\n self.native_channel_name = _qstring(parent.rhd)\n self.native_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.custom_order = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.signal_type = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.channel_enabled = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.chip_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.board_stream = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_trigger_mode= np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_voltage_threshold = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_trigger_channel = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.spike_scope_digital_edge_polarity = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n self.electrode_impedance_magnitude = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n self.electrode_impedance_phase = np.float32(struct.unpack('f', parent.rhd.read(4)))[0]\n\n if self.signal_type == 0 and self.channel_enabled:#Add name to the amplifier channel list\n parent._AMPLIFIER_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 1 and self.channel_enabled:#Add name to the aux channel list\n parent._AUX_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 2 and self.channel_enabled:#Supply voltage\n parent._SUPPLY_VOLTAGE_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 3 and self.channel_enabled:#usb board adc input channel\n parent._ADC_INPUT_CHANNELS.append(self.native_channel_name)\n\n if self.signal_type == 4 and self.channel_enabled:#usb board digital input channel\n parent._DIGITAL_INPUT_CHANNELS.append(self.native_channel_name)",
"def _configure(self):\n Component._configure(self)\n self.dataDim = self.inventory.dataDim\n self.reader = self.inventory.reader\n self.coordsys = self.inventory.coordsys\n return",
"def port_maker(self, platform):\n raise NotImplementedError()",
"def __init__(self, df, extras=None, **kwtraits):\n\n super(PandasPlotData, self).__init__(**kwtraits) #Trait initialization \n \n if len(df.shape) > 2:\n raise NotImplementedError('Multidimensional dfs of order 3 or higher \\\n\t are not supported by in PandasPlotData') #Do 1d arrays work?\n \n self.set_df(df)",
"def __init__(self, link=None, first_slice_angle=None, floor=None, plot_empty_cells_type=None, auto_scaling=None, style=None, series_axis=None, value_axis=None, show_data_table=None, is3_d=None, chart_area=None, elevation=None, side_wall=None, type=None, title=None, walls=None, back_wall=None, chart_data_table=None, height_percent=None, gap_width=None, legend=None, chart_object=None, is_rectangular_cornered=None, second_category_axis=None, second_value_axis=None, placement=None, name=None, size_with_window=None, right_angle_axes=None, plot_visible_cells=None, show_legend=None, pivot_source=None, depth_percent=None, print_size=None, gap_depth=None, shapes=None, walls_and_gridlines2_d=None, n_series=None, rotation_angle=None, plot_area=None, category_axis=None, perspective=None, hide_pivot_field_buttons=None, page_setup=None, **kw):\n self.container = {}\n\t\t \n \"\"\"\n Chart - a model defined in Swagger\n \"\"\"\n\n self.container['link'] = None\n self.container['first_slice_angle'] = None\n self.container['floor'] = None\n self.container['plot_empty_cells_type'] = None\n self.container['auto_scaling'] = None\n self.container['style'] = None\n self.container['series_axis'] = None\n self.container['value_axis'] = None\n self.container['show_data_table'] = None\n self.container['is3_d'] = None\n self.container['chart_area'] = None\n self.container['elevation'] = None\n self.container['side_wall'] = None\n self.container['type'] = None\n self.container['title'] = None\n self.container['walls'] = None\n self.container['back_wall'] = None\n self.container['chart_data_table'] = None\n self.container['height_percent'] = None\n self.container['gap_width'] = None\n self.container['legend'] = None\n self.container['chart_object'] = None\n self.container['is_rectangular_cornered'] = None\n self.container['second_category_axis'] = None\n self.container['second_value_axis'] = None\n self.container['placement'] = None\n self.container['name'] = None\n self.container['size_with_window'] = None\n self.container['right_angle_axes'] = None\n self.container['plot_visible_cells'] = None\n self.container['show_legend'] = None\n self.container['pivot_source'] = None\n self.container['depth_percent'] = None\n self.container['print_size'] = None\n self.container['gap_depth'] = None\n self.container['shapes'] = None\n self.container['walls_and_gridlines2_d'] = None\n self.container['n_series'] = None\n self.container['rotation_angle'] = None\n self.container['plot_area'] = None\n self.container['category_axis'] = None\n self.container['perspective'] = None\n self.container['hide_pivot_field_buttons'] = None\n self.container['page_setup'] = None\n\n if link is not None:\n self.link = link\n if first_slice_angle is not None:\n self.first_slice_angle = first_slice_angle\n if floor is not None:\n self.floor = floor\n if plot_empty_cells_type is not None:\n self.plot_empty_cells_type = plot_empty_cells_type\n if auto_scaling is not None:\n self.auto_scaling = auto_scaling\n if style is not None:\n self.style = style\n if series_axis is not None:\n self.series_axis = series_axis\n if value_axis is not None:\n self.value_axis = value_axis\n if show_data_table is not None:\n self.show_data_table = show_data_table\n if is3_d is not None:\n self.is3_d = is3_d\n if chart_area is not None:\n self.chart_area = chart_area\n if elevation is not None:\n self.elevation = elevation\n if side_wall is not None:\n self.side_wall = side_wall\n if type is not None:\n self.type = type\n if title is not None:\n self.title = title\n if 
walls is not None:\n self.walls = walls\n if back_wall is not None:\n self.back_wall = back_wall\n if chart_data_table is not None:\n self.chart_data_table = chart_data_table\n if height_percent is not None:\n self.height_percent = height_percent\n if gap_width is not None:\n self.gap_width = gap_width\n if legend is not None:\n self.legend = legend\n if chart_object is not None:\n self.chart_object = chart_object\n if is_rectangular_cornered is not None:\n self.is_rectangular_cornered = is_rectangular_cornered\n if second_category_axis is not None:\n self.second_category_axis = second_category_axis\n if second_value_axis is not None:\n self.second_value_axis = second_value_axis\n if placement is not None:\n self.placement = placement\n if name is not None:\n self.name = name\n if size_with_window is not None:\n self.size_with_window = size_with_window\n if right_angle_axes is not None:\n self.right_angle_axes = right_angle_axes\n if plot_visible_cells is not None:\n self.plot_visible_cells = plot_visible_cells\n if show_legend is not None:\n self.show_legend = show_legend\n if pivot_source is not None:\n self.pivot_source = pivot_source\n if depth_percent is not None:\n self.depth_percent = depth_percent\n if print_size is not None:\n self.print_size = print_size\n if gap_depth is not None:\n self.gap_depth = gap_depth\n if shapes is not None:\n self.shapes = shapes\n if walls_and_gridlines2_d is not None:\n self.walls_and_gridlines2_d = walls_and_gridlines2_d\n if n_series is not None:\n self.n_series = n_series\n if rotation_angle is not None:\n self.rotation_angle = rotation_angle\n if plot_area is not None:\n self.plot_area = plot_area\n if category_axis is not None:\n self.category_axis = category_axis\n if perspective is not None:\n self.perspective = perspective\n if hide_pivot_field_buttons is not None:\n self.hide_pivot_field_buttons = hide_pivot_field_buttons\n if page_setup is not None:\n self.page_setup = page_setup",
"def _setup(self):\n\n from AlGDock.topology import Topology\n self.top = Topology(self.args)\n self.top_RL = Topology(self.args, includeReceptor=True)\n\n # Initialize rmsd calculation function\n from AlGDock.RMSD import hRMSD\n self.get_rmsds = hRMSD(self.args.FNs['prmtop']['L'], \\\n self.top.inv_prmtop_atom_order_L)\n\n # Obtain reference pose\n if self.data['CD'].pose > -1:\n if ('starting_poses' in self.data['CD'].confs.keys()) and \\\n (self.data['CD'].confs['starting_poses'] is not None):\n starting_pose = np.copy(self.data['CD'].confs['starting_poses'][0])\n else:\n (confs, Es) = self._get_confs_to_rescore(site=False, \\\n minimize=False, sort=False)\n if self.args.params['CD']['pose'] < len(confs):\n starting_pose = np.copy(confs[self.args.params['CD']['pose']])\n self.data['CD'].confs['starting_poses'] = [np.copy(starting_pose)]\n else:\n self._clear('CD')\n self._store_infinite_f_RL()\n raise Exception('Pose index greater than number of poses')\n else:\n starting_pose = None\n\n from AlGDock.system import System\n self.system = System(self.args,\n self.log,\n self.top,\n self.top_RL,\n starting_pose=starting_pose)\n\n # Measure the binding site\n if (self.args.params['CD']['site'] == 'Measure'):\n self.args.params['CD']['site'] = 'Sphere'\n if self.args.params['CD']['site_measured'] is not None:\n (self.args.params['CD']['site_max_R'],self.args.params['CD']['site_center']) = \\\n self.args.params['CD']['site_measured']\n else:\n print '\\n*** Measuring the binding site ***'\n self.system.setParams(\n self.system.paramsFromAlpha(1.0, 'CD', site=False))\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=True)\n if len(confs) > 0:\n # Use the center of mass for configurations\n # within 20 RT of the lowest energy\n cutoffE = Es['total'][-1] + 20 * (R * self.T)\n coms = []\n for (conf, E) in reversed(zip(confs, Es['total'])):\n if E <= cutoffE:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, conf))\n coms.append(np.array(self.top.universe.centerOfMass()))\n else:\n break\n print ' %d configurations fit in the binding site' % len(coms)\n coms = np.array(coms)\n center = (np.min(coms, 0) + np.max(coms, 0)) / 2\n max_R = max(\n np.ceil(np.max(np.sqrt(np.sum(\n (coms - center)**2, 1))) * 10.) 
/ 10., 0.6)\n self.args.params['CD']['site_max_R'] = max_R\n self.args.params['CD']['site_center'] = center\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n if ((self.args.params['CD']['site_max_R'] is None) or \\\n (self.args.params['CD']['site_center'] is None)):\n raise Exception('No binding site parameters!')\n else:\n self.args.params['CD']['site_measured'] = \\\n (self.args.params['CD']['site_max_R'], \\\n self.args.params['CD']['site_center'])\n\n # Read the reference ligand and receptor coordinates\n import AlGDock.IO\n IO_crd = AlGDock.IO.crd()\n if self.args.FNs['inpcrd']['R'] is not None:\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n self.data['CD'].confs['receptor'] = IO_crd.read(\\\n self.args.FNs['inpcrd']['R'], multiplier=0.1)\n elif self.args.FNs['inpcrd']['RL'] is not None:\n complex_crd = IO_crd.read(self.args.FNs['inpcrd']['RL'], multiplier=0.1)\n lig_crd = complex_crd[self.top_RL.L_first_atom:self.top_RL.L_first_atom + \\\n self.top.universe.numberOfAtoms(),:]\n self.data['CD'].confs['receptor'] = np.vstack(\\\n (complex_crd[:self.top_RL.L_first_atom,:],\\\n complex_crd[self.top_RL.L_first_atom + self.top.universe.numberOfAtoms():,:]))\n elif self.args.FNs['inpcrd']['L'] is not None:\n self.data['CD'].confs['receptor'] = None\n if os.path.isfile(self.args.FNs['inpcrd']['L']):\n lig_crd = IO_crd.read(self.args.FNs['inpcrd']['L'], multiplier=0.1)\n else:\n lig_crd = None\n\n if lig_crd is not None:\n self.data['CD'].confs['ligand'] = lig_crd[self.top.\n inv_prmtop_atom_order_L, :]\n self.top.universe.setConfiguration(\\\n Configuration(self.top.universe,self.data['CD'].confs['ligand']))\n if self.top_RL.universe is not None:\n self.top_RL.universe.setConfiguration(\\\n Configuration(self.top_RL.universe, \\\n np.vstack((self.data['CD'].confs['receptor'],self.data['CD'].confs['ligand']))))\n\n if self.args.params['CD']['rmsd'] is not False:\n if self.args.params['CD']['rmsd'] is True:\n if lig_crd is not None:\n rmsd_crd = lig_crd[self.top.inv_prmtop_atom_order_L, :]\n else:\n raise Exception('Reference structure for rmsd calculations unknown')\n else:\n rmsd_crd = IO_crd.read(self.args.params['CD']['rmsd'], \\\n natoms=self.top.universe.numberOfAtoms(), multiplier=0.1)\n rmsd_crd = rmsd_crd[self.top.inv_prmtop_atom_order_L, :]\n self.data['CD'].confs['rmsd'] = rmsd_crd\n\n self.get_rmsds.set_ref_configuration(self.data['CD'].confs['rmsd'])\n\n # If configurations are being rescored, start with a docked structure\n (confs, Es) = self._get_confs_to_rescore(site=False, minimize=False)\n if len(confs) > 0:\n self.top.universe.setConfiguration(\n Configuration(self.top.universe, confs[-1]))\n\n from AlGDock.simulation_iterator import SimulationIterator\n self.iterator = SimulationIterator(self.args, self.top, self.system)\n\n # Load progress\n from AlGDock.postprocessing import Postprocessing\n Postprocessing(self.args, self.log, self.top, self.top_RL, self.system, self.data, self.save).run(readOnly=True)\n\n self.calc_f_L(readOnly=True)\n self.calc_f_RL(readOnly=True)\n\n if self.args.random_seed > 0:\n np.random.seed(self.args.random_seed)",
"def get_frame_data(self):\n # FrameObject is a dictionary of slot names and values.\n frameObject = self.pgdb.sendPgdbFnCall('get-frame-object', self.frameid)\n if not frameObject:\n raise PythonCycError(\"Could not retrieve frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n else:\n self._gotframe = True\n # Modify slot names to allow Python's syntax (e.g., '_' instead of '-').\n for slot in frameObject:\n self.__dict__[convertLispIdtoPythonId(slot)] = frameObject[slot]\n return self"
] | [
"0.7257079",
"0.5769627",
"0.5761178",
"0.57511663",
"0.5472211",
"0.5416792",
"0.5408845",
"0.5397852",
"0.53871006",
"0.5366009",
"0.53453034",
"0.5293592",
"0.52530783",
"0.5228261",
"0.5221702",
"0.5218166",
"0.52106",
"0.5198825",
"0.51540816",
"0.5151581",
"0.5141395",
"0.5126665",
"0.5103629",
"0.51013196",
"0.5088175",
"0.5086921",
"0.5086042",
"0.50859916",
"0.5085467",
"0.5075684"
] | 0.6840002 | 1 |
Initialise I2C platform and set feature. | def __init__(self, machine):
super().__init__(machine)
self.features['has_i2c'] = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def __init__(self, machine):\n self.machine = machine # type: MachineController\n self.features = {}\n super().__init__()\n self.debug = False\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_dmds'] = False\n self.features['has_rgb_dmds'] = False\n self.features['has_accelerometers'] = False\n self.features['has_i2c'] = False\n self.features['has_servos'] = False\n self.features['has_lights'] = False\n self.features['has_switches'] = False\n self.features['has_drivers'] = False\n self.features['tickless'] = False\n self.features['has_segment_displays'] = False\n self.features['has_hardware_sound_systems'] = False\n self.features['has_steppers'] = False\n self.features['allow_empty_numbers'] = False\n self.features['hardware_eos_repulse'] = False",
"def __init__(self, machine):\n self.machine = machine\n self.features = {}\n self.log = None\n self.debug = False\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_dmd'] = False\n self.features['has_rgb_dmd'] = False\n self.features['has_accelerometers'] = False\n self.features['has_i2c'] = False\n self.features['has_servos'] = False\n self.features['has_matrix_lights'] = False\n self.features['has_gis'] = False\n self.features['has_leds'] = False\n self.features['has_switches'] = False\n self.features['has_drivers'] = False\n self.features['tickless'] = False",
"def _init_hardware(self):\n return",
"def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()",
"def __init__(self, i2c, address=_SGP30_DEFAULT_I2C_ADDR):\n self._i2c = i2c\n self._addr = address\n self.serial = self._i2c_read_words_from_cmd(command=[0x36, 0x82], reply_size=3, delay=0.01)\n featureset = self._i2c_read_words_from_cmd([0x20, 0x2f], 1, 0.01)\n if featureset[0] != _SGP30_FEATURESET:\n raise RuntimeError('SGP30 Not detected')\n self.initialise_indoor_air_quality()",
"def __init__(self, i2c: I2C, address: int = _SGP30_DEFAULT_I2C_ADDR) -> None:\n self._device = I2CDevice(i2c, address)\n\n # get unique serial, its 48 bits so we store in an array\n self.serial = self._i2c_read_words_from_cmd([0x36, 0x82], 0.01, 3)\n # get featureset\n featureset = self._i2c_read_words_from_cmd([0x20, 0x2F], 0.01, 1)\n if featureset[0] not in _SGP30_FEATURESETS:\n raise RuntimeError(\"SGP30 Not detected\")\n self.iaq_init()",
"def __init__(self):\n i2c.Pn532_i2c.__init__(self)\n self._uid = False",
"def configure(self):\n\n self.platform.configure()",
"def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()",
"def __init__(self, machine):\n super().__init__(machine)\n self.features['has_hardware_sound_systems'] = True",
"def Initialise(self):\n self.__m_Platform.Initialise()\n self.__m_Pump.Initialise( False )",
"def platform_start(self):\n self.platform.start()",
"def _setup_io_devices(self) -> None:\n # Add PCI\n self.platform.pci_host.pio = self.iobus.mem_side_ports\n\n # Add Ethernet card\n self.ethernet = IGbE_e1000(\n pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1\n )\n\n self.ethernet.host = self.platform.pci_host\n self.ethernet.pio = self.iobus.mem_side_ports\n self.ethernet.dma = self.iobus.cpu_side_ports\n\n if self.get_cache_hierarchy().is_ruby():\n for device in self._off_chip_devices + self._on_chip_devices:\n device.pio = self.iobus.mem_side_ports\n\n else:\n for device in self._off_chip_devices:\n device.pio = self.iobus.mem_side_ports\n for device in self._on_chip_devices:\n device.pio = self.get_cache_hierarchy().get_mem_side_port()\n\n self.bridge = Bridge(delay=\"10ns\")\n self.bridge.mem_side_port = self.iobus.cpu_side_ports\n self.bridge.cpu_side_port = (\n self.get_cache_hierarchy().get_mem_side_port()\n )\n self.bridge.ranges = [\n AddrRange(dev.pio_addr, size=dev.pio_size)\n for dev in self._off_chip_devices\n ]\n\n # PCI\n self.bridge.ranges.append(AddrRange(0x2F000000, size=\"16MB\"))\n self.bridge.ranges.append(AddrRange(0x30000000, size=\"256MB\"))\n self.bridge.ranges.append(AddrRange(0x40000000, size=\"512MB\"))",
"def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access",
"def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()",
"def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0",
"def __init__(self):\n try: \n self.i2c = busio.I2C(board.SCL, board.SDA)\n self.mpu = adafruit_mpu6050.MPU6050(self.i2c)\n \n except: \n print(\"No IMU connection\")",
"def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)",
"def _setup(self) -> None:\n self._api = get_api(\n self._password,\n self._host,\n self._username,\n self._port,\n self._ssl,\n )\n\n self._info = self._api.get_info()\n self.device_name = self._info.get(\"DeviceName\", DEFAULT_NAME)\n self.model = self._info.get(\"ModelName\")\n self.firmware_version = self._info.get(\"Firmwareversion\")\n\n for model in MODELS_V2:\n if self.model.startswith(model):\n self._method_version = 2",
"def open(self):\n self._i2c.open(bus=self._i2c_bus)\n self._configure_i2c_library_functions()\n if self.debug:\n print('VL53L1X: Opened I2C bus {}'.format(self._i2c_bus))",
"def use_i2c():\n _LIB.oled_click_use_i2c()",
"def initialize(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def doInitializeDevice(self):\n super().doInitializeDevice()",
"async def init_provider(self):\n self.dsp_name = \"OpenStack\"\n await self._provider.init(image_names=self.config[\"images\"].values())",
"def __init__(self, address=0x68, **kwargs):\n I2CDevice.__init__(self, address, **kwargs)\n logger.info(\"Created new si5324 instance with address 0x{:02X}.\".format(address))\n self.iCAL_required = True # An iCAL is required at least once before run",
"def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()",
"def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )",
"def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")"
] | [
"0.69223726",
"0.69223726",
"0.6696322",
"0.6669789",
"0.661578",
"0.6472706",
"0.6393477",
"0.6341301",
"0.6272525",
"0.62672776",
"0.61610824",
"0.61264825",
"0.6056944",
"0.604401",
"0.6024838",
"0.59611595",
"0.591767",
"0.59086883",
"0.5897186",
"0.5890242",
"0.5880437",
"0.5869196",
"0.5864167",
"0.58258975",
"0.5819685",
"0.5806675",
"0.58021945",
"0.57550603",
"0.5738162",
"0.57249886"
] | 0.7734488 | 0 |
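The positive document in the record above only flags I2C support on an abstract platform base. As a hedged illustration (not part of the dataset), a concrete subclass might look like the sketch below; everything except `features['has_i2c']` — the class name, the `machine` argument, and the read/write hooks — is an assumption.

```python
# Minimal sketch, assuming the platform-base conventions seen in these records:
# a subclass advertises I2C support and supplies the access hooks.
# All names other than features['has_i2c'] are hypothetical.
class ExampleI2cPlatform:
    def __init__(self, machine):
        self.machine = machine
        self.features = {}
        self.features['has_i2c'] = True   # tell the framework I2C is available

    def i2c_write8(self, address, register, value):
        raise NotImplementedError         # hardware-specific write goes here

    def i2c_read8(self, address, register):
        raise NotImplementedError         # hardware-specific read goes here
```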
Write an 8bit value to a specific address and register via I2C. | def i2c_write8(self, address, register, value):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, register, value): #good\r\n\t\tself.i2c.write8(register, value)",
"def write8(self, register, value):\n raise NotImplementedError",
"def _i2c_write(self, register, value, bank=None):\n if bank is not None:\n self.set_bank(bank)\n self.i2c.write_byte_data(self.address, register, value)",
"def _write_register(self, reg, value):\n _buf = bytearray([reg, (value >> 8) & 0xFF, value & 0xFF])\n with self.i2c_device as i2c:\n i2c.writeto(self._i2c_addr, _buf)",
"def write_byte(fd, reg, b):\n write_read_i2c(fd, bytes([reg, b]), 0)",
"def swd_write8(self, output, value):\n return self.swd_write(output, value, 8)",
"def write_io_byte(self, address, value):\n return self.io.write(self.regs.resolve_address(address), value)",
"def write_register(self, device_id, address, value):\n register_array = Array('B', [0x00, 0x00, 0x00, 0x00])\n register_array[0] = (value >> 24) & 0xFF\n register_array[1] = (value >> 16) & 0xFF\n register_array[2] = (value >> 8) & 0xFF\n register_array[3] = (value) & 0xFF\n self.write(device_id, address, register_array)",
"def i2c_read8(self, address, register):\n raise NotImplementedError",
"def _write8bits(self, value):\n\t\tfor i in range(8):\n\t\t\tbit = (value >> i) & 0x01\n\t\t\tGPIO.output(self.pins_data[i], bit)\n\t\tself._pulse_e()\n\t\tmicroSleep(1) # Address Hold Time is 10nS min",
"def _write_register(self, reg_addr, data):\n self.bus.write_i2c_block_data(self.I2C_ADDRESS, reg_addr, [ data & 0xFF ])",
"def _write_register(self, reg_addr, data):\n self.bus.write_i2c_block_data(self.I2C_ADDRESS, reg_addr, [ data & 0xFF ])",
"def Write_Int8(self,Address,Register,Int8):\n self.Transaction(chr(Address)+chr(Register)+struct.pack('b',Int8))",
"def _write8bits(self, value):\n for i in range(8):\n bit = (value >> i) & 0x01\n self.output(self._data_pins()[i], bit)\n self._pulse_enable()",
"def expanderWrite( self, _data ): # uint8_t\n\t\t#Wire.beginTransmission(_Addr);\n\t\t#printIIC((int)(_data) | _backlightval) # print II\n\t\tself.i2c.writeto( self.address, bytes( [_data | self._backlightval] ))\n\t\t#Wire.endTransmission();",
"def write_uint8(self,value):\n packed = struct.pack('!B',value)\n self.data.extend(packed)",
"def write_byte_data(self, value, register_address):\n if type(value) != list:\n value = [value]\n with SMBus(self.i2c_bus) as bus:\n bus.write_i2c_block_data(self.i2c_address, register_address, value)",
"def __set_i2c_address(self, address):\n fcntl.ioctl(self.file_read, self.I2C_SLAVE, address)\n fcntl.ioctl(self.file_write, self.I2C_SLAVE, address)",
"def toI2C(n):\n print(\"{} 0x{:02x} I2C Address \".format(n, n))\n print(\"==================\")\n wb = n << 1\n print(\"{} 0x{:02x} Address Write\".format(wb, wb))\n rb = (n << 1) | 0b000001\n print(\"{} 0x{:02x} Address Read\".format(rb, rb))",
"def send_byte(byte_out):\n GPIO.output(clock_pin, 0)\n # set the chip select to write\n GPIO.output(chip_select, 1)\n # send the byte \n values = [(ord(byte_out) >> i) % 2 for i in range(0, 8)]\n GPIO.setup(data_pins, GPIO.OUT)\n GPIO.output(data_pins, values)\n # flash the clock pin\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)",
"def _write_byte(self, byte):\n\n # Setup io pin as output\n self.gpio.setup(self._io_pin, GPIO.OUT)\n\n for _ in range(8):\n # Write data on the rising edge of clk\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n self.gpio.output(self._io_pin, byte & 0x01)\n\n byte >>= 1\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()",
"def Write_uInt8(self,Address,Register,uInt8):\n self.Transaction(chr(Address)+chr(Register)+struct.pack('B',uInt8))",
"def write24bit(self, register, value):\n if value > ((2 ** 24) - 1) or value < 0:\n raise ValueError\n valuearray = struct.pack('!I', value) # Place value into byte array, format '!I' is int, network order\n self.device.writeregistermulti(register, valuearray[1:]) #Only 3 LSB bytes",
"def memory_write8(self, addr, data, zone=None):\n return self.memory_write(addr, data, zone, 8)",
"def flash_write8(self, addr, data):\n return self.flash_write(addr, data, 8)",
"def write_raw8(self, value):\n raise NotImplementedError",
"def __analogWrite__(self, channel, value):\n d = bytearray(2)\n d[0] = (value >> 8) & 0x0F\n d[1] = value & 0xFF\n self.writeBytes(d)",
"def StoreBits8(self, val):\n tmp_val = struct.pack(\">B\", val)\n self.StoreBits( (StrToList(tmp_val), 8))",
"def data(self, c):\n if self._spi is not None:\n # SPI write.\n self._gpio.set_high(self._dc)\n self._spi.write([c])\n else:\n # I2C write.\n control = 0x40 # Co = 0, DC = 0\n self._i2c.write8(control, c)",
"def setByte(self, address: ghidra.program.model.address.Address, value: int) -> None:\n ..."
] | [
"0.81820816",
"0.7564525",
"0.7462808",
"0.711898",
"0.71006817",
"0.6852014",
"0.6789489",
"0.6774337",
"0.6672894",
"0.6656542",
"0.6608093",
"0.6608093",
"0.6605974",
"0.6600265",
"0.6553627",
"0.6430836",
"0.63670194",
"0.6350984",
"0.6313063",
"0.6307843",
"0.6297266",
"0.62763494",
"0.6187555",
"0.61834276",
"0.6117757",
"0.6080053",
"0.6014755",
"0.5963149",
"0.5919043",
"0.58769375"
] | 0.872866 | 0 |
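The `i2c_write8` record above leaves the hardware access abstract. As a hedged sketch, one possible backend is the smbus2 library (an assumption; none of the records name it), whose `write_byte_data` call maps directly onto an 8-bit register write:

```python
# Sketch only: smbus2 is an assumed backend, not one named by the dataset.
from smbus2 import SMBus

def i2c_write8(bus_number, address, register, value):
    # Clamp to a single byte before handing it to the bus.
    with SMBus(bus_number) as bus:
        bus.write_byte_data(address, register, value & 0xFF)

# Hypothetical usage: write 0x01 to register 0x2D of a device at 0x53 on bus 1.
# i2c_write8(1, 0x53, 0x2D, 0x01)
```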
Read an 8bit value from an address and register via I2C. | def i2c_read8(self, address, register):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, register): #good\r\n\t\tcurrentVal = self.i2c.readU8(register)\r\n\t\treturn currentVal",
"def read_byte(fd, reg):\n b, = write_read_i2c(fd, bytes([reg]), 1)\n return b",
"def i2c_write8(self, address, register, value):\n raise NotImplementedError",
"def _i2c_read(self, register, bank=None):\n if bank is not None:\n self.set_bank(bank)\n return self.i2c.read_byte_data(self.address, register)",
"def read_register(self, device_id, address):\n register_array = self.read(device_id, address, 1) \n return register_array[0] << 24 | register_array[1] << 16 | register_array[2] << 8 | register_array[3]",
"def Read_Int8(self,Address,Register):\n return struct.unpack('b',self.Transaction(chr(Address+1)+chr(Register),3)[1][2])[0]",
"def i2c_read16(self, address, register):\n raise NotImplementedError",
"def read_U8(self, register):\n raise NotImplementedError",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"def Read_uInt8(self,Address,Register):\n return struct.unpack('B',self.Transaction(chr(Address+1)+chr(Register),3)[1][2])[0]",
"def read_io_byte(self, address):\n return self.io.read(self.regs.resolve_address(address))",
"def _read_register(self, reg, fast=False):\n _buf = bytearray(3)\n _reg = bytearray([reg])\n with self.i2c_device as i2c:\n if fast:\n i2c.readfrom_into(self._i2c_addr, _buf)\n else:\n i2c.write_then_readinto(self._i2c_addr, _reg, _buf, in_end=2)\n return _buf[0] << 8 | _buf[1]",
"def read_mcp3008(pi, adc, channel):\n count, data = pi.spi_xfer(adc, [1, (8 + channel) << 4, 0])\n value = ((data[1] << 8) | data[2]) & 0x3FF\n return value",
"def peek8(self,adr,memory=\"vn\"):\n data=self.SPItrans([self.READ,(adr>>8)&0xFF,adr&0xFF,0x00]);\n return ord(data[3]);",
"def read_S8(self, register):\n raise NotImplementedError",
"def toI2C(n):\n print(\"{} 0x{:02x} I2C Address \".format(n, n))\n print(\"==================\")\n wb = n << 1\n print(\"{} 0x{:02x} Address Write\".format(wb, wb))\n rb = (n << 1) | 0b000001\n print(\"{} 0x{:02x} Address Read\".format(rb, rb))",
"def write_byte(fd, reg, b):\n write_read_i2c(fd, bytes([reg, b]), 0)",
"def get_byte():\n GPIO.setup(data_pins, GPIO.IN)\n # read the data pins\n GPIO.output(chip_select, 0)\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)\n value = 0\n for i in range(0, 8):\n value += GPIO.input(data_pins[i]) << i\n return value",
"def _read_register_1sbyte(self, reg_addr):\n buffer = self.bus.read_i2c_block_data(self.I2C_ADDRESS, reg_addr, 1)\n val = buffer[0]\n if val & (1 << 7) != 0:\n val = val - (1 << 7)\n return val",
"def _read_register_1sbyte(self, reg_addr):\n buffer = self.bus.read_i2c_block_data(self.I2C_ADDRESS, reg_addr, 1)\n val = buffer[0]\n if val & (1 << 7) != 0:\n val = val - (1 << 7)\n return val",
"def readmem8(self, address):\n return self._readmem(address, 'mem8')",
"def read_i2c_word(self, register):\n\t\t# Read the data from the registers\n\t\thigh = self.bus.read_byte_data(self.address, register)\n\t\tlow = self.bus.read_byte_data(self.address, register + 1)\n\n\t\tvalue = (high << 8) + low\n\n\t\tif (value >= 0x8000):\n\t\t\treturn -((65535 - value) + 1)\n\t\telse:\n\t\t\treturn value",
"def read_i2c_block_data(self, i2c_address, register, length):\n return self.regs[register:register + length]",
"def _read_byte(self):\n # Setup io pin as input mode\n self.gpio.setup(self._io_pin, GPIO.IN)\n\n byte = 0\n for i in range(8):\n # Read data on the falling edge of clk\n self.gpio.output(self._clk_pin, GPIO.HIGH)\n self._sleep()\n\n self.gpio.output(self._clk_pin, GPIO.LOW)\n self._sleep()\n\n bit = self.gpio.input(self._io_pin)\n byte |= ((2 ** i) * bit)\n\n return byte",
"def write(self, register, value): #good\r\n\t\tself.i2c.write8(register, value)",
"def read_i2c_word(self, register):\n # Read the data from the registers\n high = self.bus.read_byte_data(self.address, register)\n low = self.bus.read_byte_data(self.address, register + 1)\n\n value = (high << 8) + low\n\n if (value >= 0x8000):\n return -((65535 - value) + 1)\n else:\n return value",
"def Read_uInt8s(self,Address,Register,Number=0):\n pass",
"def read_uint8(self):\n bytes = self.data[:1]\n value = struct.unpack('!B',bytes)[0]\n self.data = self.data[1:]\n return value",
"def ReadInt8(self, endian=\"<\"):\n return self.unpack('%sb' % endian)"
] | [
"0.7447897",
"0.7203795",
"0.71414053",
"0.7111455",
"0.67797315",
"0.66714233",
"0.66070837",
"0.6474858",
"0.64660287",
"0.64660287",
"0.6404863",
"0.6402492",
"0.6393998",
"0.6368525",
"0.6337723",
"0.6311798",
"0.6286579",
"0.628637",
"0.6215728",
"0.6214058",
"0.6214058",
"0.6195721",
"0.618829",
"0.61751366",
"0.61701035",
"0.61533713",
"0.6145114",
"0.61200196",
"0.60911644",
"0.60786223"
] | 0.8244201 | 0 |
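Likewise, the 8-bit read hook could be backed by smbus2's `read_byte_data` (again an assumed backend, shown only for illustration):

```python
# Sketch only: the dataset records keep the backend abstract.
from smbus2 import SMBus

def i2c_read8(bus_number, address, register):
    with SMBus(bus_number) as bus:
        return bus.read_byte_data(address, register)

# Hypothetical usage: read an ID-style register 0x0F of a device at 0x6B on bus 1.
# value = i2c_read8(1, 0x6B, 0x0F)
```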
Read a 16bit value from an address and register via I2C. | def i2c_read16(self, address, register):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read16bit(self, register):\n valuearray = bytearray(self.device.readregistermulti(register, 2))\n return struct.unpack('!H', valuearray)[0]",
"def _i2c_read(self, register, bank=None):\n if bank is not None:\n self.set_bank(bank)\n return self.i2c.read_byte_data(self.address, register)",
"def read_register(self, device_id, address):\n register_array = self.read(device_id, address, 1) \n return register_array[0] << 24 | register_array[1] << 16 | register_array[2] << 8 | register_array[3]",
"def readmem16(self, address):\n return self._readmem(address, 'mem16')",
"def Read_uInt16(self,Address,Register):\n return struct.unpack('H',self.Transaction(chr(Address+1)+chr(Register),6)[1][2:4])[0]",
"def read_byte(fd, reg):\n b, = write_read_i2c(fd, bytes([reg]), 1)\n return b",
"def read(self, register): #good\r\n\t\tcurrentVal = self.i2c.readU8(register)\r\n\t\treturn currentVal",
"def _read_register(self, reg, fast=False):\n _buf = bytearray(3)\n _reg = bytearray([reg])\n with self.i2c_device as i2c:\n if fast:\n i2c.readfrom_into(self._i2c_addr, _buf)\n else:\n i2c.write_then_readinto(self._i2c_addr, _reg, _buf, in_end=2)\n return _buf[0] << 8 | _buf[1]",
"def read_u16(self) -> int:\n ...",
"def read_uint16(self):\n return self.read(BitTypes.UINT_16.value)",
"def read_u16(self) -> int:",
"def _read16(input):\n return struct.unpack(\"<H\", _read_exactly(input, 2))[0]",
"def read_U16BE(self, register):\n raise NotImplementedError",
"def Read_uInt16s(self,Address,Register,Number):\n pass",
"def read_S16(self, register, little_endian=True):\n raise NotImplementedError",
"def i16(c, o = 0):\n return struct.unpack(\"<H\", c[o:o+2])[0]",
"def read_S16BE(self, register):\n raise NotImplementedError",
"def swd_read16(self, offset):\n value = self._dll.JLINK_SWD_GetU16(offset)\n return ctypes.c_uint16(value).value",
"def i2c_read8(self, address, register):\n raise NotImplementedError",
"def read_i2c_word(self, register):\n\t\t# Read the data from the registers\n\t\thigh = self.bus.read_byte_data(self.address, register)\n\t\tlow = self.bus.read_byte_data(self.address, register + 1)\n\n\t\tvalue = (high << 8) + low\n\n\t\tif (value >= 0x8000):\n\t\t\treturn -((65535 - value) + 1)\n\t\telse:\n\t\t\treturn value",
"def read_i2c_block_data(self, i2c_address, register, length):\n return self.regs[register:register + length]",
"def toI2C(n):\n print(\"{} 0x{:02x} I2C Address \".format(n, n))\n print(\"==================\")\n wb = n << 1\n print(\"{} 0x{:02x} Address Write\".format(wb, wb))\n rb = (n << 1) | 0b000001\n print(\"{} 0x{:02x} Address Read\".format(rb, rb))",
"def read_S16LE(self, register):\n raise NotImplementedError",
"def read_U16LE(self, register):\n raise NotImplementedError",
"def read_i2c_word(self, register):\n # Read the data from the registers\n high = self.bus.read_byte_data(self.address, register)\n low = self.bus.read_byte_data(self.address, register + 1)\n\n value = (high << 8) + low\n\n if (value >= 0x8000):\n return -((65535 - value) + 1)\n else:\n return value",
"def read_U16(self, register, little_endian=True):\n raise NotImplementedError",
"def read_spi_data_channel(channel):\n\n adc = spi.xfer2([1, (8+channel) << 4, 0])\n return ((adc[1] & 3) << 8) + adc[2]",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"def read_mcp3008(pi, adc, channel):\n count, data = pi.spi_xfer(adc, [1, (8 + channel) << 4, 0])\n value = ((data[1] << 8) | data[2]) & 0x3FF\n return value"
] | [
"0.69229364",
"0.6757217",
"0.66875",
"0.66865295",
"0.6567214",
"0.6475658",
"0.64697033",
"0.6447846",
"0.639497",
"0.6385862",
"0.6369454",
"0.6283643",
"0.6235697",
"0.6234341",
"0.62243325",
"0.619493",
"0.61311156",
"0.6094481",
"0.60801494",
"0.6063816",
"0.6032868",
"0.6027926",
"0.6018641",
"0.60124815",
"0.60087126",
"0.598734",
"0.5960672",
"0.5943791",
"0.5943791",
"0.5892446"
] | 0.8427159 | 0 |
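For the 16-bit read, smbus2's `read_word_data` is one plausible backend (an assumption). SMBus word reads arrive low byte first, so a swap may be needed for devices whose registers are big-endian:

```python
# Sketch only: backend and byte-order handling are assumptions.
from smbus2 import SMBus

def i2c_read16(bus_number, address, register, big_endian=True):
    with SMBus(bus_number) as bus:
        raw = bus.read_word_data(address, register)   # low byte first on the wire
    if big_endian:
        raw = ((raw & 0xFF) << 8) | (raw >> 8)        # swap for big-endian devices
    return raw & 0xFFFF
```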
Configure a servo device in platform. | def configure_servo(self, config):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 544,\n max_pulse = 2400,\n angle = 93\n )",
"async def configure_servo(self, number: str) -> \"ServoPlatformInterface\":\n raise NotImplementedError",
"def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)",
"def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1",
"def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):\n if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:\n raise IOError(\"Pin %s is not a valid servo pin\")\n data = itertools.chain([pin], to_two_bytes(min_pulse),\n to_two_bytes(max_pulse))\n self.send_sysex(SERVO_CONFIG, data)\n \n # set pin._mode to SERVO so that it sends analog messages\n # don't set pin.mode as that calls this method\n self.digital[pin]._mode = SERVO\n self.digital[pin].write(angle)",
"async def servo_config(self, pin, min_pulse=544, max_pulse=2400):\n #command = [pin, min_pulse & 0x7f, (min_pulse >> 7) & 0x7f, max_pulse & 0x7f,\n # (max_pulse >> 7) & 0x7f]\n\n self._digital_pins_directly[pin].ConfigServo(min_pulse, max_pulse)\n #await self._send_sysex(PrivateConstants.SERVO_CONFIG, command)",
"def SelectServo(self, servo):\n if servo == 'none':\n self._servo_port = None\n elif servo == 'any':\n self._servo_port = 0\n else:\n self._servo_port = int(servo)\n self._out.Notice('Servo port %s' % str(self._servo_port))",
"async def servo_config(self, pin, min_pulse=544, max_pulse=2400):\n command = [pin, min_pulse & 0x7f, (min_pulse >> 7) & 0x7f, max_pulse & 0x7f,\n (max_pulse >> 7) & 0x7f]\n\n await self._send_sysex(PrivateConstants.SERVO_CONFIG, command)",
"def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)",
"def pibooth_configure(cfg):",
"def _DutControl(self, args):\n if self._servo_port is None:\n raise IOError('No servo access available, please use --servo')\n if self._servo_port:\n args.extend(['-p', '%s' % self._servo_port])\n return self._tools.Run('dut-control', args)",
"def servo_force(self, *args, **kwargs) -> Any:\n pass",
"def init_servos():\n for i in range(0, 7):\n kit.servo[i].actuation_range = 180\n kit.servo[i].set_pulse_width_range(450, 2550)",
"def set_servo(name,servo,value):\n name = _lookup(name)\n servo_data = list(name) + [-1,-1,-1,-1]\n servo_data[servo + 1] = value\n mc.set('servo_values',servo_data)",
"def setservo(pidevice, axes, states=None, toignore=None, **kwargs):\n if not isdeviceavailable([GCS2Commands, GCS21Commands], pidevice):\n raise TypeError('Type %s of pidevice is not supported!' % type(pidevice).__name__)\n\n if not pidevice.HasSVO():\n return False\n if not axes:\n return True\n axes, states = getitemsvaluestuple(axes, states)\n if pidevice.HasRNP():\n axestorelax = [axis for axis, state in list(getservo(pidevice, axes).items()) if not state]\n if axestorelax:\n pidevice.RNP(axestorelax, [0.0] * len(axestorelax))\n waitonready(pidevice, **kwargs)\n eaxaxes = [axes[i] for i in range(len(axes)) if states[i]]\n enableaxes(pidevice, axes=eaxaxes, **kwargs)\n success = True\n toignore = [] if toignore is None else toignore\n toignore = [toignore] if not isinstance(toignore, list) else toignore\n toignore += [gcserror.E5_PI_CNTR_MOVE_WITHOUT_REF_OR_NO_SERVO, gcserror.E23_PI_CNTR_ILLEGAL_AXIS]\n for i, axis in enumerate(axes):\n try:\n pidevice.SVO(axis, states[i])\n except GCSError as exc: # no GCSRaise() because we want to log a warning\n if exc in toignore:\n debug('could not set servo for axis %r to %s: %s', axis, states[i], exc)\n success = False\n else:\n raise\n waitonready(pidevice, **kwargs)\n return success",
"def servo_make_default(self):\n self.servo_config.save_as_default_config()",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def configure_vdc(self, rng, res, unit = 'V'):\n self.write_to_serial(':conf:volt:dc ' + str(rng) + ',' + str(res))# + unit)",
"def configure(self):\n\t\tself.outChannel = CAClient(self.pvstring + \".AOUT\")\n\t\tself.outChannel.configure()\n\t\tself.inChannel = CAClient(self.pvstring + \".TINP\")\n\t\tself.inChannel.configure()",
"async def configure_stepper(self, number: str, config: dict) -> \"StepperPlatformInterface\":\n raise NotImplementedError",
"def resetservo(self):\n debug('ControllerStartup.resetservo()')\n if self.servostates is not None:\n setservo(self.pidevice, self.servostates)\n elif self._databuf['servobuf']:\n setservo(self.pidevice, self._databuf['servobuf'])",
"def set_servo(self, servo: int, position: Optional[ServoPosition]) -> None:\n if servo < 0 or servo >= self._num_servos:\n raise RuntimeError(\"That servo does not exist.\")",
"def configure(self):\n\n # instantiate Serial\n self.serial = serial.Serial()\n\n # set port_path, e.g. '/dev/ttyUSBx' or 'COMx'\n self.serial.port = self.port.device\n\n # set baudrate\n self.serial.baudrate = 115200",
"def _on_config_changed(self, _):\n self._configure_pod()",
"def _configure_pod(self):\n logger.debug(\"Configuring Pod\")\n\n if not self.unit.is_leader():\n self.unit.status = ActiveStatus()\n return\n\n self.unit.status = MaintenanceStatus(\"Setting pod spec.\")\n pod_spec = self._build_pod_spec()\n\n self.model.pod.set_spec(pod_spec)\n self.unit.status = ActiveStatus()",
"def __init__(self, servo_gpio, pi=None, pulse_left_ns=2500, pulse_right_ns=1000, pulse_centre_ns=None):\n\n self.gpio = servo_gpio\n\n if pi is None:\n self.pi = pi = pigpio.pi()\n else:\n self.pi = pi\n\n self.pulse_left_ns = pulse_left_ns\n self.pulse_right_ns = pulse_right_ns\n\n if pulse_centre_ns is None:\n self.pulse_centre_ns = ((pulse_left_ns - pulse_right_ns) // 2) + pulse_right_ns",
"def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0",
"def setup(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.Motor_A_EN, GPIO.OUT)\n GPIO.setup(self.Motor_B_EN, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_A_Pin2, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin1, GPIO.OUT)\n GPIO.setup(self.Motor_B_Pin2, GPIO.OUT)\n self.motorStop() # Avoids automatic motor rotation after initialization\n try: # Try is used here to avoid errors due to repeated setting of PWM\n self.pwm_A = GPIO.PWM(self.Motor_A_EN, 1000)\n self.pwm_B = GPIO.PWM(self.Motor_B_EN, 1000)\n except:\n pass",
"def app_principal_led():\n \"\"\"\n import serial\n ser = serial.Serial(0) # open first serial port\n print ser.portstr # check which port was really used\n ser.write(\"hello\") # write a string\n ser.close() # close port \n \"\"\"\n\n\n start = mpa.ModuloPyArduino()\n p, v = start.config_arduino()\n con = start.set_conection(p, v)\n\n\n print \"\\n Status of conection: \", con\n if con != 0:\n start.serial_loop_app(con, 1)\n else:\n pass\n\n con.close()",
"def servo(self, position: int):\n position = int(position) # bytes only takes ints\n\n command = bytearray([])\n command += b\"e\"\n command += bytes([position])\n command += b\"\\n\"\n\n # newline indicates end of command to arduino\n self.send(command)"
] | [
"0.77946854",
"0.7138827",
"0.6989722",
"0.6734098",
"0.6667007",
"0.6605694",
"0.65124875",
"0.63172555",
"0.631123",
"0.6249312",
"0.62050116",
"0.61567163",
"0.61037195",
"0.6098429",
"0.60313237",
"0.6020388",
"0.6019371",
"0.599168",
"0.5991535",
"0.5964606",
"0.5962235",
"0.58867043",
"0.5865651",
"0.58654606",
"0.58139557",
"0.57678074",
"0.5725424",
"0.56984216",
"0.56346667",
"0.5621525"
] | 0.72971183 | 1 |
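The `configure_servo` record only defines the hook. A hedged sketch of the kind of object it could return is below, driving a hobby servo through pigpio's `set_servo_pulsewidth` (the library choice, the `go_to_position` name, and the 500–2500 µs pulse range are all assumptions):

```python
import pigpio

class PigpioServo:
    """Sketch of a servo platform-interface object; names are assumptions."""

    def __init__(self, gpio, min_pulse_us=500, max_pulse_us=2500):
        self.pi = pigpio.pi()            # connect to the local pigpio daemon
        self.gpio = gpio
        self.min_pulse_us = min_pulse_us
        self.max_pulse_us = max_pulse_us

    def go_to_position(self, position):
        # position is expected in 0.0 .. 1.0 and mapped linearly to pulse width
        position = min(max(float(position), 0.0), 1.0)
        pulse = self.min_pulse_us + position * (self.max_pulse_us - self.min_pulse_us)
        self.pi.set_servo_pulsewidth(self.gpio, int(pulse))

    def stop(self):
        self.pi.set_servo_pulsewidth(self.gpio, 0)   # 0 switches the pulses off
```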
Subclass this method in a platform module to configure a matrix light. This method should return a reference to the matrix light's platform interface object which will be called to access the hardware. | def configure_matrixlight(self, config):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError",
"def port_maker(self, platform):\n raise NotImplementedError()",
"def _init_hardware(self):\n return",
"def configure(self):\n super(ProjectionMatrix, self).configure()\n if self.sensors is None:\n self.sensors = self.skin_air.sensors\n\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.skin_air.sensors = self.sensors\n self.skin_air.sensors_to_surface, self.skin_air.sensor_locations = self.sensors.sensors_to_surface(self.skin_air)\n\n # Create OpenMEEG objects from TVB objects.\n self.om_head = self.create_om_head()\n self.om_sources = self.create_om_sources()\n self.om_sensors = self.create_om_sensors()\n\n # Calculate based on type of sources\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source_matrix = self.surface_source() #NOTE: ~1 hr\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source_matrix = self.dipole_source()\n\n # Calculate based on type of sensors\n if isinstance(self.sensors, sensors_module.SensorsEEG):\n self.om_head2sensor = self.head2eeg()\n elif isinstance(self.sensors, sensors_module.SensorsMEG):\n self.om_head2sensor = self.head2meg()\n if isinstance(self.sources, surfaces_module.Cortex):\n self.om_source2sensor = self.surf2meg()\n elif isinstance(self.sources, connectivity_module.Connectivity):\n self.om_source2sensor = self.dip2meg()\n\n #NOTE: ~1 hr\n self.om_inverse_head = self.inverse_head(inv_head_mat_file = \"hminv_uid\")",
"def platform_init(self):\n if isinstance(self.imu, MockImuController) or isinstance(self.pwm_controller, MockPWMController):\n print(\"Mock components detected, creating mock antenna controller\")\n platform = MockPlatformController(self.azimuth_servo, self.elevation_servo, self.imu)\n else:\n print(\"Initializing PIDAntennaController class\")\n platform = PIDPlatformController(\n self.azimuth_servo,\n self.elevation_servo,\n self.imu,\n pid_output_limits=self.pid_config.get(\"output_limits\"),\n pid_frequency=self.pid_config.get(\"period\"),\n p=self.pid_config.get(\"p\"),\n i=self.pid_config.get(\"i\"),\n d=self.pid_config.get(\"d\")\n )\n \n self.platform = platform\n\n if not isinstance(self.gps, MockGPSController):\n self.gps_update_loop = GPSLocationController(self.gps)\n self.gps_update_loop.start()\n else:\n self.gps_update_loop = None\n \n return platform",
"def _connect_to_hardware(self):\n if False: # !!!TEMP:need to validate config...\n if len(self.config['ports']) > 1:\n self.log.fatal(\"only one slave com port is supported\")\n if len(self.config['ports']) == 0:\n self.log.warning(\"no communication port setted!\")\n return\n port = self.config['ports'][0]\n self.communicator = RaspSerialCommunicator(\n platform=self, port=port,\n baud=self.config['baud'])\n self.communicator = RaspSerialCommunicator(\n platform=self, port='/dev/ttyAMA0',\n baud=115200)",
"def setPlatform(self):\n\t\treturn None",
"def __init__( self, dev, port ):\n super( Grove_Light_Sensor, self ).__init__( dev, port )",
"def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n lights = []\n for channel, device_config in config[CONF_DEVICES].items():\n device = {}\n device[\"name\"] = device_config[CONF_NAME]\n device[\"dimmable\"] = device_config[\"dimmable\"]\n device[\"channel\"] = channel\n device[\"driver\"] = config[CONF_DRIVER]\n device[\"host\"] = config[CONF_HOST]\n device[\"port\"] = config[CONF_PORT]\n lights.append(FutureNowLight(device))\n\n add_entities(lights, True)",
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def __init__(self, envirophat, use_leds):\n self.envirophat = envirophat\n self.use_leds = use_leds\n # sensors readings\n self.light = None\n self.light_red = None\n self.light_green = None\n self.light_blue = None\n self.accelerometer_x = None\n self.accelerometer_y = None\n self.accelerometer_z = None\n self.magnetometer_x = None\n self.magnetometer_y = None\n self.magnetometer_z = None\n self.temperature = None\n self.pressure = None\n self.voltage_0 = None\n self.voltage_1 = None\n self.voltage_2 = None\n self.voltage_3 = None",
"def Initialize(self):\n return _gmat_py.Hardware_Initialize(self)",
"def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError",
"def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()",
"def configure(self):\n\n self.platform.configure()",
"def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()",
"def initialize(self, platform=None):\n\n if self._simulation is None:\n if type(platform) is str:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=openmm.Platform.getPlatformByName(platform),\n platformProperties=self.openmm_properties\n )\n elif platform is None:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platformProperties=self.openmm_properties\n )\n else:\n self._simulation = openmm.app.Simulation(\n topology=self.topology.mdtraj.to_openmm(),\n system=self.system,\n integrator=self.integrator,\n platform=platform,\n platformProperties=self.openmm_properties\n )\n\n logger.info(\n 'Initialized OpenMM engine using platform `%s`' %\n self.platform)",
"def getPlatform(self):\n\t\treturn None",
"def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n for light in self.all:\n GPIO.setup(light, GPIO.OUT)",
"def test_light_sensor(self):\n with patch.dict(TYPES, {'LightSensor': self.mock_type}):\n state = State('sensor.light', '900',\n {ATTR_DEVICE_CLASS: 'illuminance'})\n get_accessory(None, state, 2, {})",
"def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])",
"def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_light')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: True,\n FEATURE_PERCENT: True,\n FEATURE_NUMBER_OF_STEPS: 100\n })",
"def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()",
"def define_material(self):\n\n # Check which class should be called.\n const_eqn = self.config['material']['const_eqn']\n if isclass(const_eqn):\n mat_class = self.config['material']['const_eqn']\n elif const_eqn == 'lin_elastic':\n mat_class = materials.solid_materials.LinearIsoMaterial\n elif const_eqn == 'neo_hookean':\n mat_class = materials.solid_materials.NeoHookeMaterial\n elif const_eqn == 'demiray':\n mat_class = materials.solid_materials.DemirayMaterial\n elif const_eqn == 'fung':\n mat_class = materials.solid_materials.FungMaterial\n elif const_eqn == 'guccione':\n mat_class = materials.solid_materials.GuccioneMaterial\n elif const_eqn == 'holzapfel_ogden':\n mat_class = materials.solid_materials.HolzapfelOgdenMaterial\n elif const_eqn == 'newtonian' or const_eqn == 'stokes':\n mat_class = materials.fluids.NewtonianFluid\n else:\n raise NotImplementedError(\"Shouldn't be in here...\")\n\n # Create an instance of the material class and store\n # as member data.\n try:\n inverse = self.config['formulation']['inverse']\n except KeyError:\n inverse = False\n self._material = mat_class(inverse=inverse,\n **self.config['material'])\n\n return None",
"def __init__(self, device: SensemeDevice) -> None:\n super().__init__(device, f\"{device.name} Light\")\n self._attr_supported_color_modes = {ColorMode.COLOR_TEMP}\n self._attr_color_mode = ColorMode.COLOR_TEMP\n self._attr_min_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_max\n )\n self._attr_max_mireds = color_temperature_kelvin_to_mired(\n device.light_color_temp_min\n )",
"def __init__(self, machine):\n self.machine = machine\n self.features = {}\n self.log = None\n self.debug = False\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_dmd'] = False\n self.features['has_rgb_dmd'] = False\n self.features['has_accelerometers'] = False\n self.features['has_i2c'] = False\n self.features['has_servos'] = False\n self.features['has_matrix_lights'] = False\n self.features['has_gis'] = False\n self.features['has_leds'] = False\n self.features['has_switches'] = False\n self.features['has_drivers'] = False\n self.features['tickless'] = False",
"def __init__(self, host):\n self._io = RemoteIO(host)\n self._host = host\n\n self._left_wheel = Wheel(id='b', side='left', remote_io=self._io)\n self._right_wheel = Wheel(id='a', side='right', remote_io=self._io, inverse=True)\n\n self._cam = Camera(host)\n\n self._left_led = LED(side='left', remote_io=self._io)\n self._front_led = LED(side='center', remote_io=self._io)\n self._right_led = LED(side='right', remote_io=self._io)",
"def configure_dmd(self) -> \"DmdPlatformInterface\":\n raise NotImplementedError",
"def set_light_mode(self, is_lid):\n raise NotImplementedError()"
] | [
"0.6384125",
"0.60540134",
"0.5839954",
"0.5778105",
"0.5726734",
"0.5701559",
"0.5663591",
"0.56370527",
"0.5621239",
"0.56157774",
"0.56157774",
"0.55837554",
"0.5552895",
"0.55357146",
"0.55314416",
"0.55262935",
"0.5517123",
"0.5507401",
"0.5503909",
"0.5472796",
"0.5434975",
"0.5385365",
"0.53803635",
"0.5378067",
"0.53734154",
"0.53694874",
"0.535593",
"0.5343105",
"0.53325826",
"0.5324488"
] | 0.67870253 | 0 |
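`configure_matrixlight` is expected to return a platform interface object for the light. The sketch below shows one minimal shape such an object could take; the class name, the on/off methods, and the 0–255 brightness scale are assumptions, and the actual hardware write is left to a callable the platform supplies:

```python
class MatrixLight:
    """Sketch of the object configure_matrixlight() could return."""

    def __init__(self, number, write_brightness):
        self.number = number               # hardware channel / matrix address
        self._write = write_brightness     # callable(number, brightness 0..255)

    def on(self, brightness=255):
        self._write(self.number, max(0, min(255, int(brightness))))

    def off(self):
        self._write(self.number, 0)


# A platform could then implement the hook roughly as:
#     def configure_matrixlight(self, config):
#         return MatrixLight(config['number'], self._set_channel_brightness)
```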
Subclass this method in a platform module to configure a switch. This method should return a reference to the switch's platform interface object which will be called to access the hardware. | def configure_switch(self, config):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError",
"def _init_hardware(self):\n return",
"def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])",
"def port_maker(self, platform):\n raise NotImplementedError()",
"def configure_hardware_sound_system(self) -> \"HardwareSoundPlatformInterface\":\n raise NotImplementedError",
"def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop",
"def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError",
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def __init__(self, machine):\n super().__init__(machine)\n\n # Set default platform features. Each platform interface can change\n # these to notify the framework of the specific features it supports.\n self.features['has_drivers'] = True\n self.features['max_pulse'] = 255",
"def setPlatform(self):\n\t\treturn None",
"def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict) -> \"DriverPlatformInterface\":\n raise NotImplementedError",
"def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n add_devices_callback([\n HE853Switch('OviSwitch', STATE_ON),\n HE853Switch('AC', STATE_OFF)\n ])",
"def create_switch():\n connection = MagicMock()\n connection.address = 'addr'\n connection.port = 'port'\n connection.protocol.version = 0x04\n switch = Switch('00:00:00:00:00:00:00:01', connection)\n switch._enabled = True\n return switch",
"def configure(self):\n\n self.platform.configure()",
"def _connect_to_hardware(self):\n if False: # !!!TEMP:need to validate config...\n if len(self.config['ports']) > 1:\n self.log.fatal(\"only one slave com port is supported\")\n if len(self.config['ports']) == 0:\n self.log.warning(\"no communication port setted!\")\n return\n port = self.config['ports'][0]\n self.communicator = RaspSerialCommunicator(\n platform=self, port=port,\n baud=self.config['baud'])\n self.communicator = RaspSerialCommunicator(\n platform=self, port='/dev/ttyAMA0',\n baud=115200)",
"def __init__(\n self,\n netatmo_device: NetatmoDevice,\n ) -> None:\n super().__init__(netatmo_device.data_handler)\n\n self._switch = cast(NaModules.Switch, netatmo_device.device)\n\n self._id = self._switch.entity_id\n self._attr_name = self._device_name = self._switch.name\n self._model = self._switch.device_type\n self._config_url = CONF_URL_CONTROL\n\n self._home_id = self._switch.home.entity_id\n\n self._signal_name = f\"{HOME}-{self._home_id}\"\n self._publishers.extend(\n [\n {\n \"name\": HOME,\n \"home_id\": self._home_id,\n SIGNAL_NAME: self._signal_name,\n },\n ]\n )\n self._attr_unique_id = f\"{self._id}-{self._model}\"\n self._attr_is_on = self._switch.on",
"def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n pin = config.get(CONF_PIN)\n\n add_devices([ProgtimeSwitch(mac, pin, name)])",
"def platform_init(self):\n if isinstance(self.imu, MockImuController) or isinstance(self.pwm_controller, MockPWMController):\n print(\"Mock components detected, creating mock antenna controller\")\n platform = MockPlatformController(self.azimuth_servo, self.elevation_servo, self.imu)\n else:\n print(\"Initializing PIDAntennaController class\")\n platform = PIDPlatformController(\n self.azimuth_servo,\n self.elevation_servo,\n self.imu,\n pid_output_limits=self.pid_config.get(\"output_limits\"),\n pid_frequency=self.pid_config.get(\"period\"),\n p=self.pid_config.get(\"p\"),\n i=self.pid_config.get(\"i\"),\n d=self.pid_config.get(\"d\")\n )\n \n self.platform = platform\n\n if not isinstance(self.gps, MockGPSController):\n self.gps_update_loop = GPSLocationController(self.gps)\n self.gps_update_loop.start()\n else:\n self.gps_update_loop = None\n \n return platform",
"def getPlatform(self):\n\t\treturn None",
"def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })",
"def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()",
"def setup_platform(hass, config, add_devices, discovery_info=None) -> None:\n friendly_name = config.get(CONF_FRIENDLY_NAME)\n mac_addr = config.get(CONF_MAC)\n add_devices([Switchmate(mac_addr, friendly_name)], True)",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n switches = []\n for coil in config.get(\"coils\"):\n switches.append(ModbusCoilSwitch(\n coil.get(CONF_NAME),\n coil.get(CONF_SLAVE),\n coil.get(CONF_COIL)))\n add_devices(switches)",
"def get_switch(self, conf, dpid):\n\t\tpass",
"def setup(hass: HomeAssistant, base_config: ConfigType) -> bool: # noqa: C901\n\n hass.data[DOMAIN] = {}\n\n # Parse configuration into a dict of device name to physical address\n # represented as a list of four elements.\n device_aliases = {}\n devices = base_config[DOMAIN].get(CONF_DEVICES, {})\n _LOGGER.debug(\"Parsing config %s\", devices)\n device_aliases.update(parse_mapping(devices))\n _LOGGER.debug(\"Parsed devices: %s\", device_aliases)\n\n platform = base_config[DOMAIN].get(CONF_PLATFORM, SWITCH)\n\n loop = (\n # Create own thread if more than 1 CPU\n hass.loop\n if multiprocessing.cpu_count() < 2\n else None\n )\n host = base_config[DOMAIN].get(CONF_HOST)\n display_name = base_config[DOMAIN].get(CONF_DISPLAY_NAME, DEFAULT_DISPLAY_NAME)\n if host:\n adapter = TcpAdapter(host, name=display_name, activate_source=False)\n else:\n adapter = CecAdapter(name=display_name[:12], activate_source=False)\n hdmi_network = HDMINetwork(adapter, loop=loop)\n\n def _adapter_watchdog(now=None):\n _LOGGER.debug(\"Reached _adapter_watchdog\")\n event.call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n if not adapter.initialized:\n _LOGGER.info(\"Adapter not initialized; Trying to restart\")\n hass.bus.fire(EVENT_HDMI_CEC_UNAVAILABLE)\n adapter.init()\n\n _adapter_watchdog_job = HassJob(_adapter_watchdog, cancel_on_shutdown=True)\n\n @callback\n def _async_initialized_callback(*_: Any):\n \"\"\"Add watchdog on initialization.\"\"\"\n return event.async_call_later(hass, WATCHDOG_INTERVAL, _adapter_watchdog_job)\n\n hdmi_network.set_initialized_callback(_async_initialized_callback)\n\n def _volume(call: ServiceCall) -> None:\n \"\"\"Increase/decrease volume and mute/unmute system.\"\"\"\n mute_key_mapping = {\n ATTR_TOGGLE: KEY_MUTE_TOGGLE,\n ATTR_ON: KEY_MUTE_ON,\n ATTR_OFF: KEY_MUTE_OFF,\n }\n for cmd, att in call.data.items():\n if cmd == CMD_UP:\n _process_volume(KEY_VOLUME_UP, att)\n elif cmd == CMD_DOWN:\n _process_volume(KEY_VOLUME_DOWN, att)\n elif cmd == CMD_MUTE:\n hdmi_network.send_command(\n KeyPressCommand(mute_key_mapping[att], dst=ADDR_AUDIOSYSTEM)\n )\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n _LOGGER.info(\"Audio muted\")\n else:\n _LOGGER.warning(\"Unknown command %s\", cmd)\n\n def _process_volume(cmd, att):\n if isinstance(att, (str,)):\n att = att.strip()\n if att == CMD_PRESS:\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n elif att == CMD_RELEASE:\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n else:\n att = 1 if att == \"\" else int(att)\n for _ in range(0, att):\n hdmi_network.send_command(KeyPressCommand(cmd, dst=ADDR_AUDIOSYSTEM))\n hdmi_network.send_command(KeyReleaseCommand(dst=ADDR_AUDIOSYSTEM))\n\n def _tx(call: ServiceCall) -> None:\n \"\"\"Send CEC command.\"\"\"\n data = call.data\n if ATTR_RAW in data:\n command = CecCommand(data[ATTR_RAW])\n else:\n src = data.get(ATTR_SRC, ADDR_UNREGISTERED)\n dst = data.get(ATTR_DST, ADDR_BROADCAST)\n if ATTR_CMD in data:\n cmd = data[ATTR_CMD]\n else:\n _LOGGER.error(\"Attribute 'cmd' is missing\")\n return\n if ATTR_ATT in data:\n if isinstance(data[ATTR_ATT], (list,)):\n att = data[ATTR_ATT]\n else:\n att = reduce(lambda x, y: f\"{x}:{y:x}\", data[ATTR_ATT])\n else:\n att = \"\"\n command = CecCommand(cmd, dst, src, att)\n hdmi_network.send_command(command)\n\n def _standby(call: ServiceCall) -> None:\n hdmi_network.standby()\n\n def _power_on(call: ServiceCall) -> None:\n hdmi_network.power_on()\n\n def _select_device(call: ServiceCall) -> 
None:\n \"\"\"Select the active device.\"\"\"\n if not (addr := call.data[ATTR_DEVICE]):\n _LOGGER.error(\"Device not found: %s\", call.data[ATTR_DEVICE])\n return\n if addr in device_aliases:\n addr = device_aliases[addr]\n else:\n entity = hass.states.get(addr)\n _LOGGER.debug(\"Selecting entity %s\", entity)\n if entity is not None:\n addr = entity.attributes[\"physical_address\"]\n _LOGGER.debug(\"Address acquired: %s\", addr)\n if addr is None:\n _LOGGER.error(\n \"Device %s has not physical address\", call.data[ATTR_DEVICE]\n )\n return\n if not isinstance(addr, (PhysicalAddress,)):\n addr = PhysicalAddress(addr)\n hdmi_network.active_source(addr)\n _LOGGER.info(\"Selected %s (%s)\", call.data[ATTR_DEVICE], addr)\n\n def _update(call: ServiceCall) -> None:\n \"\"\"Update if device update is needed.\n\n Called by service, requests CEC network to update data.\n \"\"\"\n hdmi_network.scan()\n\n def _new_device(device):\n \"\"\"Handle new devices which are detected by HDMI network.\"\"\"\n key = f\"{DOMAIN}.{device.name}\"\n hass.data[DOMAIN][key] = device\n ent_platform = base_config[DOMAIN][CONF_TYPES].get(key, platform)\n discovery.load_platform(\n hass,\n ent_platform,\n DOMAIN,\n discovered={ATTR_NEW: [key]},\n hass_config=base_config,\n )\n\n def _shutdown(call):\n hdmi_network.stop()\n\n def _start_cec(callback_event):\n \"\"\"Register services and start HDMI network to watch for devices.\"\"\"\n hass.services.register(\n DOMAIN, SERVICE_SEND_COMMAND, _tx, SERVICE_SEND_COMMAND_SCHEMA\n )\n hass.services.register(\n DOMAIN, SERVICE_VOLUME, _volume, schema=SERVICE_VOLUME_SCHEMA\n )\n hass.services.register(\n DOMAIN,\n SERVICE_UPDATE_DEVICES,\n _update,\n schema=SERVICE_UPDATE_DEVICES_SCHEMA,\n )\n hass.services.register(DOMAIN, SERVICE_POWER_ON, _power_on)\n hass.services.register(DOMAIN, SERVICE_STANDBY, _standby)\n hass.services.register(DOMAIN, SERVICE_SELECT_DEVICE, _select_device)\n\n hdmi_network.set_new_device_callback(_new_device)\n hdmi_network.start()\n\n hass.bus.listen_once(EVENT_HOMEASSISTANT_START, _start_cec)\n hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)\n return True",
"def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)",
"def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()",
"def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list",
"def setup_platform(hass, config, add_devices, discovery_info=None):\n devices = config.get(CONF_SWITCHES, {})\n cmdrgbwlight = []\n\n for object_id, device_config in devices.items():\n value_template = device_config.get(CONF_STATE_VALUE_TEMPLATE)\n\n if value_template is not None:\n value_template.hass = hass\n\n cmdrgbwlight.append(\n CommandSwitch(\n hass,\n object_id,\n device_config.get(CONF_NAME),\n device_config.get(CONF_COMMAND_ON),\n device_config.get(CONF_COMMAND_OFF),\n device_config.get(CONF_COMMAND_STATE),\n device.config.get(CONF_BRIGHTNESS_STATE),\n device.config.get(CONF_BRIGHTNESS_COMMAND),\n device.config.get(CONF_BRIGHTNESS_VALUE_TEMPLATE),\n device.config.get(CONF_RGB_STATE),\n device.config.get(CONF_RGB_COMMAND),\n device.config.get(CONF_RGB_VALUE_TEMPLATE),\n device.config.get(CONF_FRIENDLY_NAME, object_id),\n device.config.get(CONF_BRIGHTNESS_SCALE),\n value_template\n )\n )\n\n if not cmdrgbwlight:\n _LOGGER.error(\"No switches added\")\n return False\n\n add_devices(cmdrgbwlight)"
] | [
"0.75633675",
"0.6523127",
"0.65048945",
"0.6400696",
"0.6324981",
"0.628187",
"0.625404",
"0.62352747",
"0.62352747",
"0.6210974",
"0.62051237",
"0.6186631",
"0.61536473",
"0.61463755",
"0.60833514",
"0.60233676",
"0.6022216",
"0.60210854",
"0.5984712",
"0.59599894",
"0.59232455",
"0.59154606",
"0.5894098",
"0.5887083",
"0.5875085",
"0.58373123",
"0.58311623",
"0.5774694",
"0.5765068",
"0.573655"
] | 0.65371144 | 1 |
Return config section for additional switch config items. | def get_switch_config_section(cls):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_switch_config_section(cls) -> Optional[str]:\n return None",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def configure(self, section):",
"def get_stepper_config_section(cls) -> Optional[str]:\n return None",
"def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def get_coil_config_section(cls) -> Optional[str]:\n return None",
"def get_config_main_sections(self):\n self.sections_in_config = self.config_handle.sections()",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def getSection(self, section, item):\n if self.config.has_section(section):\n if self.config.has_option(section, item):\n return self.config.get(section, item)\n return None",
"def get_config_descr(self, name):\n return self.configs[name][1]",
"def get_config_section(self, title_startswith, return_all=True):\n for section in self._config_sections:\n if section[0].startswith(title_startswith):\n if return_all:\n yield section\n else:\n return section",
"def get_switch_overwrite_section(cls):\n return None",
"def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)",
"def get_coil_config_section(cls):\n return None",
"def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}",
"def gather_configuration(self, config):\n config['log']['logging_level'] = self.logDisplay.get_logging_level()\n\n # MIDI\n config['midi']['winch_midi_input'] = self.winchMidiInputCombo.current_item()\n config['midi']['midi_output'] = self.midiOutputCombo.current_item()\n\n # OSC\n addr, port = self.oscListenerConfig.get_OSC_port()\n config['osc']['listener_addr'] = addr\n config['osc']['listener_port'] = str(port)\n addr, port = self.oscSenderConfig.get_OSC_port()\n config['osc']['sender_addr'] = addr\n config['osc']['sender_port'] = str(port)\n\n # DMX\n config['dmx']['dmx_output_serial_port'] = self.dmxSelect.current_item()\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n config['winches'][key] = winchSelect.current_item()\n\n return",
"def __getitem__(self, item):\n return self._config[item]",
"def subconfig(self, subsection):\n if config.is_config(self.config):\n raise PluginFeatureError(\"subconfig() incompatible with plugin.Config, \"\n \"use config.option_map()\")\n section = self.plugin_name() + '/' + subsection\n if section not in self.bot.config_root:\n self.bot.config_root[section] = {}\n return self.bot.config_root[section]",
"def get_config(self, name):\n return self.configs[name][0]",
"def _opt_config(self):\n return self._opt_method.config",
"def get_config(self):\n return super().get_config()",
"def section(self):\n return SECTION_NAME_TO_SECTION[self.section_name]",
"def get_rec_config(self):\n conf_map = {}\n if len(self.reconstructions.text()) > 0:\n conf_map['reconstructions'] = str(self.reconstructions.text())\n if len(self.device.text()) > 0:\n conf_map['device'] = str(self.device.text()).replace('\\n', '')\n if len(self.alg_seq.text()) > 0:\n conf_map['algorithm_sequence'] = str(self.alg_seq.text()).replace('\\n', '')\n if len(self.beta.text()) > 0:\n conf_map['beta'] = str(self.beta.text())\n if len(self.support_area.text()) > 0:\n conf_map['support_area'] = str(self.support_area.text()).replace('\\n', '')\n if self.cont.isChecked():\n conf_map['cont'] = 'true'\n if len(self.cont_dir_button.text().strip()) > 0:\n conf_map['continue_dir'] = '\"' + str(self.cont_dir_button.text()).strip() + '\"'\n print('cont_dir', conf_map['continue_dir'])\n\n for feat_id in self.features.feature_dir:\n self.features.feature_dir[feat_id].add_config(conf_map)\n\n return conf_map",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_config(self):\n\n return {section: self.sections[section].get_values() for section in self.sections}",
"def config_list_options(section):\n return __CONFIG.items(section)",
"def _config_sections(self):\n data = []\n section_data = []\n for index, line in enumerate(self.running_config):\n if self._nextline_startswith_space(index):\n section_data.append(line)\n else:\n if len(section_data) > 0:\n section_data.append(line)\n data.append(section_data)\n section_data = []\n return data",
"def get(self, name, section=section_default):\n return self.config[section][name]",
"def get_architecture_config_section(architecture_name: str) -> BaseArchitectureConfigSection:\n architecture_module = _import_architecture_module(architecture_name)\n return architecture_module.ArchitectureConfigSection()"
] | [
"0.70081085",
"0.6003366",
"0.5735365",
"0.5721832",
"0.5691188",
"0.569049",
"0.56896424",
"0.5651286",
"0.5600963",
"0.5576065",
"0.5570528",
"0.55503464",
"0.5539065",
"0.5482666",
"0.5422852",
"0.5421628",
"0.54125065",
"0.53546286",
"0.53522193",
"0.53439945",
"0.53227085",
"0.53196234",
"0.52948934",
"0.5271917",
"0.5256359",
"0.5254117",
"0.52536285",
"0.524673",
"0.52359176",
"0.523084"
] | 0.7165999 | 0 |
Return config section for additional switch config overwrite items. | def get_switch_overwrite_section(cls):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_switch_config_section(cls):\n return None",
"def get_switch_config_section(cls) -> Optional[str]:\n return None",
"def validate_switch_overwrite_section(self, switch: Switch, config_overwrite: dict) -> dict:\n switch.machine.config_validator.validate_config(\n \"switch_overwrites\", config_overwrite, switch.name,\n base_spec=self.__class__.get_switch_overwrite_section())\n return config_overwrite",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def get_new_config(self):\n app_config = zaza.model.get_application_config(self.application_name)\n new_value = str(not app_config['disable-mlockall'].get('value', False))\n return 'disable-mlockall', new_value",
"def configure(self, section):",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}",
"def get_stepper_config_section(cls) -> Optional[str]:\n return None",
"def get_coil_config_section(cls) -> Optional[str]:\n return None",
"def get_new_config(self):\n app_config = zaza.model.get_application_config(self.application_name)\n return 'enable-sriov', str(not app_config['enable-sriov']['value'])",
"def get_coil_overwrite_section(cls):\n return None",
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def update_scenario_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n filename = getattr(\n config.CONF, f'{self.case_name}_image', self.filename)\n if not rconfig.has_section('scenario'):\n rconfig.add_section('scenario')\n rconfig.set('scenario', 'img_file', filename)\n rconfig.set('scenario', 'img_disk_format', getattr(\n config.CONF, f'{self.case_name}_image_format',\n self.image_format))\n extra_properties = self.extra_properties.copy()\n if env.get('IMAGE_PROPERTIES'):\n extra_properties.update(\n functest_utils.convert_ini_to_dict(\n env.get('IMAGE_PROPERTIES')))\n extra_properties.update(\n getattr(config.CONF, f'{self.case_name}_extra_properties', {}))\n rconfig.set(\n 'scenario', 'img_properties',\n functest_utils.convert_dict_to_ini(extra_properties))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)",
"def _build_new_config_(self):\n if hasattr(self,\"_init_config\"):\n # Has been loaded\n newconfig = []\n set_key = []\n for l in self._init_config:\n if ((l.startswith(\"#\") or l.startswith(\"\\t\") or l.startswith(\" \")) or len(l)==0):\n newconfig.append(l)\n else:\n key = l.split()[0]\n if key in self.switched_off_keys:\n key = \"# \"+key\n newconfig.append(self.get_config_lines(key))\n set_key.append(key)\n\n keys = np.asarray(list(self.config.keys()))\n not_set_key = keys[~np.in1d(keys, set_key)]\n \n if len(not_set_key)>0:\n newconfig.append(\"# == NEW KEYS == #\")\n for key in not_set_key:\n newconfig.append(self.get_config_lines(key))\n return np.array(newconfig)\n else:\n # new\n return self.get_config(\"array\")",
"def get_config(self):\n return super().get_config()",
"def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config",
"def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config",
"def get_config_descr(self, name):\n return self.configs[name][1]",
"def config_func(tools, index, device_id, config_old: {}, config_new: {}):\n\n # This is an example of a firmware upgrade requiring a configuration migration\n\n # Firmware 01.03.XX to 01.04.XX configuration migration.\n\n # GENERAL section, no changes\n config_new[\"general\"] = config_old[\"general\"]\n\n # LOG section, error_frames added\n config_new[\"log\"] = config_old[\"log\"]\n config_new[\"log\"][\"error_frames\"] = {\"state\": 0}\n \n # RTC section, no changes\n config_new[\"rtc\"] = config_old[\"rtc\"]\n\n # SECONDARY PORT section, no changes\n config_new['secondaryport'] = config_old['secondaryport']\n\n # CAN sections, remote_frames added, filter moved\n for can_x in [\"can_1\", \"can_2\"]:\n config_new[can_x] = config_old[can_x]\n config_new[can_x][\"filter\"] = {\"remote_frames\": 0, \"id\": config_old[can_x][\"filter\"]}\n\n # LIN sections, before optional, now mandatory\n for lin_x in [\"lin_1\", \"lin_2\"]:\n if lin_x in config_old:\n config_new[lin_x] = config_old[lin_x]\n\n # CONNECT section, server->request_style now mandatory\n if \"connect\" in config_old:\n config_new[\"connect\"] = config_old[\"connect\"]\n\n # Add mandatory \"request_style\" if not already set\n if \"s3\" in config_new[\"connect\"]:\n if \"server\" in config_new[\"connect\"][\"s3\"]:\n if \"request_style\" not in config_new[\"connect\"][\"s3\"][\"server\"]:\n config_new[\"connect\"][\"s3\"][\"server\"][\"request_style\"] = 0\n\n return config_new",
"def subconfig(self, subsection):\n if config.is_config(self.config):\n raise PluginFeatureError(\"subconfig() incompatible with plugin.Config, \"\n \"use config.option_map()\")\n section = self.plugin_name() + '/' + subsection\n if section not in self.bot.config_root:\n self.bot.config_root[section] = {}\n return self.bot.config_root[section]",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def get_rllib_full_config(self):\n return merged_dict(self.get_default_config(), self.get_config())",
"def create_config_file_before(self):\n config = self.create_site_config()\n export_policy = {\n \"export\": {\n \"tenant\": \"intersite-testsuite\",\n \"app\": \"app\",\n \"epg\": \"epg\",\n \"remote_epg\": \"intersite-testsuite-app-epg\",\n \"remote_sites\": [\n {\n \"site\": {\n \"name\": \"Site2\",\n \"interfaces\": [\n {\n \"l3out\": {\n \"name\": \"l3out\",\n \"tenant\": \"intersite-testsuite\",\n \"provides\": [\n {\n \"contract_name\": \"contract-1\",\n },\n {\n \"contract_name\": \"contract-2\",\n }\n ]\n }\n }\n ]\n }\n }\n ]\n }\n }\n config['config'].append(export_policy)\n return config",
"def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))",
"def getSection(self, section, item):\n if self.config.has_section(section):\n if self.config.has_option(section, item):\n return self.config.get(section, item)\n return None",
"def _configure_addon(self):\n cfg = None\n try:\n data_dir = os.path.split(self.props.data_dir)\n\n cfg = Configuration(jobtype='Blender', \n data_path=data_dir[0],\n log_level=int(self.props.log_level),\n name=self.props.ini_file,\n datadir=data_dir[1])\n \n except (InvalidConfigException, IndexError) as exp:\n self.log.warning(\"Warning failed to load config file, \"\n \"creating new default config.\")\n self.log.warning(str(exp))\n \n finally:\n\n if not os.path.isdir(self.props.data_dir):\n raise EnvironmentError(\"Data directory not created - \"\n \"please ensure you have adequate permissions.\")\n\n if not cfg:\n cfg = Configuration(jobtype='Blender', log_level='warning')\n\n if self.props.endpoint:\n cfg = override_config(cfg, endpoint=self.props.endpoint)\n if self.props.account:\n cfg = override_config(cfg, account=self.props.account)\n if self.props.key:\n cfg = override_config(cfg, key=self.props.key)\n if self.props.client_id:\n cfg = override_config(cfg, client_id=self.props.client_id)\n if self.props.tenant:\n cfg = override_config(cfg, tenant=self.props.tenant)\n if self.props.redirect:\n cfg = override_config(cfg, redirect=self.props.redirect)\n\n cfg.save_config()\n return cfg",
"def get_config_template(self) -> cconfig.Config:",
"def get_coil_config_section(cls):\n return None"
] | [
"0.68945116",
"0.6773478",
"0.6018817",
"0.58323014",
"0.5795032",
"0.5730897",
"0.57164955",
"0.5677969",
"0.56104964",
"0.55940247",
"0.5481566",
"0.54292023",
"0.54252213",
"0.5420217",
"0.54023397",
"0.53998023",
"0.5392274",
"0.5382826",
"0.5382826",
"0.53691185",
"0.53688157",
"0.53682745",
"0.535558",
"0.5352885",
"0.53395766",
"0.5334287",
"0.53295475",
"0.53192884",
"0.53190905",
"0.5317932"
] | 0.69502443 | 0 |
Validate switch overwrite section for platform. | def validate_switch_overwrite_section(self, switch: Switch, config_overwrite: dict) -> dict:
switch.machine.config_validator.validate_config(
"switch_overwrites", config_overwrite, switch.name,
base_spec=self.__class__.get_switch_overwrite_section())
return config_overwrite | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_switch_section(self, switch: \"Switch\", config: dict) -> dict:\n if self.get_switch_config_section():\n spec = self.get_switch_config_section() # pylint: disable-msg=assignment-from-none\n config = switch.machine.config_validator.validate_config(spec, config, switch.name)\n elif config:\n raise AssertionError(\"No platform_config supported but not empty {} for switch {}\".\n format(config, switch.name))\n\n return config",
"def validate_switch_section(self, switch: Switch, config: dict) -> dict:\n base_spec = [\"device\"]\n if self.__class__.get_switch_config_section():\n base_spec.append(self.__class__.get_switch_config_section())\n switch.machine.config_validator.validate_config(\n \"switches\", config, switch.name,\n base_spec=base_spec)\n return config",
"def get_switch_overwrite_section(cls):\n return None",
"def validate_coil_overwrite_section(self, driver, config_overwrite):\n driver.machine.config_validator.validate_config(\n \"coil_overwrites\", config_overwrite, driver.name,\n base_spec=self.get_coil_overwrite_section())\n return config_overwrite",
"def sectional_overwrite_check(self):\n\n for rule in self.options['sectional_overwrite']:\n if self.lineage_test(rule):\n return True\n return False",
"def command_validate_switch():\n\n def duplicate_port(entry, name):\n dpid = entry['dpid']\n\n print 'Warning: switch %s duplicate interface names: %s' % (dpid, name)\n if bigsh.debug_backtrace:\n for port in entry['ports']:\n if port['name'] == name:\n print 'SWTICH %s:%s PORT %s' % (entry, name, port)\n\n def not_case_sensitive(entry, name):\n dpid = entry['dpid']\n\n ports = {}\n for port in entry['ports']:\n if port['name'].lower() == name:\n ports[port['name']] = port\n\n print 'Warning: switch %s case insentive interface names: %s' % \\\n (dpid, ' - '.join(ports.keys()))\n if bigsh.debug_backtrace:\n for port in ports:\n print 'SWTICH %s PORT %s' % (dpid, port)\n\n bigdb = bigsh.bigdb\n try:\n (schema, entries) = bigdb.schema_and_result('core/switch', {})\n except Exception, e:\n print 'command_validate_switch:', e\n traceback.print_exc()\n return\n\n if entries:\n for entry in entries.iter():\n dpid = entry['dpid']\n\n # verify that the port names are unique even when case\n # sensitive\n all_names = [p['name'] for p in entry['interface']]\n one_case_names = utif.unique_list_from_list([x.lower() for x in all_names])\n if len(all_names) != len(one_case_names):\n # Something is rotten, find out what.\n for (i, port_name) in enumerate(all_names):\n # use enumerate to drive upper-triangle comparison\n for other_name in all_names[i+1:]:\n if port_name == other_name:\n duplicate_port(entry, port_name)\n elif port_name.lower() == other_name.lower():\n not_case_sensitive(entry, port_name)",
"def _check_config(self):",
"def validate(tool):\n if SHED not in tool:\n tool[SHED] = DEFAULT_TOOLSHED\n if REVISIONS not in tool:\n tool[REVISIONS] = []",
"def check_config_mode(self, check_string=')#', pattern=''):\n if not pattern:\n pattern = re.escape(self.base_prompt[:16])\n return super(ExaROSSSH, self).check_config_mode(\n check_string=check_string, pattern=pattern)",
"def check_config_mode(self, check_string=\")#\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string)",
"def is_hao_event(self, evt):\n return evt.detail and 'SRIOVPhysicalPort.ConfigChange' in evt.detail",
"def _validate_config(self):\n pass",
"def test_invalid_overprovision_value(self):\n command_line = [\"pool\", \"overprovision\", \"thispool\", \"1.2\"]\n for prefix in [[], [\"-propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def _validate_change_when_exposing_in_dhcp(self):\n if self.pk and settings.DHCP_ENTRY_FORBID_CHANGE:\n from ralph.networks.models import IPAddress\n old_obj = self.__class__._default_manager.get(pk=self.pk)\n try:\n if old_obj.ipaddress.dhcp_expose:\n if old_obj.mac != self.mac:\n raise ValidationError(\n 'Cannot change MAC when exposing in DHCP'\n )\n except IPAddress.DoesNotExist:\n pass",
"def check_config_mode(\n self, check_string: str = \")#\", pattern: str = \"\", force_regex: bool = False\n ) -> bool:\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def test_valid_machine():\n config = load_json_fixture(\"basic-addon-config.json\")\n\n config[\"machine\"] = [\n \"intel-nuc\",\n \"odroid-c2\",\n \"odroid-n2\",\n \"odroid-xu\",\n \"qemuarm-64\",\n \"qemuarm\",\n \"qemux86-64\",\n \"qemux86\",\n \"raspberrypi\",\n \"raspberrypi2\",\n \"raspberrypi3-64\",\n \"raspberrypi3\",\n \"raspberrypi4-64\",\n \"raspberrypi4\",\n \"tinker\",\n ]\n\n assert vd.SCHEMA_ADDON_CONFIG(config)\n\n config[\"machine\"] = [\n \"!intel-nuc\",\n \"!odroid-c2\",\n \"!odroid-n2\",\n \"!odroid-xu\",\n \"!qemuarm-64\",\n \"!qemuarm\",\n \"!qemux86-64\",\n \"!qemux86\",\n \"!raspberrypi\",\n \"!raspberrypi2\",\n \"!raspberrypi3-64\",\n \"!raspberrypi3\",\n \"!raspberrypi4-64\",\n \"!raspberrypi4\",\n \"!tinker\",\n ]\n\n assert vd.SCHEMA_ADDON_CONFIG(config)\n\n config[\"machine\"] = [\n \"odroid-n2\",\n \"!odroid-xu\",\n \"qemuarm-64\",\n \"!qemuarm\",\n \"qemux86-64\",\n \"qemux86\",\n \"raspberrypi\",\n \"raspberrypi4-64\",\n \"raspberrypi4\",\n \"!tinker\",\n ]\n\n assert vd.SCHEMA_ADDON_CONFIG(config)",
"def _check_ops(self):\n required_ops = ['san_ip', 'san_login', 'san_password']\n for attr in required_ops:\n if not getattr(self.configuration, attr, None):\n raise exception.InvalidInput(reason=_('%s is not set.') % attr)\n\n replica = self.configuration.safe_get('replication_device')\n if replica and isinstance(replica, list):\n replica_ops = ['backend_id', 'login', 'password', 'rpo']\n for attr in replica_ops:\n if attr not in replica[0]:\n msg = _('replication_device %s is not set.') % attr\n raise exception.InvalidInput(reason=msg)\n self.replica = Replication(replica[0])",
"def check_config_mode(self, check_string=\"(config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def __checkSwitch ( self, letter, value ):\n\n #-- 1 --\n # [ if letter is a key in self.switchMap -> I\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n if not self.switchMap.has_key ( letter ):\n usage ( self.switchSpecs, self.posSpecs,\n \"No such switch: -%s\" % letter )\n\n #-- 2 --\n if len(value) == 0:\n self.switchMap[letter] = 1\n else:\n self.switchMap[letter] = value",
"def check_config_mode(self, check_string=\">config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def __validate_boot_settings_properties_in_xml_file(profile):\n # TODO: Create a validation for <bootorder> values\n INVALID_ATTRIBUTE_ERROR_MESSAGE = \"Invalid value for %s attribute. Valid values are: %s\"\n\n if profile.has_property(XML_MANAGE_BOOT_MODE_ATTRIBUTE):\n if profile.manageBoot not in XML_BOOLEAN_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_MANAGE_BOOT_MODE_ATTRIBUTE, XML_BOOLEAN_LIST), False)\n elif profile.has_property(XML_BOOT_MODE_ATTRIBUTE):\n if profile.bootMode not in PROFILE_BOOT_MODE_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_BOOT_MODE_ATTRIBUTE, PROFILE_BOOT_MODE_LIST), False)\n elif profile.bootMode == CONSTANT_UEFI or profile.bootMode == CONSTANT_UEFI_OPTIMIZED:\n if profile.has_property(XML_BOOT_POLICY_ATTRIBUTE):\n if profile.bootPolicy not in PROFILE_BOOT_POLICY_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_BOOT_POLICY_ATTRIBUTE, PROFILE_BOOT_POLICY_LIST), False)\n elif profile.has_property(XML_MANAGE_BOOT_ORDER_ATTRIBUTE):\n if profile.manageBootOrder not in XML_BOOLEAN_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_MANAGE_BOOT_ORDER_ATTRIBUTE, XML_BOOLEAN_LIST), False)\n elif profile.has_property(XML_PRIMARY_BOOT_DEVICE):\n if profile.primaryBootDevice not in PROFILE_PRIMARY_BOOT_DEVICE_LIST:\n ui_lib.fail_test(INVALID_ATTRIBUTE_ERROR_MESSAGE % (XML_PRIMARY_BOOT_DEVICE, PROFILE_PRIMARY_BOOT_DEVICE_LIST), False)",
"def _check_valid_config(self):\n default_keys = self.default_config.keys()\n current_keys = self.config.keys()\n\n if default_keys != current_keys:\n msg = f\"Config must have the following keys : {list(default_keys)}\"\n self.logger.critical(msg)\n sys.exit(0)",
"def check_config_mode(self):\n return False",
"def validate_configurator_version():\n if settings.CONFIGURATOR_MODULE == \"bootmachine.contrib.configurators.salt\":\n pkgver = settings.SALT_AUR_PKGVER\n pkgrel = settings.SALT_AUR_PKGREL\n response = urllib2.urlopen(\"https://aur.archlinux.org/packages/sa/salt/PKGBUILD\")\n for line in response:\n if line.startswith(\"pkgver=\") and not pkgver in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgver, line.strip()))\n if line.startswith(\"pkgrel=\") and not pkgrel in line:\n abort(\"The requested Salt 'pkgrel={0}' in the AUR was updated to '{1}'.\".format(\n pkgrel, line.strip()))",
"def test_invalid_machine():\n config = load_json_fixture(\"basic-addon-config.json\")\n\n config[\"machine\"] = [\n \"intel-nuc\",\n \"raspberrypi3\",\n \"raspberrypi4-64\",\n \"raspberrypi4\",\n \"tinkerxy\",\n ]\n\n with pytest.raises(vol.Invalid):\n assert vd.SCHEMA_ADDON_CONFIG(config)\n\n config[\"machine\"] = [\n \"intel-nuc\",\n \"intel-nuc\",\n ]\n\n with pytest.raises(vol.Invalid):\n assert vd.SCHEMA_ADDON_CONFIG(config)",
"def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")",
"def pilotValidateBoot (self):\n return self.validateBoot()",
"def validate(self):\n\n print(\"Checking for supported board.\")\n if self.board == \"\": \n sys.exit(\"Unknown board type. Exiting.\")\n\n supportedboards = supportedBoards()\n\n if not self.board in supportedboards:\n sys.exit(\"Board %s is not supported.\" % self.board)\n return False\n\n if not self.getpath(): \n sys.exit(\"%s unable to find binary file to upload in \\\n specified path or current working directory %s. \\\n Exiting now.\" % (errstr, str(array[0])))\n\n array = self.getfiletype()\n if not (array[0] or array[1]):\n return False\n\n self.arch = array[0]\n self.filetype = array[1]\n return True"
] | [
"0.6460815",
"0.6240742",
"0.58130705",
"0.576157",
"0.55020815",
"0.54661757",
"0.5350918",
"0.53023165",
"0.527657",
"0.52585673",
"0.52316093",
"0.5221872",
"0.5212852",
"0.521263",
"0.521263",
"0.5193446",
"0.51146615",
"0.5110619",
"0.5091372",
"0.50717586",
"0.507172",
"0.5064842",
"0.50598496",
"0.5058418",
"0.50174963",
"0.5014226",
"0.501217",
"0.49970406",
"0.49960887",
"0.4985221"
] | 0.7329361 | 0 |
Validate a switch config for platform. | def validate_switch_section(self, switch: Switch, config: dict) -> dict:
base_spec = ["device"]
if self.__class__.get_switch_config_section():
base_spec.append(self.__class__.get_switch_config_section())
switch.machine.config_validator.validate_config(
"switches", config, switch.name,
base_spec=base_spec)
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_switch_section(self, switch: \"Switch\", config: dict) -> dict:\n if self.get_switch_config_section():\n spec = self.get_switch_config_section() # pylint: disable-msg=assignment-from-none\n config = switch.machine.config_validator.validate_config(spec, config, switch.name)\n elif config:\n raise AssertionError(\"No platform_config supported but not empty {} for switch {}\".\n format(config, switch.name))\n\n return config",
"def _validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def command_validate_switch():\n\n def duplicate_port(entry, name):\n dpid = entry['dpid']\n\n print 'Warning: switch %s duplicate interface names: %s' % (dpid, name)\n if bigsh.debug_backtrace:\n for port in entry['ports']:\n if port['name'] == name:\n print 'SWTICH %s:%s PORT %s' % (entry, name, port)\n\n def not_case_sensitive(entry, name):\n dpid = entry['dpid']\n\n ports = {}\n for port in entry['ports']:\n if port['name'].lower() == name:\n ports[port['name']] = port\n\n print 'Warning: switch %s case insentive interface names: %s' % \\\n (dpid, ' - '.join(ports.keys()))\n if bigsh.debug_backtrace:\n for port in ports:\n print 'SWTICH %s PORT %s' % (dpid, port)\n\n bigdb = bigsh.bigdb\n try:\n (schema, entries) = bigdb.schema_and_result('core/switch', {})\n except Exception, e:\n print 'command_validate_switch:', e\n traceback.print_exc()\n return\n\n if entries:\n for entry in entries.iter():\n dpid = entry['dpid']\n\n # verify that the port names are unique even when case\n # sensitive\n all_names = [p['name'] for p in entry['interface']]\n one_case_names = utif.unique_list_from_list([x.lower() for x in all_names])\n if len(all_names) != len(one_case_names):\n # Something is rotten, find out what.\n for (i, port_name) in enumerate(all_names):\n # use enumerate to drive upper-triangle comparison\n for other_name in all_names[i+1:]:\n if port_name == other_name:\n duplicate_port(entry, port_name)\n elif port_name.lower() == other_name.lower():\n not_case_sensitive(entry, port_name)",
"def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()",
"def validate_config(self, config: Dict) -> bool:\n raise NotImplementedError",
"def check_config(config):\n pass",
"def validate_config(self, changed):\n logger.debug(\"[%s] Validating config (Legacy path)\", self.name)\n if not self.to_validate(changed):\n return\n # Validate (Legacy Path)\n from noc.cm.engine import Engine\n\n engine = Engine(self)\n try:\n engine.check()\n except: # noqa\n logger.error(\"Failed to validate config for %s\", self.name)\n error_report()",
"def state_failsafe_validate(cfg, app, win, events):",
"def _check_config(self):",
"def validate_config(params, error_callback):\n local_params = dict(params)\n _validate_value_formats(local_params, error_callback)\n _validate_in_cidr(local_params, error_callback)\n _validate_dhcp_range(local_params, error_callback)\n _validate_inspection_range(local_params, error_callback)\n _validate_no_overlap(local_params, error_callback)\n _validate_ips(local_params, error_callback)\n _validate_interface_exists(local_params, error_callback)",
"def state_chosen_validate(cfg, app, win, events):",
"def validate_settings(_cfg, _ctx):\n pass",
"def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")",
"def validate_config(config: NeedlemanWunschAlgorithmConfig):\n\n parameters_names_list = [\"SAME\", \"DIFF\", \"GAP_PENALTY\", \"MAX_NUMBER_PATHS\", \"MAX_SEQ_LENGTH\"]\n\n for param_name in parameters_names_list:\n if not isinstance(config[param_name], int):\n return False, f\"Parameter {param_name} is not int!\"\n \n for param_name in parameters_names_list[0:3]:\n if config[param_name] == 0:\n return False, f\"Parameter {param_name} can not be equal to 0!\"\n\n for param_name in parameters_names_list[3:]:\n if config[param_name] < 1:\n return False, f\"Parameter {param_name} can not be less than 1!\"\n\n if config.SAME <= config.DIFF:\n return False, f\"Parameter SAME must be greater than parameter DIFF!\"\n\n if config.MAX_SEQ_LENGTH > constants.MAXIMUM_SEQ_LEN:\n return False, f\"Value of parameter MAX_SEQ_LENGTH is too big. It should be less than {constants.MAXIMUM_SEQ_LEN}\"\n\n if config.MAX_NUMBER_PATHS > constants.MAXIMUM_NUMBER_PATHS:\n return False, f\"Value of parameter MAX_NUMBER_PATHS is too big. It should be less than {constants.MAXIMUM_NUMBER_PATHS}\"\n\n return True, \"\"",
"def validate_config(app: App, config: Config):\n for state_machine in config.state_machines.values():\n _validate_state_machine(app, state_machine)",
"def validate(config):\n valid, error = validate_relays(config['relays'])\n if not valid:\n click.echo(\"Error: Configuration invalid: {}\".format(error))\n else:\n click.echo(\"OK: Configuration is valid.\")",
"def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]",
"def _verify_switch_created(self, switch):\n if not (\n hasattr(switch, \"switch_power\") and\n isinstance(switch.switch_power, switch_power_base.SwitchPowerBase)):\n raise errors.CapabilityNotReadyError(\n msg=\"'switch_power' capability is missing in hub device {} ({}),\"\n \" or is not an instance of SwitchPowerBase\".format(\n self.hub_name,\n type(switch).__name__),\n device_name=self._device_name)",
"def state_choose_validate(cfg, app, win, events):",
"def config_validate(ctx, **kwargs):\n # Validates pf9-express config file and obtains Auth Token\n #Load Active Config into ctx\n GetConfig(ctx).GetActiveConfig()\n #Get Token\n token = GetToken().get_token_v3(\n ctx.params[\"du_url\"],\n ctx.params[\"du_username\"],\n ctx.params[\"du_password\"],\n ctx.params[\"du_tenant\"] )\n if token is not None:\n click.echo('Config Validated!')\n click.echo('Token: %s' % token)\n else:\n click.echo('Config Validation Failed!')",
"def check_config_mode(self, check_string=\")#\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string)",
"def config_sanity_check(config: dict) -> dict:\n\n # back compatibility support\n config = parse_v011(config)\n\n # check model\n if config[\"train\"][\"method\"] == \"conditional\":\n if config[\"dataset\"][\"train\"][\"labeled\"] is False: # unlabeled\n raise ValueError(\n \"For conditional model, data have to be labeled, got unlabeled data.\"\n )\n\n return config",
"def validate(self):\n\n print(\"Checking for supported board.\")\n if self.board == \"\": \n sys.exit(\"Unknown board type. Exiting.\")\n\n supportedboards = supportedBoards()\n\n if not self.board in supportedboards:\n sys.exit(\"Board %s is not supported.\" % self.board)\n return False\n\n if not self.getpath(): \n sys.exit(\"%s unable to find binary file to upload in \\\n specified path or current working directory %s. \\\n Exiting now.\" % (errstr, str(array[0])))\n\n array = self.getfiletype()\n if not (array[0] or array[1]):\n return False\n\n self.arch = array[0]\n self.filetype = array[1]\n return True",
"def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError",
"def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)",
"def check_config(cfg):",
"def check_config_mode(self, check_string=\"(config\", pattern=\"\"):\n return super().check_config_mode(check_string=check_string, pattern=pattern)",
"def validate_coil_section(self, driver, config) -> dict:\n if self.get_coil_config_section():\n spec = self.get_coil_config_section() # pylint: disable-msg=assignment-from-none\n config = driver.machine.config_validator.validate_config(spec, config, driver.name)\n elif config:\n raise AssertionError(\"No platform_config supported but not empty {} for driver {}\".\n format(config, driver.name))\n\n return config"
] | [
"0.77033937",
"0.62883526",
"0.6265816",
"0.6265816",
"0.619783",
"0.6017843",
"0.5960255",
"0.57412815",
"0.56868637",
"0.5671507",
"0.5662892",
"0.56561095",
"0.5582826",
"0.5568699",
"0.55670327",
"0.55480725",
"0.5541124",
"0.55361867",
"0.5534851",
"0.55181766",
"0.5508078",
"0.5491742",
"0.5489759",
"0.5484303",
"0.54827994",
"0.54700303",
"0.5458753",
"0.54522574",
"0.5432262",
"0.5410213"
] | 0.73547333 | 1 |
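The two preceding rows pair the `get_switch_config_section` hook with the `validate_switch_section` helper: a platform advertises an extra switch config section by name, and the validator folds that name into the base spec (`["device", ...]`) before handing the config to the machine's config validator. A minimal, self-contained sketch of that interplay follows; `BasePlatform`, `MyPlatform`, the `my_platform_switches` section name and the print-based validator stand-in are assumptions for illustration, not part of the dataset or of any real framework API.

```python
# Minimal sketch (no real framework imports). Only the two method names and
# the base_spec logic come from the rows above; everything else is assumed.
class BasePlatform:
    @classmethod
    def get_switch_config_section(cls):
        return None  # base class: no platform-specific section

    def validate_switch_section(self, switch_name, config):
        base_spec = ["device"]
        if self.__class__.get_switch_config_section():
            base_spec.append(self.__class__.get_switch_config_section())
        # stand-in for switch.machine.config_validator.validate_config(...)
        print(f"validating {switch_name!r} against spec {base_spec}")
        return config


class MyPlatform(BasePlatform):
    @classmethod
    def get_switch_config_section(cls):
        return "my_platform_switches"  # assumed section name


MyPlatform().validate_switch_section("s_left_flipper", {"number": "0"})
# validating 's_left_flipper' against spec ['device', 'my_platform_switches']
```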
Add driver feature and default max_pulse length. | def __init__(self, machine):
super().__init__(machine)
# Set default platform features. Each platform interface can change
# these to notify the framework of the specific features it supports.
self.features['has_drivers'] = True
self.features['max_pulse'] = 255 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def set_gesture_pulse_count_and_length(self, pulse_count, pulse_length):\n if not (1 <= pulse_count <= 64):\n raise ValueError(\"pulse_count must be in range [1-64].\")\n if not (APDS_9960.PULSE_LEN_4_MICROS <= pulse_length\n <= APDS_9960.PULSE_LEN_32_MICROS):\n raise ValueError(\"pulse_length must be one of PULSE_LEN_N_MICROS.\")\n\n reg_value = (pulse_count - 1) | (pulse_length << 6)\n self.write_byte_data(reg_value, APDS_9960.GESTURE_PULSE_COUNT_AND_LEN_REG_ADDRESS)",
"def config_pulse_modulation(self, frequency=1e3, input='square'):\n self.enable_pulse_modulation()\n self.pulse_source = 'internal'\n self.pulse_input = input\n self.pulse_frequency = frequency",
"def set_pulse_width(self):\n\t\t\"\"\"For PWM Register-0\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM0, PCA9530_2C_1_PWM0_USERDEFINED)\n\t\t\n\t\t\"\"\"For PWM Register-1\"\"\"\n\t\tbus.write_byte_data(PCA9530_2C_1_DEFAULT_ADDRESS, PCA9530_2C_1_REG_PWM1, PCA9530_2C_1_PWM1_USERDEFINED)",
"def __init__(__self__, *,\n driver: 'outputs.CSIPowerMaxSpecDriver'):\n pulumi.set(__self__, \"driver\", driver)",
"def enableVideoData(self, length):\n self.setVideoDataEnabled(True) \n self.slider.setMaximum(length - 1)",
"def TextFieldOptionsAddMaxLengthEnabled(builder, maxLengthEnabled):\n return AddMaxLengthEnabled(builder, maxLengthEnabled)",
"def TextFieldOptionsAddMaxLength(builder, maxLength):\n return AddMaxLength(builder, maxLength)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def pytest_addoption(parser):\n parser.addoption(\n \"--amount_mclag_intf\",\n action=\"store\",\n type=int,\n default=6,\n help=\"Amount of mclag interfaces to test, default value is 6\",\n )",
"def add_options(parser):\n parser.add_option(\"\", \"--bt\", type=\"float\", default=_def_bt,\n help=\"set bandwidth-time product [default=%default] (GFSK)\")\n add_options=staticmethod(add_options)",
"def _set_maximum(self):\n self._level_gen.maximum_length = self._maximum_length_spinbox.value()\n self._refresh_view()",
"def addPulse(self, overwrite=False, send=False, **kwargs):\n pulse = Pulse(pulseGenerator=self, **kwargs)\n if overwrite:\n for p in self.pulseList:\n if p.name == pulse.name:\n self.pulseList.remove(p)\n self.pulseList.append(pulse)\n # self.pulseList+=(pulse,)\n self.debugPrint(\"pulseAdded\")\n if send:\n self.preparePulseSequence()\n self.sendPulseSequence()",
"def servo_gain(self, *args, **kwargs) -> Any:\n pass",
"def pulse_width(self) -> int:",
"def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config",
"def __init__(self, tellcore_device, signal_repetitions):\n super().__init__(tellcore_device, signal_repetitions)\n\n self._brightness = 255",
"def __init__(self, period=14, **kwargs):\n super(VWAP, self).__init__(**kwargs)\n self.time_delay = period",
"def servo_gainfactor(self, *args, **kwargs) -> Any:\n pass",
"def test_patch_hyperflex_feature_limit_internal(self):\n pass",
"def servo_gainupdate(self, *args, **kwargs) -> Any:\n pass",
"def motorLimitsChanged(self):\n pass",
"def _pulse_width_record(self, pin):\n self._time = time.ticks_us()\n if self._prev_time == 0:\n self._prev_time = self._time\n return\n self.pulse_buffer.append(self._time - self._prev_time)\n self._prev_time = self._time\n self.lenth = self.lenth + 1",
"def max_pwm(self, per: int):\r\n self._max_pwm = per\r\n self.max_pwm_hist.append(per)\r\n\r\n if self.max_pwm_hist[-2] != per and self._daq:\r\n msg = Message(\"max_pwm\", per, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)",
"def test_update_hyperflex_feature_limit_internal(self):\n pass",
"def test_create_hyperflex_feature_limit_internal(self):\n pass",
"def add_pulse_to_frame(self, frame_num, *pulse_data_args):\n\n for pulse_data in pulse_data_args:\n if pulse_data is not None:\n if frame_num not in self.frame_pulse_data.keys():\n self.frame_pulse_data[frame_num] = []\n self.frame_pulse_data[frame_num].append(pulse_data)",
"def limit():\n bwc = BandwidthConfigurator()\n bwc.limit()",
"def _cb_new_pulse(self, channel=None):\n self._pulse += 1\n # print(channel, self._pulse)",
"def _additional_option(self):\n pass"
] | [
"0.54564935",
"0.52239263",
"0.5213172",
"0.52111316",
"0.5123015",
"0.5072573",
"0.5040025",
"0.49868053",
"0.4933255",
"0.49097246",
"0.49087787",
"0.4903712",
"0.49020264",
"0.48712954",
"0.48569748",
"0.48522627",
"0.48354438",
"0.48199806",
"0.4814763",
"0.48026678",
"0.4799333",
"0.47899252",
"0.47245097",
"0.4723057",
"0.47212422",
"0.46982288",
"0.46886525",
"0.46800458",
"0.46755695",
"0.4659862"
] | 0.52949923 | 1 |
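The row above shows a base hardware-platform constructor declaring two platform features: `has_drivers` and a default `max_pulse` of 255. A short sketch of how a subclass with tighter hardware limits might override that default is below; `ShortPulsePlatform` and its 127 ms cap are illustrative assumptions.

```python
# Self-contained sketch: BasePlatform mirrors the constructor in the row
# above (the machine argument is kept but unused here); the subclass and
# its 127 ms pulse cap are assumptions for illustration.
class BasePlatform:
    def __init__(self, machine):
        self.machine = machine
        self.features = {}
        self.features['has_drivers'] = True
        self.features['max_pulse'] = 255  # default max pulse length


class ShortPulsePlatform(BasePlatform):
    def __init__(self, machine):
        super().__init__(machine)
        self.features['max_pulse'] = 127  # hardware only accepts 7-bit pulse times


print(ShortPulsePlatform(machine=None).features)
# {'has_drivers': True, 'max_pulse': 127}
```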
Subclass this method in a platform module to clear a hardware switch rule for this switch. Clearing a hardware rule means actions on this switch will no longer affect coils. Another way to think of this is that it 'disables' a hardware rule. This is what you'd use to disable flippers and autofire_coils during tilt, game over, etc. | def clear_hw_rule(self, switch, coil):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_hw_rule(self, switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def clear_hw_rule(self, switch, coil):\n self.log.info(\"clear_hw_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, switch.hw_switch.number))\n self.communicator.rule_clear(coil.hw_driver.number, switch.hw_switch.number)",
"def turn_off(self, **kwargs):\n self.smartplug.turn_off()",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def turn_off(self, **kwargs):\n self._state = False\n self.schedule_update_ha_state()\n self._hs_color = None\n self._attributes[\"hs_color\"] = self._hs_color\n self._attributes[\"brightness\"] = None",
"def pibooth_reset(cfg, hard):",
"def turn_eht_off(self):\n raise NotImplementedError",
"def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')",
"def clear(self):\n self.cmd(0x33) # $33 8-bit mode\n self.cmd(0x32) # $32 8-bit mode\n self.cmd(0x28) # $28 8-bit mode\n self.cmd(0x0C) # $0C 8-bit mode\n self.cmd(0x06) # $06 8-bit mode\n self.cmd(0x01) # $01 8-bit mode",
"def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def resetDeviceStates(self):",
"def off(config: dict):\n switch_device(config, config[\"inching\"], \"off\")",
"def turn_aux_heat_off(self):\n self.set_operation_mode(STATE_HEAT)",
"def turn_off(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x01', True)\n self._state = False\n self.schedule_update_ha_state()",
"def off_switch(self):\n self._switch_callback = None",
"def setOff(self, command):\r\n self.setDriver('ST', 0)",
"def reset_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman):\n modem.configuration[DISABLE_AUTO_LINKING].set_value(not disable_auto_linking)\n modem.configuration[MONITOR_MODE].set_value(not monitor_mode)\n modem.configuration[AUTO_LED].set_value(not auto_led)\n modem.configuration[DEADMAN].set_value(not deadman)",
"def clear_single_switch_rules(switch_id,in_port,out_port):\n print(\"** Remove flows from {}\".format(switch_id))\n in_rule = \"in_port={}\".format(in_port)\n out_rule = \"in_port={}\".format(out_port)\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,in_rule],\n stdout=subprocess.PIPE).wait()\n subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"del-flows\",switch_id,out_rule],\n stdout=subprocess.PIPE).wait()\n\n ### If debugging, remove the comments below to see what the flow rules are\n # result = subprocess.Popen([\"ovs-ofctl\",\"-O\",\"OpenFlow13\",\"dump-flows\",switch_id],\n # stdout=subprocess.PIPE).communicate()[0]\n # print (result)",
"def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)",
"def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()",
"def kill_all(self):\n self.settings['lights_on'] = 12\n self.settings['lights_off'] = 12\n self.settings['overhead_level'] = 0\n self.settings['soil_1'] = 0\n self.settings['soil_2'] = 0\n self.settings['soil_3'] = 0\n self.settings['soil_4'] = 0\n self.scale_overhead_level.set(self.settings['overhead_level'])\n self.scale_smc1.set(self.settings['soil_1'])\n self.scale_smc2.set(self.settings['soil_2'])\n self.scale_smc3.set(self.settings['soil_3'])\n self.scale_smc4.set(self.settings['soil_4'])\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller",
"def _reset(self):\n self._interface.set('fw_wp_en', 'off')",
"def _doDisableRegulation(self):\n self._cmdRegulOff()",
"def set_light_off(self):\r\n self._light = \"OFF\"",
"def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)",
"def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}",
"def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)",
"def deconfigure(self):\n\n self.platform.deconfigure()",
"def off(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n if self._pre_off_func:\n self._pre_off_func()\n switchboard = self._get_switchboard_if_initialized()\n if self._power_and_data_share_cable:\n if switchboard:\n switchboard.add_log_note(\n f\"comm_power.off() called on {self._device_name} set communication \"\n f\"port {self.port_number} to charge as device has a single USB \"\n \"cable for data and power.\")\n switchboard.close_all_transports()\n self._hub.switch_power.power_on(self.port_number, data_sync=False)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(\n self.secondary_port_number, data_sync=False)\n else:\n if switchboard:\n switchboard.close_all_transports()\n self._hub.switch_power.power_off(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_off(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_off()",
"def turnLightingSystemOff():\n dislin.light('OFF')"
] | [
"0.8259759",
"0.797209",
"0.6230499",
"0.6211321",
"0.6141262",
"0.5926914",
"0.58824986",
"0.5876358",
"0.58560795",
"0.58331096",
"0.57995737",
"0.57605374",
"0.57516795",
"0.5739651",
"0.5729749",
"0.5706329",
"0.5672119",
"0.5603738",
"0.56002516",
"0.5589667",
"0.55387104",
"0.5535843",
"0.553559",
"0.5526481",
"0.55174744",
"0.55162746",
"0.5506924",
"0.54994327",
"0.5481463",
"0.5481205"
] | 0.8435235 | 0 |
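The row above carries the longest docstring in this block: clearing a hardware rule means switch hits stop driving coils directly in hardware, which is how flippers and autofire coils get disabled on tilt or game over, and the base class defers the actual work to each platform (`raise NotImplementedError`). One toy way a platform could satisfy that contract, by keeping rules in a dictionary and deleting the entry, is sketched below; the rule-table layout, key format and the `set_pulse_on_hit_rule` helper are assumptions, not taken from any real platform.

```python
# Toy platform, illustration only: rules live in a dict keyed by
# (switch, coil); a real platform would program / clear its controller here.
class ToyRulePlatform:
    def __init__(self):
        self._hw_rules = {}  # (switch, coil) -> rule description

    def set_pulse_on_hit_rule(self, switch, coil):
        self._hw_rules[(switch, coil)] = "pulse_on_hit"

    def clear_hw_rule(self, switch, coil):
        # After this call, hits on `switch` no longer fire `coil` in hardware.
        self._hw_rules.pop((switch, coil), None)


platform = ToyRulePlatform()
platform.set_pulse_on_hit_rule("s_flipper", "c_flipper_main")
platform.clear_hw_rule("s_flipper", "c_flipper_main")  # e.g. on tilt / game over
print(platform._hw_rules)  # {}
```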
Return addition config section for coils. | def get_coil_config_section(cls):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_coil_config_section(cls) -> Optional[str]:\n return None",
"def get_config(self):\n return self.cat_feats_cfg",
"def _getConfigName(self):\n return \"%s_processCoadd_config\" % (self.config.coaddName,)",
"def get_rec_config(self):\n conf_map = {}\n if len(self.reconstructions.text()) > 0:\n conf_map['reconstructions'] = str(self.reconstructions.text())\n if len(self.device.text()) > 0:\n conf_map['device'] = str(self.device.text()).replace('\\n', '')\n if len(self.alg_seq.text()) > 0:\n conf_map['algorithm_sequence'] = str(self.alg_seq.text()).replace('\\n', '')\n if len(self.beta.text()) > 0:\n conf_map['beta'] = str(self.beta.text())\n if len(self.support_area.text()) > 0:\n conf_map['support_area'] = str(self.support_area.text()).replace('\\n', '')\n if self.cont.isChecked():\n conf_map['cont'] = 'true'\n if len(self.cont_dir_button.text().strip()) > 0:\n conf_map['continue_dir'] = '\"' + str(self.cont_dir_button.text()).strip() + '\"'\n print('cont_dir', conf_map['continue_dir'])\n\n for feat_id in self.features.feature_dir:\n self.features.feature_dir[feat_id].add_config(conf_map)\n\n return conf_map",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def configure(self, section):",
"def get_coil_overwrite_section(cls):\n return None",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def validate_coil_section(self, driver, config):\n base_spec = [\"device\"]\n if self.__class__.get_coil_config_section():\n base_spec.append(self.__class__.get_coil_config_section())\n driver.machine.config_validator.validate_config(\n \"coils\", config, driver.name,\n base_spec=base_spec)\n return config",
"def add_conl_config(cfg):\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n cfg.MODEL.CONL = CN()\n\n cfg.MODEL.CONL.STAGES=['res4']\n cfg.MODEL.CONL.BLOCKS=[[-1,],]\n\n cfg.MODEL.CONL.RATIO = 1.0/4.0\n cfg.MODEL.CONL.DOWNSAMPLE=True\n cfg.MODEL.CONL.USE_GN=False\n cfg.MODEL.CONL.LR_MULT=0\n cfg.MODEL.CONL.USE_OUT=False\n cfg.MODEL.CONL.OUT_BN=False\n cfg.MODEL.CONL.WHITEN_TYPE=['channel']\n cfg.MODEL.CONL.TEMP = 1.0\n cfg.MODEL.CONL.WITH_GC=False\n cfg.MODEL.CONL.WITH_2FC=False\n cfg.MODEL.CONL.DOUBLE_CONV=False\n\n cfg.MODEL.CONL.WITH_STATE=False\n cfg.MODEL.CONL.NCLS=32",
"def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config",
"def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}",
"def config_section_data():\n config_data = u\"\"\"[fn_sep]\nsep_base_path=/sepm/api/v1\nsep_auth_path=/sepm/api/v1/identity/authenticate\nsep_host=<SEPM server dns name or ip address>\nsep_port=8446\nsep_username=<username>\nsep_password=<password>\nsep_domain=<SEP domain name>\n# Optional settings for access to SEPM via a proxy.\n#http_proxy=http://proxy:80\n#https_proxy=http://proxy:80\n# Limit result sent to Resilient, add full result as an attachment.\nsep_results_limit=200\n# Period of time (seconds) to wait for all endpoints to return a scan result.\nsep_scan_timeout=1800\n\"\"\"\n return config_data",
"def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))",
"def add_config(self):\n\n config = {\n 'byte_to_integer': ByteToInteger,\n 'integer_to_byte': IntegerToByte,\n 'integer_to_double_integer': IntegerToDoubleInteger,\n 'integer_to_string': IntegerToString,\n 'double_integer_to_integer': DoubleIntegerToInteger,\n 'double_integer_to_real': DoubleIntegerToReal,\n 'double_integer_to_string': DoubleIntegerToString,\n 'binary_coded_decimal_to_integer': BinaryCodedDecimalToInteger,\n 'integer_to_binary_coded_decimal': IntegerToBinaryCodedDecimal,\n 'round': Round,\n 'truncate': Truncate,\n 'real_to_string': RealToString,\n 'integer_to_ascii': IntegerToASCII,\n 'double_integer_to_ascii': DoubleIntegerToASCII,\n 'real_to_ascii': RealToASCII,\n 'ascii_to_hexadecimal': ASCIIToHexadecimal,\n 'hexadecimal_to_ascii': HexadecimalToASCII,\n 'string_to_integer': StringToInteger,\n 'string_to_double_integer': StringToDoubleInteger,\n 'string_to_real': StringToReal,\n 'decode': Decode,\n 'encode': Encode,\n 'segment': Segment\n }\n\n return config",
"def get_config_main_sections(self):\n self.sections_in_config = self.config_handle.sections()",
"def op_config(self) -> Any:\n return self.solid_config",
"def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)",
"def getConfig(self):\n return self.cp",
"def get_config(config):\n section = 'General'\n def add(name, val):\n if not config.has_option(section, name):\n config.set(section, name, val)\n add('input_fofn', 'NA')\n add('target', 'assembly')\n #add('sge_option', 'NA') # Needed for PBS, but not for everything\n add('sge_option_da', 'NA')\n add('sge_option_la', 'NA')\n add('sge_option_pda', 'NA')\n add('sge_option_pla', 'NA')\n add('sge_option_fc', 'NA')\n add('sge_option_cns', 'NA')\n return get_dict_from_old_falcon_cfg(config)",
"def build_confcom_addon_profile(self) -> ManagedClusterAddonProfile:\n # determine the value of constants\n addon_consts = self.context.get_addon_consts()\n CONST_ACC_SGX_QUOTE_HELPER_ENABLED = addon_consts.get(\n \"CONST_ACC_SGX_QUOTE_HELPER_ENABLED\"\n )\n\n confcom_addon_profile = self.models.ManagedClusterAddonProfile(\n enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: \"false\"})\n if self.context.get_enable_sgxquotehelper():\n confcom_addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = \"true\"\n return confcom_addon_profile",
"def corenlp_coref_props(self):\n coref_props = self.config._sections['corenlp_coref_props']\n return coref_props",
"def get_config(self):\n\n return {section: self.sections[section].get_values() for section in self.sections}",
"def cg_config():\n return {}",
"def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}",
"def config(self, function):\n self.cfgs.append(ConfigScope(function))\n return self.cfgs[-1]",
"def config_section_data():\n config_data = u\"\"\"[feeds]\n# comma separated section names. ex. sqlserver_feed,file_feed\nfeed_names=<your feeds>\nreload=true\n# use reload_types to limit the types of objects when reload=true.\n# Ex: incident,task,note,artifact,attachment,<data_table_api_name>\nreload_types=\n# set to true if ElasticSearch errors occur during reload=true\nreload_query_api_method=false\n\n# feed_data is the default message destination that will be listened to\nqueue=feed_data\n\n# set to true if attachment data should be part of payload send to plugins\ninclude_attachment_data=false\n# if necessary, specify the supported workspace (by label, case sensitive) and the list of feeds associated with it\n# ex: 'Default Workspace': ['sqlserver_feed'], 'workspace A': ['kafka_feed', 'resilient_feed']\nworkspaces=\n\"\"\"\n return config_data",
"def get_config_template(self) -> cconfig.Config:",
"def getZapataConf(self):\n #cProf = briProfiles[self['briconfig']] #Grab the config profile\n #output = self.mergeConfigList(cProf, briConfigList)\n output = []\n for portInd, portLine in enumerate(self.portLines[:-1]):\n if self[portInd]['type'] == 'na':\n continue\n signalling = str.join('_', (self[portInd]['type'], self[portInd]['signalling']))\n output.append(\"group = \"+ str.join(', ', self.pluginEntity.getPortGroup(portLine[1])))\n #Get CallerID\n output.append(\"callerid = \" + self[portInd]['callerid'])\n #Get PickupGroup\n output.append(\"callgroup = \" + self[portInd]['callgroup'])\n output.append(\"pickupgroup = \" + self[portInd]['pickupgroup'])\n #Context Bindings\n output.append(\"context = \"+ self[portInd]['context'])\n output.append(\"signalling = \"+ signalling) \n output.append(\"channel = \"+ str(portLine[0]))\n return output"
] | [
"0.6987265",
"0.61188745",
"0.58987737",
"0.5838142",
"0.5801516",
"0.57374656",
"0.57162094",
"0.5709847",
"0.5694724",
"0.56832767",
"0.5636348",
"0.56250817",
"0.55275506",
"0.5512176",
"0.5507755",
"0.5460587",
"0.5455781",
"0.5453585",
"0.5423884",
"0.5390443",
"0.5382282",
"0.53492665",
"0.53465974",
"0.5331778",
"0.5301936",
"0.5281374",
"0.5264328",
"0.52589804",
"0.52485013",
"0.5223411"
] | 0.6867581 | 1 |
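The record above pairs a docstring-style query with a `get_coil_config_section` hook that returns `None` in the base platform; concrete platforms override it to name an extra config section for their coils. A minimal sketch of that override pattern, assuming a hypothetical `fictional_coils` section name and plain classes rather than the framework's real platform base class:

```python
from typing import Optional


class BasePlatform:
    """Simplified stand-in for a driver platform base class."""

    @classmethod
    def get_coil_config_section(cls) -> Optional[str]:
        # The base platform supports no extra coil settings.
        return None


class FictionalPlatform(BasePlatform):
    """Hypothetical platform that understands extra coil settings."""

    @classmethod
    def get_coil_config_section(cls) -> Optional[str]:
        # Name of the additional config section this platform validates.
        return "fictional_coils"


if __name__ == "__main__":
    for platform in (BasePlatform, FictionalPlatform):
        print(platform.__name__, "->", platform.get_coil_config_section())
```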
Return addition config section for coils overwrites. | def get_coil_overwrite_section(cls):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_coil_config_section(cls) -> Optional[str]:\n return None",
"def get_coil_config_section(cls):\n return None",
"def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))",
"def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)",
"def configure(self, section):",
"def validate_coil_overwrite_section(self, driver, config_overwrite):\n driver.machine.config_validator.validate_config(\n \"coil_overwrites\", config_overwrite, driver.name,\n base_spec=self.get_coil_overwrite_section())\n return config_overwrite",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def get_switch_overwrite_section(cls):\n return None",
"def config(self, function):\n self.cfgs.append(ConfigScope(function))\n return self.cfgs[-1]",
"def concat_config(config, new_config):\n for new_path in new_config:\n if new_path not in config:\n config[new_path] = new_config[new_path]\n else:\n config[new_path][0] = config[new_path][0] or new_config[new_path][0]\n for filename in config[new_path]:\n if filename != 0:\n if filename in new_config[new_path]:\n for opt in config[new_path][filename]:\n if opt in new_config[new_path][filename]:\n new_config[new_path][filename][opt]\\\n .update(config[new_path][filename][opt])\n else:\n new_config[new_path][filename][opt] = \\\n config[new_path][filename][opt]\n else:\n new_config[new_path][filename] = config[new_path][filename]\n return config",
"def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config",
"def overwrite(section: str, data: any) -> None:\n\toverwriteDict[section] = data\n\tlogger.debug(f'Overwritten config {section}!')",
"def _getConfigName(self):\n return \"%s_processCoadd_config\" % (self.config.coaddName,)",
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def get_section(self,name):\n if self.__config.has_section(name):\n data={}\n for opt,val in self.__config.items(name):\n data[opt]=val\n return data\n else:\n raise Exception(_('EVOGTK: Section \"%s\" does not exist in this preferences instance') % name)",
"def _configure_addon(self):\n cfg = None\n try:\n data_dir = os.path.split(self.props.data_dir)\n\n cfg = Configuration(jobtype='Blender', \n data_path=data_dir[0],\n log_level=int(self.props.log_level),\n name=self.props.ini_file,\n datadir=data_dir[1])\n \n except (InvalidConfigException, IndexError) as exp:\n self.log.warning(\"Warning failed to load config file, \"\n \"creating new default config.\")\n self.log.warning(str(exp))\n \n finally:\n\n if not os.path.isdir(self.props.data_dir):\n raise EnvironmentError(\"Data directory not created - \"\n \"please ensure you have adequate permissions.\")\n\n if not cfg:\n cfg = Configuration(jobtype='Blender', log_level='warning')\n\n if self.props.endpoint:\n cfg = override_config(cfg, endpoint=self.props.endpoint)\n if self.props.account:\n cfg = override_config(cfg, account=self.props.account)\n if self.props.key:\n cfg = override_config(cfg, key=self.props.key)\n if self.props.client_id:\n cfg = override_config(cfg, client_id=self.props.client_id)\n if self.props.tenant:\n cfg = override_config(cfg, tenant=self.props.tenant)\n if self.props.redirect:\n cfg = override_config(cfg, redirect=self.props.redirect)\n\n cfg.save_config()\n return cfg",
"def _build_new_config_(self):\n if hasattr(self,\"_init_config\"):\n # Has been loaded\n newconfig = []\n set_key = []\n for l in self._init_config:\n if ((l.startswith(\"#\") or l.startswith(\"\\t\") or l.startswith(\" \")) or len(l)==0):\n newconfig.append(l)\n else:\n key = l.split()[0]\n if key in self.switched_off_keys:\n key = \"# \"+key\n newconfig.append(self.get_config_lines(key))\n set_key.append(key)\n\n keys = np.asarray(list(self.config.keys()))\n not_set_key = keys[~np.in1d(keys, set_key)]\n \n if len(not_set_key)>0:\n newconfig.append(\"# == NEW KEYS == #\")\n for key in not_set_key:\n newconfig.append(self.get_config_lines(key))\n return np.array(newconfig)\n else:\n # new\n return self.get_config(\"array\")",
"def op_config(self) -> Any:\n return self.solid_config",
"def get_config(self):\n return self.cat_feats_cfg",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def get_config(self):\n return {'reduction': self.reduction, 'name': self.name}",
"def add_over(self, override: ItemConfig) -> None:\n self.all_conf = lazy_conf.concat(self.all_conf, override.all_conf)\n\n for vers_id, styles in override.versions.items():\n our_styles = self.versions.setdefault(vers_id, {})\n for sty_id, style in styles.items():\n if sty_id not in our_styles:\n our_styles[sty_id] = style\n else:\n our_styles[sty_id] = lazy_conf.concat(our_styles[sty_id], style)",
"def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config",
"def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config",
"def get_config(self):\n return super().get_config()",
"def get_rec_config(self):\n conf_map = {}\n if len(self.reconstructions.text()) > 0:\n conf_map['reconstructions'] = str(self.reconstructions.text())\n if len(self.device.text()) > 0:\n conf_map['device'] = str(self.device.text()).replace('\\n', '')\n if len(self.alg_seq.text()) > 0:\n conf_map['algorithm_sequence'] = str(self.alg_seq.text()).replace('\\n', '')\n if len(self.beta.text()) > 0:\n conf_map['beta'] = str(self.beta.text())\n if len(self.support_area.text()) > 0:\n conf_map['support_area'] = str(self.support_area.text()).replace('\\n', '')\n if self.cont.isChecked():\n conf_map['cont'] = 'true'\n if len(self.cont_dir_button.text().strip()) > 0:\n conf_map['continue_dir'] = '\"' + str(self.cont_dir_button.text()).strip() + '\"'\n print('cont_dir', conf_map['continue_dir'])\n\n for feat_id in self.features.feature_dir:\n self.features.feature_dir[feat_id].add_config(conf_map)\n\n return conf_map",
"def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\", \"0\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)",
"def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config"
] | [
"0.61900616",
"0.61157715",
"0.6036548",
"0.58794725",
"0.5742443",
"0.57416075",
"0.5736262",
"0.5693316",
"0.5629141",
"0.5574493",
"0.55668503",
"0.5548216",
"0.5522465",
"0.55043876",
"0.5483229",
"0.5475912",
"0.5463066",
"0.54567635",
"0.5451854",
"0.54403436",
"0.54403436",
"0.54403436",
"0.5429553",
"0.54257125",
"0.5425618",
"0.5425618",
"0.5417426",
"0.54116756",
"0.54116327",
"0.5405163"
] | 0.6522079 | 0 |
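The overwrite-section hook in the record above follows the same idea: the platform names a group of settings that individual hardware rules may override per coil. A minimal sketch of how such overwrites could be layered over the base coil settings, assuming plain dictionaries instead of the framework's validated config objects:

```python
def apply_coil_overwrites(base_config: dict, overwrites: dict) -> dict:
    """Return a new coil config with overwrite values layered on top.

    Keys whose overwrite value is None are ignored, so an "empty"
    overwrite leaves the corresponding base setting untouched.
    """
    merged = dict(base_config)
    for key, value in overwrites.items():
        if value is not None:
            merged[key] = value
    return merged


if __name__ == "__main__":
    base = {"pulse_ms": 20, "pulse_power": 1.0, "recycle": True}
    rule_overwrites = {"pulse_ms": 12, "pulse_power": None}
    print(apply_coil_overwrites(base, rule_overwrites))
    # -> {'pulse_ms': 12, 'pulse_power': 1.0, 'recycle': True}
```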
Validate coil overwrite config for platform. | def validate_coil_overwrite_section(self, driver, config_overwrite):
driver.machine.config_validator.validate_config(
"coil_overwrites", config_overwrite, driver.name,
base_spec=self.get_coil_overwrite_section())
return config_overwrite | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def _check_config(self):\n self._config[\"dataset_name\"] = MetaDataset(self._config[\"dataset_name\"])\n self._config[\"embedding_crop\"] = EmbeddingCrop(\n self._config[\"embedding_crop\"])\n if self._config[\"dataset_name\"] == MetaDataset.TIERED:\n error_message = \"embedding_crop: {} not supported for {}\".format(\n self._config[\"embedding_crop\"], self._config[\"dataset_name\"])\n assert self._config[\n \"embedding_crop\"] == EmbeddingCrop.CENTER, error_message",
"def validate_coil_section(self, driver, config) -> dict:\n if self.get_coil_config_section():\n spec = self.get_coil_config_section() # pylint: disable-msg=assignment-from-none\n config = driver.machine.config_validator.validate_config(spec, config, driver.name)\n elif config:\n raise AssertionError(\"No platform_config supported but not empty {} for driver {}\".\n format(config, driver.name))\n\n return config",
"def _check_config(self):",
"def validate_coil_section(self, driver, config):\n base_spec = [\"device\"]\n if self.__class__.get_coil_config_section():\n base_spec.append(self.__class__.get_coil_config_section())\n driver.machine.config_validator.validate_config(\n \"coils\", config, driver.name,\n base_spec=base_spec)\n return config",
"def check_config(config):\n pass",
"def check_configs(self):\n\n pass",
"def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()",
"def validate_config(self):\n\n ServerHeraldNotifyBase.validate_config(self)\n\n # Prowl requires an API key\n if not self.config_has('prowl'):\n print ('`prowl` notification type requires a Prowl API key to be '\n 'specified in the config file.')\n sys.exit(1)\n\n if not self.config_has('prowl', 'apikey'):\n print 'Prowl requires an API key in the config file'\n sys.exit(1)",
"def check_config(cfg):",
"def check_configuration(self, configuration):\n super(Pixiv_bot, self).check_configuration(configuration)",
"def validate_config(self, changed):\n logger.debug(\"[%s] Validating config (Legacy path)\", self.name)\n if not self.to_validate(changed):\n return\n # Validate (Legacy Path)\n from noc.cm.engine import Engine\n\n engine = Engine(self)\n try:\n engine.check()\n except: # noqa\n logger.error(\"Failed to validate config for %s\", self.name)\n error_report()",
"def validate_config(self, config: Dict) -> bool:\n raise NotImplementedError",
"def valid_configuration(self):\n valid = True\n\n if (not self.__config.suffix()) and (self.__config.output_dir() == self.__config.input_dir()):\n print(\"ERROR: output_dir directory cannot be the same as input_dir with an empty suffix!\")\n valid = False\n if not self.__config.public_key():\n print(\"ERROR: public_key not set! Set it through 'pdfworkshop config public_key <your_key>'. \"\n \"A free API key can be obtained from https://developer.ilovepdf.com/\")\n valid = False\n return valid",
"def validate_config(self):\n reference = data_file(\"../config/template/minimum_aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._step_name:\n reference = data_file(\"../config/template/minimum_step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._dag_name:\n reference = data_file(\"../config/template/minimum_dag.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)",
"def properties_validation(config_data: Dict = None) -> bool:\n\n if config_data is None:\n config_file = os.path.join(\n os.path.dirname(__file__), 'server-config.json')\n with open(config_file) as config:\n config_data = json.load(config)\n platform_properties, err = PlatformPropertiesSchema().load(config_data)\n\n # Raise error if required property is not provided\n if err:\n raise MissingRequiredParameterError(err)\n\n # Raise error if unsupported protocol or module\n for protocol in platform_properties.supported_transfer_protocols:\n if protocol not in SUPPORTED_PROTOCOLS:\n err = str.format(\"Unsupported protocol {}\", protocol)\n raise ValueError(err)\n for module in platform_properties.supported_modules:\n if module not in SUPPORTED_MODULES:\n err = str.format(\"Unsupported module {}\", module)\n raise ValueError(err)\n\n # Raise error if https not in supported protocols\n if \"https\" not in platform_properties.supported_transfer_protocols:\n raise MissingRequiredParameterError(\n 'CARMIN 0.3 requires https support')\n\n # Raise error if minTimeout is greater than maxTimeout\n if (platform_properties.max_authorized_execution_timeout != 0\n and platform_properties.min_authorized_execution_timeout >\n platform_properties.max_authorized_execution_timeout):\n raise ValueError('maxTimeout must be greater than minTimeout')\n return True",
"def _check_valid_config(self):\n default_keys = self.default_config.keys()\n current_keys = self.config.keys()\n\n if default_keys != current_keys:\n msg = f\"Config must have the following keys : {list(default_keys)}\"\n self.logger.critical(msg)\n sys.exit(0)",
"def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: 
query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()",
"def _validate_configurations(self) -> None:\n if self.__exception:\n raise self.__exception",
"def validate_switch_overwrite_section(self, switch: Switch, config_overwrite: dict) -> dict:\n switch.machine.config_validator.validate_config(\n \"switch_overwrites\", config_overwrite, switch.name,\n base_spec=self.__class__.get_switch_overwrite_section())\n return config_overwrite",
"def check_config(self):\n assert 'AUTO' in self.config\n assert 'LAT' in self.config\n assert 'LON' in self.config\n assert 'ALL_CHANNELS' in self.config\n for key in self.extract_servo_channels():\n assert 'STATUS' in self.config[key]\n self.config[key]['STATUS'] = float(self.config[key]['STATUS'])\n assert 0.0 <= self.config[key]['STATUS'] <= 1.0\n\n if not 'SUNRISE_BUFFER' in self.config[key]:\n self.config[key]['SUNRISE_BUFFER'] = 0\n if not 'SUNSET_BUFFER' in self.config[key]:\n self.config[key]['SUNSET_BUFFER'] = 0",
"def config_sanity_check(config: dict) -> dict:\n\n # back compatibility support\n config = parse_v011(config)\n\n # check model\n if config[\"train\"][\"method\"] == \"conditional\":\n if config[\"dataset\"][\"train\"][\"labeled\"] is False: # unlabeled\n raise ValueError(\n \"For conditional model, data have to be labeled, got unlabeled data.\"\n )\n\n return config",
"def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")",
"def validate_config(self):\r\n c = self.config\r\n \r\n # Make sure that we have a database_path, and an image_path...\r\n assert 'database_path' in c\r\n assert 'image_path' in c\r\n # We should probably check if these paths exist and make them as well...\r\n \r\n # Set the default values.\r\n graph_draw_frequency = c['graph_draw_frequency']\r\n for period, interval in self.default_config['graph_draw_frequency'].iteritems():\r\n graph_draw_frequency.setdefault(period, interval)\r\n \r\n # A quick check to make sure that our port is an integer.\r\n c['httpd_port'] = int(c['httpd_port'])\r\n \r\n # Make sure that no duplicate IDs exist, and that the template exists as well.\r\n ids = set()\r\n for graph in c['graphs']:\r\n graph.setdefault('config', {})\r\n graph['config'].setdefault('periods', [])\r\n assert graph['id'] not in ids\r\n ids.add(graph['id'])\r\n assert(template_exists(graph['template']))",
"def validate(self, config_json):\n pass",
"def check_config_conflicts(config: CfgNode):\n if config.task == \"generation\":\n assert config['train'].teacher_forcing == True, \"You should use teacher forcing to train generation!\"\n \n if config.task == \"generation\":\n if config.dataloader.max_seq_length >= config.generation.max_length:\n logger.warning(\"In generation, your config.generation.max_length is shorter than config.max_seq_length\"\n \"This can lead to unexpected behavior. You should consider increasing ``config.generation.max_length``.\"\n )\n raise RuntimeError",
"def check_config_mode(self):\n return False",
"def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0"
] | [
"0.6632182",
"0.6598692",
"0.6598692",
"0.638589",
"0.6335491",
"0.6256042",
"0.60498774",
"0.60335827",
"0.5984886",
"0.59721404",
"0.5929158",
"0.5917141",
"0.58872664",
"0.5825252",
"0.578265",
"0.57337016",
"0.57169205",
"0.57041675",
"0.57027197",
"0.57003504",
"0.56746656",
"0.5674592",
"0.56715584",
"0.5668083",
"0.5660116",
"0.56488526",
"0.5586463",
"0.5526044",
"0.55253035",
"0.5513132"
] | 0.7273753 | 0 |
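The validation hook in the record above delegates to `machine.config_validator.validate_config(...)` with a spec name and a base spec. A very small stand-in for that call, assuming the spec is just a set of allowed keys and that unknown keys should be rejected (a real validator would also coerce types and fill in defaults):

```python
def validate_against_spec(section_name: str, config: dict,
                          allowed_keys: set, item_name: str) -> dict:
    """Minimal stand-in for a config validator.

    Rejects any key the spec does not know about and returns a copy of
    the accepted settings.
    """
    unknown = set(config) - allowed_keys
    if unknown:
        raise ValueError(
            f"Invalid settings {sorted(unknown)} in section "
            f"'{section_name}' of {item_name}"
        )
    return dict(config)


if __name__ == "__main__":
    spec = {"pulse_ms", "pulse_power", "recycle"}
    print(validate_against_spec("coil_overwrites", {"pulse_ms": 15},
                                spec, "flipper_left"))
```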
Validate coil config for platform. | def validate_coil_section(self, driver, config):
base_spec = ["device"]
if self.__class__.get_coil_config_section():
base_spec.append(self.__class__.get_coil_config_section())
driver.machine.config_validator.validate_config(
"coils", config, driver.name,
base_spec=base_spec)
return config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_coil_section(self, driver, config) -> dict:\n if self.get_coil_config_section():\n spec = self.get_coil_config_section() # pylint: disable-msg=assignment-from-none\n config = driver.machine.config_validator.validate_config(spec, config, driver.name)\n elif config:\n raise AssertionError(\"No platform_config supported but not empty {} for driver {}\".\n format(config, driver.name))\n\n return config",
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def _validate_config(self):\n pass",
"def validate_config(self):\n\n # LOCALHOST\n if self.location == 'localhost':\n if 'browserName' not in self.config.keys():\n msg = \"Add the 'browserName' in your local_config: e.g.: 'Firefox', 'Chrome', 'Safari'\" # noqa\n self.runner.critical_log(msg)\n raise BromeBrowserConfigException(msg)\n\n # EC2\n elif self.location == 'ec2':\n self.validate_ec2_browser_config()\n\n # VIRTUALBOX\n elif self.location == 'virtualbox':\n self.validate_virtualbox_config()",
"def check_config(config):\n pass",
"def config_sanity_check(config: dict) -> dict:\n\n # back compatibility support\n config = parse_v011(config)\n\n # check model\n if config[\"train\"][\"method\"] == \"conditional\":\n if config[\"dataset\"][\"train\"][\"labeled\"] is False: # unlabeled\n raise ValueError(\n \"For conditional model, data have to be labeled, got unlabeled data.\"\n )\n\n return config",
"def properties_validation(config_data: Dict = None) -> bool:\n\n if config_data is None:\n config_file = os.path.join(\n os.path.dirname(__file__), 'server-config.json')\n with open(config_file) as config:\n config_data = json.load(config)\n platform_properties, err = PlatformPropertiesSchema().load(config_data)\n\n # Raise error if required property is not provided\n if err:\n raise MissingRequiredParameterError(err)\n\n # Raise error if unsupported protocol or module\n for protocol in platform_properties.supported_transfer_protocols:\n if protocol not in SUPPORTED_PROTOCOLS:\n err = str.format(\"Unsupported protocol {}\", protocol)\n raise ValueError(err)\n for module in platform_properties.supported_modules:\n if module not in SUPPORTED_MODULES:\n err = str.format(\"Unsupported module {}\", module)\n raise ValueError(err)\n\n # Raise error if https not in supported protocols\n if \"https\" not in platform_properties.supported_transfer_protocols:\n raise MissingRequiredParameterError(\n 'CARMIN 0.3 requires https support')\n\n # Raise error if minTimeout is greater than maxTimeout\n if (platform_properties.max_authorized_execution_timeout != 0\n and platform_properties.min_authorized_execution_timeout >\n platform_properties.max_authorized_execution_timeout):\n raise ValueError('maxTimeout must be greater than minTimeout')\n return True",
"def validate_config(self, config: Dict) -> bool:\n raise NotImplementedError",
"def validate_config(self):\n\n ServerHeraldNotifyBase.validate_config(self)\n\n # Prowl requires an API key\n if not self.config_has('prowl'):\n print ('`prowl` notification type requires a Prowl API key to be '\n 'specified in the config file.')\n sys.exit(1)\n\n if not self.config_has('prowl', 'apikey'):\n print 'Prowl requires an API key in the config file'\n sys.exit(1)",
"def _check_config(self):\n self._config[\"dataset_name\"] = MetaDataset(self._config[\"dataset_name\"])\n self._config[\"embedding_crop\"] = EmbeddingCrop(\n self._config[\"embedding_crop\"])\n if self._config[\"dataset_name\"] == MetaDataset.TIERED:\n error_message = \"embedding_crop: {} not supported for {}\".format(\n self._config[\"embedding_crop\"], self._config[\"dataset_name\"])\n assert self._config[\n \"embedding_crop\"] == EmbeddingCrop.CENTER, error_message",
"def _check_config(self):",
"def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")",
"def check_config(cfg):",
"def validate_config(self):\r\n c = self.config\r\n \r\n # Make sure that we have a database_path, and an image_path...\r\n assert 'database_path' in c\r\n assert 'image_path' in c\r\n # We should probably check if these paths exist and make them as well...\r\n \r\n # Set the default values.\r\n graph_draw_frequency = c['graph_draw_frequency']\r\n for period, interval in self.default_config['graph_draw_frequency'].iteritems():\r\n graph_draw_frequency.setdefault(period, interval)\r\n \r\n # A quick check to make sure that our port is an integer.\r\n c['httpd_port'] = int(c['httpd_port'])\r\n \r\n # Make sure that no duplicate IDs exist, and that the template exists as well.\r\n ids = set()\r\n for graph in c['graphs']:\r\n graph.setdefault('config', {})\r\n graph['config'].setdefault('periods', [])\r\n assert graph['id'] not in ids\r\n ids.add(graph['id'])\r\n assert(template_exists(graph['template']))",
"def valid_configuration(self):\n valid = True\n\n if (not self.__config.suffix()) and (self.__config.output_dir() == self.__config.input_dir()):\n print(\"ERROR: output_dir directory cannot be the same as input_dir with an empty suffix!\")\n valid = False\n if not self.__config.public_key():\n print(\"ERROR: public_key not set! Set it through 'pdfworkshop config public_key <your_key>'. \"\n \"A free API key can be obtained from https://developer.ilovepdf.com/\")\n valid = False\n return valid",
"def validate_config(self):\n config = self.config\n\n # which doc types are enabled\n need_at_least_one = ['GOOGLE_DRIVE_ENABLED','GITHUB_ENABLED','DISQUS_ENABLED']\n found_one = False\n for n in need_at_least_one:\n if n in config.keys():\n found_one = True\n break\n if not found_one:\n raise Exception(\"Error: need at least one of: %s\"%(\", \".join(need_at_least_one)))\n\n if 'GOOGLE_DRIVE_ENABLED' in config.keys():\n if config['GOOGLE_DRIVE_ENABLED']:\n if 'GOOGLE_DRIVE_CREDENTIALS_FILE' in config.keys():\n if os.path.basename(config['GOOGLE_DRIVE_CREDENTIALS_FILE']) != 'credentials.json':\n raise Exception(\"Error: the file specified with GOOGLE_DRIVE_CREDENTIALS_FILE in the config file must have a filename of 'credentials.json'\")",
"def validate(self):\n if not self.hmc_address:\n raise ValueError(\"No HMC address provided\")\n if (not self.credentials['user']\n or not self.credentials['password']):\n raise ValueError(\n \"No CPC credentials set. Please provide 'admin-user' and \"\n \"'admin-password' in hypervisor profile\")\n if not self.boot_options:\n raise ValueError(\n \"No CPC boot method configured. Please set \"\n \"'liveimg-insfile-url' in CPC profile parameters or \"\n \"attach a volume with live image\")",
"def _validate(self, config):\n assert isinstance(config, BaseConfig), \\\n \"Configuration should be instance of `BaseConfig`, but given {}\".format(type(config))",
"def check_configuration(self, configuration):\n super(Pixiv_bot, self).check_configuration(configuration)",
"def check_config(config):\n\n # Check config\n assert config.dataset in [\"conll04\", \"ace05\"]\n assert config.train_mode in [\"train\", \"train+dev\"]\n\n for emb in config.embedder:\n assert emb in [\"word\", \"char\", \"bert-base\", \"bert-large\"], emb\n\n if \"char\" in config.embedder:\n assert config.char_pool in [\"last\", \"avg\", \"max\"]\n\n if config.encoder is not None:\n assert config.encoder == \"bilstm\"\n\n for task in config.tasks:\n assert task in [\"ner\", \"re\"]\n\n assert config.ner_decoder in [\"iobes\", \"span\"]\n\n if \"cuda\" in config.device:\n assert torch.cuda.is_available(), \"CUDA not available\"",
"def validate(self, config_json):\n pass",
"def validate_config(config):\n # check if paths are valid\n check_paths = {\n 'data_path': r'data$',\n 'master_list_path': r'master_list\\.csv$',\n 'duplicate_list_path': r'duplicate_list\\.csv$',\n 'log_path': r'data[\\\\\\/]jobfunnel.log$',\n 'filter_list_path': r'data[\\\\\\/]filter_list\\.json$',\n }\n\n for path, pattern in check_paths.items():\n if not re.search(pattern, config[path]):\n raise ConfigError(path)\n # check if the provider list only consists of supported providers\n if not set(config['providers']).issubset(PROVIDERS):\n raise ConfigError('providers')\n\n # check validity of region settings\n validate_region(config['search_terms']['region'])\n\n # check validity of delay settings\n validate_delay(config['delay_config'])\n\n # check the validity of max_listing_days settings\n if(config['max_listing_days'] is not None and config['max_listing_days'] < 0):\n raise ConfigError('max_listing_days')",
"def validate_config_dict(self):\n config_options = [\"pipeline_name\",\n \"num_processors\",\n \"num_sessions_at_once\",\n \"available_memory\",\n \"cluster_system\",\n \"output_directory\",\n \"working_directory\",\n \"template_head_for_anat\",\n \"exclude_zeros\",\n \"start_idx\",\n \"stop_idx\",\n \"write_report\",\n \"write_graph\",\n \"write_all_outputs\",\n \"upload_to_s3\",\n \"bucket_prefix\",\n \"bucket_out_prefix\",\n \"local_prefix\",\n \"bucket_name\",\n \"creds_path\"]\n invalid = []\n for param in self._config.keys():\n if param not in config_options:\n invalid.append(param)\n if len(invalid) > 0:\n err = \"\\n[!] The following parameters in your configuration \" \\\n \"file are not recognized. Double-check the pipeline \" \\\n \"configuration template.\\n\"\n err += \"\\n\".join([x for x in invalid])\n raise Exception(err)\n else:\n return 0",
"def _verify_options(config: configuration.Config) -> None:\n\n if not config.config['species']:\n log._logger.error('You must specify a species (-s/--species)')\n exit(1)\n\n if config.config['hpc'] and config.config['local']:\n log._logger.error('You can only use one of the config options (hpc/local)')\n exit(1)\n\n if config.config['hpc'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (hpc/custom)')\n exit(1)\n\n if config.config['local'] and config.config['custom']:\n log._logger.error('You can only use one of the config options (local/custom)')\n exit(1)\n\n if (not config.config['hpc']) and\\\n (not config.config['local']) and\\\n (not config.config['custom']):\n log._logger.error(\n 'You must specify a compute cluster environment (hpc/local/custom)'\n )\n exit(1)\n\n if config.config['custom'] and (not config.config['scheduler']):\n log._logger.error(\n 'The custom compute environment requires a scheduler address to be set'\n )\n exit(1)",
"def check_configs(self):\n\n pass",
"def validate_config(self):\n reference = data_file(\"../config/template/minimum_aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._step_name:\n reference = data_file(\"../config/template/minimum_step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._dag_name:\n reference = data_file(\"../config/template/minimum_dag.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)",
"def config_sanity_check(self):\n if 'name' not in self.config:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"name\" as a string in your\n configuration.\"\"\")\n\n if 'publish_topic' not in self.config:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"public_topic\" as an object\n in your configuration.\"\"\")\n\n if 'topic' not in self.config['publish_topic']:\n raise EventifyConfigError(\n \"\"\"Required configuration parameter missing!\n Please configure \"topic\" as a key in your\n \"public_topic object.\"\"\")",
"def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,\n self.memory,\n self.accelerator_type,\n self.accelerator_count)",
"def config_validate(ctx, **kwargs):\n # Validates pf9-express config file and obtains Auth Token\n #Load Active Config into ctx\n GetConfig(ctx).GetActiveConfig()\n #Get Token\n token = GetToken().get_token_v3(\n ctx.params[\"du_url\"],\n ctx.params[\"du_username\"],\n ctx.params[\"du_password\"],\n ctx.params[\"du_tenant\"] )\n if token is not None:\n click.echo('Config Validated!')\n click.echo('Token: %s' % token)\n else:\n click.echo('Config Validation Failed!')"
] | [
"0.72570366",
"0.6911317",
"0.6911317",
"0.6909976",
"0.65492195",
"0.6416078",
"0.62831616",
"0.6280884",
"0.62649447",
"0.62235534",
"0.6199735",
"0.618072",
"0.6152399",
"0.61097735",
"0.60900956",
"0.60865796",
"0.6076924",
"0.5985597",
"0.59361964",
"0.58753914",
"0.5853821",
"0.58474845",
"0.5840261",
"0.5834601",
"0.5834341",
"0.5828237",
"0.580378",
"0.5792004",
"0.5773247",
"0.5772195"
] | 0.70865804 | 1 |
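The two `validate_coil_section` variants shown above differ in how they treat a platform without an extra coil section: one simply validates against the base spec alone, the other raises when a non-empty platform config is supplied anyway. A combined sketch of that logic, assuming the simplified key-set specs from the previous example and a hard-coded spec table purely for illustration:

```python
from typing import Optional


def validate_coil_section(platform_section: Optional[str],
                          config: dict, driver_name: str) -> dict:
    """Validate platform-specific coil settings for one driver.

    platform_section is what get_coil_config_section() would return;
    the allowed keys per section are invented here for the example.
    """
    specs = {"fictional_coils": {"hold_power", "output_mode"}}

    if platform_section:
        allowed = specs[platform_section]
        unknown = set(config) - allowed
        if unknown:
            raise ValueError(
                f"Invalid keys {sorted(unknown)} for driver {driver_name}")
        return dict(config)
    if config:
        # No platform section supported, so any platform config is an error.
        raise AssertionError(
            f"No platform_config supported but not empty {config} "
            f"for driver {driver_name}")
    return {}


if __name__ == "__main__":
    print(validate_coil_section("fictional_coils", {"hold_power": 0.5}, "coil_1"))
    print(validate_coil_section(None, {}, "coil_2"))
```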
Set pulse on hit and release rule to driver. Pulses a driver when a switch is hit. When the switch is released the pulse is canceled. Typically used on the main coil for dual coil flippers without eos switch. | def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def teleopPeriodic(self):\n\n try:\n if self.debounce(6, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n \n try:\n if self.debounce(2) or self.debounce(1, gamepad=True):\n self.boulder_automation.toggle_intake_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(7):\n self.chassis.toggle_field_oriented()\n except:\n self.onException()\n\n try:\n if self.debounce(8):\n enabled = self.heading_hold_pid.isEnable()\n self.heading_hold_pid.disable()\n self.bno055.resetHeading()\n self.heading_hold_pid.setSetpoint(constrain_angle(self.bno055.getAngle()))\n self.heading_hold_pid.reset()\n if enabled:\n self.heading_hold_pid.enable()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10):\n self.chassis.toggle_range_holding(self.chassis.correct_range)\n except:\n self.onException()\n\n try:\n if self.debounce(1) or self.debounce(8, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(9):\n self.chassis.toggle_heading_hold()\n except:\n self.onException()\n\n try:\n if self.debounce(4):\n self.defeater.up()\n except:\n self.onException()\n\n try:\n if self.debounce(5):\n self.shooter.stop()\n self.intake.stop()\n except:\n self.onException()\n\n try:\n if self.debounce(3):\n #self.chassis.range_setpoint = self.chassis.correct_range\n #self.chassis.distance_pid.enable()\n # self.shooter.start_shoot()\n self.chassis.range_setpoint = 0.0\n self.chassis.track_vision = False\n self.chassis.toggle_range_holding()\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\n\n try:\n if self.debounce(6):\n self.defeater.down()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.shooter.backdrive()\n self.intake.backdrive()\n except:\n self.onException()\"\"\"\n\n try:\n if self.joystick.getPOV() != -1:\n self.chassis.heading_hold = True\n direction = 0.0\n if self.joystick.getPOV() == 0:\n # shooter centre goal\n direction = math.pi\n elif self.joystick.getPOV() == 90:\n # shooter right goal\n direction = math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 270:\n # shooter left goal\n direction = -math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 180:\n direction = 0.0\n self.chassis.set_heading_setpoint(direction)\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(11) or self.gamepad.getRawButton(2):\n self.chassis.field_oriented = False \n else:\n self.chassis.field_oriented = True\n except:\n self.onException()\n\n try:\n if self.gamepad.getRawButton(3):\n self.boulder_automation.engage(\"backdrive_manual\")\n elif self.boulder_automation.current_state == \"backdrive_manual\":\n self.boulder_automation.done()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(1, gamepad=True):\n self.chassis.zero_encoders()\n self.chassis.distance_pid.setSetpoint(1.2)\n self.chassis.distance_pid.enable()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10, gamepad=True):\n self.vision.write_image()\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(12):\n self.joystick_rate = 0.6\n else:\n self.joystick_rate = 0.4\n except:\n self.onException()\n\n self.chassis.inputs = [-rescale_js(self.joystick.getY(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getX(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getZ(), 
deadzone=0.2, exponential=15.0, rate=self.joystick_rate),\n (self.joystick.getThrottle() - 1.0) / -2.0\n ]\n for input in self.chassis.inputs[0:3]:\n if input != 0.0:\n # Break out of auto if we move the stick\n self.chassis.distance_pid.disable()\n self.chassis.range_setpoint = None\n self.chassis.track_vision = False\n # self.chassis.field_oriented = True\n self.putData()",
"def stop_motor(self):\n self.output(self.steering_pin, 0)\n self.pi.set_servo_pulsewidth(self.steering_pin, 0)",
"def motorswitch(self, bo, pin, t):\n self.app.processEvents()\n if(self.win.getStopped() == True):\n self.win.updatelabel2(\"Jingle button was clicked.\\nClick another!\")\n return\n while self.win.getPaused() == True:\n self.app.processEvents() # Not really too sure if this line is needed. NEEDS TESTING\n self.win.updatelabel2(\"Jingle Song Paused!\\nChoose A new Song or Play to Resume!\")\n time.sleep(.1)\n GPIO.output(pin, bo)\n time.sleep(t)",
"def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")",
"def event_switch_1_off(self, ioname, iovalue):\n if self.main_state:\n self.rpi.io.relay_1.value = False\n self.state_1_on = False\n self.door_count = self.door_count + 1\n # self.door_outside_time_closed = time.time()\n # self.door_outside_delta_time_open = self.door_outside_time_closed - self.door_outside_time_opened\n # self.door_outside_sum_time_open = self.door_outside_sum_time_open + self.door_outside_delta_time_open\n # print(\"outside delta: \", int(self.door_outside_delta_time_open),\n # \"sec outside sum: \", int(self.door_outside_sum_time_open), ' sec')\n self.door_outside_sum_time_open = \\\n self.door_outside_sum_time_open + time.time() - self.door_outside_time_opened\n self.trigger = self.trigger_door_outside_close",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")"
] | [
"0.7318226",
"0.7116496",
"0.71158063",
"0.69559383",
"0.6919302",
"0.6803722",
"0.6787461",
"0.6785574",
"0.67510176",
"0.6638195",
"0.66331255",
"0.64948416",
"0.59656495",
"0.59290755",
"0.5917162",
"0.5901438",
"0.5824688",
"0.5653308",
"0.5549026",
"0.54918414",
"0.53997046",
"0.5390638",
"0.53359467",
"0.5328965",
"0.5328099",
"0.5299545",
"0.5294925",
"0.52885246",
"0.52872175",
"0.5235002"
] | 0.74059016 | 0 |
Set pulse on hit and enable and release and disable rule on driver. Pulses a driver when a switch is hit, then enables the driver (possibly with PWM). When the switch is released, the pulse is canceled and the driver is disabled. When the second switch (disable_switch) is hit, the pulse is likewise canceled and the driver is disabled. Typically used on the main coil for dual-coil flippers with an EOS switch. | def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def enable_charge_pump(enable):\n send_command(0x8D)\n if enable:\n send_command(0x14)\n else:\n send_command(0x10)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def set_disabled_switch(self, disabled):\n self.disabled = disabled",
"def _led_disable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.HIGH)",
"def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")",
"def kill_switch(disable_after, keys):\n watchdog(disable_after, keys)",
"def enable(self):\n self.switch.enable()\n self._enabled = True",
"def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass",
"def disable_radio(self):\n self.acquire_response(b'AT*R0')",
"def enable_motor(self, enabled):\r\n self.enabled = enabled\r\n\r\n # Set motors in neutral if disabling.\r\n if not self.enabled:\r\n self.set_neutral()",
"def teleopPeriodic(self):\n\n try:\n if self.debounce(6, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n \n try:\n if self.debounce(2) or self.debounce(1, gamepad=True):\n self.boulder_automation.toggle_intake_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(7):\n self.chassis.toggle_field_oriented()\n except:\n self.onException()\n\n try:\n if self.debounce(8):\n enabled = self.heading_hold_pid.isEnable()\n self.heading_hold_pid.disable()\n self.bno055.resetHeading()\n self.heading_hold_pid.setSetpoint(constrain_angle(self.bno055.getAngle()))\n self.heading_hold_pid.reset()\n if enabled:\n self.heading_hold_pid.enable()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10):\n self.chassis.toggle_range_holding(self.chassis.correct_range)\n except:\n self.onException()\n\n try:\n if self.debounce(1) or self.debounce(8, gamepad=True):\n self.boulder_automation.toggle_shoot_boulder()\n except:\n self.onException()\n\n try:\n if self.debounce(9):\n self.chassis.toggle_heading_hold()\n except:\n self.onException()\n\n try:\n if self.debounce(4):\n self.defeater.up()\n except:\n self.onException()\n\n try:\n if self.debounce(5):\n self.shooter.stop()\n self.intake.stop()\n except:\n self.onException()\n\n try:\n if self.debounce(3):\n #self.chassis.range_setpoint = self.chassis.correct_range\n #self.chassis.distance_pid.enable()\n # self.shooter.start_shoot()\n self.chassis.range_setpoint = 0.0\n self.chassis.track_vision = False\n self.chassis.toggle_range_holding()\n self.chassis.toggle_vision_tracking()\n except:\n self.onException()\n\n try:\n if self.debounce(6):\n self.defeater.down()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(10):\n self.shooter.backdrive()\n self.intake.backdrive()\n except:\n self.onException()\"\"\"\n\n try:\n if self.joystick.getPOV() != -1:\n self.chassis.heading_hold = True\n direction = 0.0\n if self.joystick.getPOV() == 0:\n # shooter centre goal\n direction = math.pi\n elif self.joystick.getPOV() == 90:\n # shooter right goal\n direction = math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 270:\n # shooter left goal\n direction = -math.pi / 3.0 + math.pi\n elif self.joystick.getPOV() == 180:\n direction = 0.0\n self.chassis.set_heading_setpoint(direction)\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(11) or self.gamepad.getRawButton(2):\n self.chassis.field_oriented = False \n else:\n self.chassis.field_oriented = True\n except:\n self.onException()\n\n try:\n if self.gamepad.getRawButton(3):\n self.boulder_automation.engage(\"backdrive_manual\")\n elif self.boulder_automation.current_state == \"backdrive_manual\":\n self.boulder_automation.done()\n except:\n self.onException()\n\n \"\"\"try:\n if self.debounce(1, gamepad=True):\n self.chassis.zero_encoders()\n self.chassis.distance_pid.setSetpoint(1.2)\n self.chassis.distance_pid.enable()\n except:\n self.onException()\"\"\"\n\n try:\n if self.debounce(10, gamepad=True):\n self.vision.write_image()\n except:\n self.onException()\n\n try:\n if self.joystick.getRawButton(12):\n self.joystick_rate = 0.6\n else:\n self.joystick_rate = 0.4\n except:\n self.onException()\n\n self.chassis.inputs = [-rescale_js(self.joystick.getY(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getX(), deadzone=0.05, exponential=1.2),\n - rescale_js(self.joystick.getZ(), 
deadzone=0.2, exponential=15.0, rate=self.joystick_rate),\n (self.joystick.getThrottle() - 1.0) / -2.0\n ]\n for input in self.chassis.inputs[0:3]:\n if input != 0.0:\n # Break out of auto if we move the stick\n self.chassis.distance_pid.disable()\n self.chassis.range_setpoint = None\n self.chassis.track_vision = False\n # self.chassis.field_oriented = True\n self.putData()",
"def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()"
] | [
"0.7557274",
"0.7468426",
"0.7387155",
"0.72845364",
"0.7275343",
"0.71724355",
"0.7050846",
"0.6838765",
"0.67255855",
"0.6656644",
"0.6406962",
"0.6232095",
"0.6179704",
"0.60970235",
"0.5676361",
"0.5635614",
"0.56271976",
"0.5584219",
"0.55626994",
"0.555252",
"0.55448014",
"0.5524679",
"0.5521017",
"0.55037355",
"0.54988956",
"0.5434108",
"0.5417902",
"0.53932065",
"0.5385622",
"0.53845286"
] | 0.7729859 | 0 |
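A minimal sketch of the behaviour described in the record above, assuming a hypothetical `driver` object with `pulse`/`enable`/`disable` methods and switch events given as `(switch, state)` pairs — these names are illustrative assumptions, not the pinball-platform API shown in the record:

```python
# Hypothetical event handler (not the real platform API); it only illustrates
# the rule's semantics for a dual-coil flipper main coil with an EOS switch.
def handle_switch_event(event, driver, enable_switch, disable_switch):
    switch, state = event
    if switch == enable_switch and state == "hit":
        driver.pulse()    # initial full-power kick
        driver.enable()   # then hold the coil, possibly at a reduced PWM duty cycle
    elif (switch == enable_switch and state == "released") or (
        switch == disable_switch and state == "hit"
    ):
        driver.disable()  # either event cancels the pulse and drops the hold
```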
Set pulse on hit rule on driver. Pulses a driver when a switch is hit. When the switch is released, the pulse continues. Typically used for autofire coils such as pop bumpers. | def set_pulse_on_hit_rule(self, enable_switch, coil):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(1, coil.hw_driver.number, enable_switch.hw_switch.number, \n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(2, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):\n raise NotImplementedError",
"def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_rule(coil=%s sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number))\n self.communicator.rule_add(3, coil.hw_driver.number, enable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n raise NotImplementedError",
"def set_delayed_pulse_on_hit_rule(self, enable_switch: SwitchSettings, coil: DriverSettings, delay_ms: int):\n del enable_switch\n del coil\n del delay_ms\n raise AssertionError(\"This platform does not support delayed pulse hardware rules.\")",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch, disable_switch, coil):\n self.log.info(\"set_pulse_on_hit_and_enable_and_release_and_disable_rule(coil=%s sw=%s dis_sw=%s)\" %\n (coil.hw_driver.number, enable_switch.hw_switch.number, disable_switch.hw_switch.number))\n self.communicator.rule_add(4, coil.hw_driver.number, enable_switch.hw_switch.number, disable_sw_id=disable_switch.hw_switch.number,\n duration=self._get_pulse_ms_value(coil))",
"def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)",
"def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings,\n eos_switch: SwitchSettings, coil: DriverSettings,\n repulse_settings: Optional[RepulseSettings]):\n raise NotImplementedError",
"def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pulley(\"down\")\n time.sleep(2.85)\n self.pulley(\"stop\")",
"def setup_motor(self,pin_num):\n pi.set_servo_pulsewidth(pin_num, 2000)\n sleep(2)\n pi.set_servo_pulsewidth(pin_num, 500 )\n sleep(2)",
"def steer(self):\n\n while self.active:\n angle = self.driver.angle\n steering_pwm_calc = self.angle_to_pmw(angle)\n self.pwm.set_pwm(0, 0, steering_pwm_calc)",
"def setInternalPulser(self,pulserEnable,pulseHeight):\n pass",
"def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)",
"def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")",
"def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle",
"def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)",
"def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)",
"def pulse(self, coil, milliseconds):\n self.log.info(\"RASPDriver.Pulse(%s %s, %d ms)\" %\n (coil.config['label'], coil.hw_driver.number, milliseconds))\n self.platform.communicator.driver_pulse(coil.hw_driver.number, milliseconds)\n return milliseconds",
"def servo_set_target(ch, pulse):\n\n # Pulse number is 4x pulse width (in microseconds)\n p_num = 4 * int(pulse)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_target, ch, p_num)",
"def setPulseDivisor(self, pd, motor=0): \n\t\tcmd = 'SAP'\t # Get axis parameter\n\t\ttype = 154\t\t # Microstep resolution\n\t\tvalue = int(pd)\t\t # Microstep resolution \n\t\tself.sendCommand(cmd, type, motor, value)\n\t\tdata = self.receiveData()\n\t\tif data.status != 100:\n\t\t\tif self.errorDict.has_key(data.status):\n\t\t\t\traise MotorError(self.errorDict[data.status])\n\t\t\telif data.status == None:\n\t\t\t\traise MotorError('Incorrect controller response, trying to reconnect')\n\t\t\telse:\n\t\t\t\traise MotorError(''.join(('Unknown error, ', str(data.status))))",
"def trigger(self):\n GPIO.output(self.trigger_pin, 1)\n time.sleep(10/1000000)\n GPIO.output(self.trigger_pin, 0)",
"def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)",
"def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)",
"def toggle(light_id):\n if light_id == \"alloff\":\n pidomCtrl.pulse(\"alloff\")\n elif light_id == \"outside\":\n pidomCtrl.pulse(\"outside\")\n elif light_id == \"stairs\":\n pidomCtrl.pulse(\"stairs\")\n elif light_id == \"frontdoorgroupoff\":\n pidomCtrl.pulse(\"persistedoff\")\n elif light_id == \"persistedon\":\n pidomCtrl.pulse(\"frontdoorgroupon\")"
] | [
"0.7381068",
"0.72259647",
"0.7224337",
"0.7212447",
"0.71060675",
"0.6980037",
"0.6900245",
"0.6871108",
"0.64783704",
"0.6437388",
"0.6357339",
"0.6284529",
"0.62668526",
"0.6243657",
"0.60278153",
"0.58707553",
"0.5841685",
"0.57336885",
"0.57220095",
"0.5721565",
"0.5681233",
"0.5678193",
"0.5633998",
"0.56086236",
"0.5588203",
"0.5504313",
"0.53330755",
"0.5328091",
"0.53154695",
"0.5260661"
] | 0.735149 | 1 |
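For contrast with the flipper rule above, a sketch of the plain pulse-on-hit semantics using the same hypothetical names (again not the real platform API): the release edge is ignored, so a started pulse always runs to completion — the autofire/pop-bumper case.

```python
def handle_switch_event(event, driver, enable_switch):
    switch, state = event
    if switch == enable_switch and state == "hit":
        driver.pulse()  # fixed-length pulse; releasing the switch does not cut it short
```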
Computes the Kronecker product of a sequence of matrices. | def kron(*matrices: np.ndarray) -> np.ndarray:
product = np.eye(1)
for m in matrices:
product = np.kron(product, m)
return np.array(product) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kronecker_product(mat1, mat2):\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])",
"def _kronecker_product(mat1: tf.Tensor, mat2: tf.Tensor) -> tf.Tensor:\n m1, n1 = mat1.get_shape().as_list()\n mat1_rsh = tf.reshape(mat1, [m1, 1, n1, 1])\n m2, n2 = mat2.get_shape().as_list()\n mat2_rsh = tf.reshape(mat2, [1, m2, 1, n2])\n return tf.reshape(mat1_rsh * mat2_rsh, [m1 * m2, n1 * n2])",
"def kronecker(self, value):\n if not (type(self) == type(value)):\n raise TypeError(\"Inappropriate argument type for kronecker product\")\n returnvalue = Matrix()\n for i in range(self._height):\n for j in range(value._height):\n newRow = list()\n for k in range(self._width):\n for l in range(value._width):\n newRow.append(self[i][k] * value[j][l])\n returnvalue.addRow(*newRow)\n return returnvalue",
"def kron_prod(matList):\n ret = matList[0]\n for i in range(1, len(matList)):\n ret = kron(ret, matList[i])\n return ret",
"def kronecker_prod(x, y):\n if len(list(x.size())) != 3 or len(list(y.size())) != 3:\n raise ValueError(\"An input is not of the right dimension.\")\n\n z = torch.zeros(\n 2,\n x.size()[1] * y.size()[1],\n x.size()[2] * y.size()[2],\n dtype=torch.double,\n device=x.device,\n )\n\n row_count = 0\n\n for i in range(x.size()[1]):\n for k in range(y.size()[1]):\n column_count = 0\n for j in range(x.size()[2]):\n for l in range(y.size()[2]):\n\n z[0][row_count][column_count] = (x[0][i][j] * y[0][k][l]) - (\n x[1][i][j] * y[1][k][l]\n )\n z[1][row_count][column_count] = (x[0][i][j] * y[1][k][l]) + (\n x[1][i][j] * y[0][k][l]\n )\n\n column_count += 1\n row_count += 1\n\n return z",
"def kronecker_operators(*args):\n return reduce(wrapped_kronecker, *args)",
"def wrapped_kronecker(operator_1, operator_2):\n return scipy.sparse.kron(operator_1, operator_2, 'csc')",
"def multiply_matrices(list):\n # Section 1: Start matrix product using 1st matrix in list\n matrix_product = list[0]\n\n # Section 2: Loop thru list to create product\n for matrix in list[1:]:\n matrix_product = matrix_multiply(matrix_product, matrix)\n\n return matrix_product",
"def khatrirao(matrices, skip_matrix=None, reverse=False):\n matrices = list(matrices)\n\n if skip_matrix is not None:\n if skip_matrix > -1:\n matrices = [matrices[i]\n for i in range(len(matrices)) if i != skip_matrix]\n else:\n raise ValueError('Wrong skip_matrix: {}'.format(skip_matrix))\n\n if matrices[0].ndim == 1:\n matrices[0] = np.reshape(matrices[0], [1, -1])\n\n n_columns = matrices[0].shape[1]\n\n # Optional part, testing whether the matrices have the proper size\n for i, matrix in enumerate(matrices):\n if matrix.ndim != 2:\n raise ValueError('All the matrices must have exactly 2 dimensions!'\n 'Matrix {} has dimension {} != 2.'.format(\n i, matrix.ndim))\n if matrix.shape[1] != n_columns:\n raise ValueError('All matrices must have same number of columns!'\n 'Matrix {} has {} columns != {}.'.format(\n i, matrix.shape[1], n_columns))\n\n n_factors = len(matrices)\n\n if reverse:\n matrices = matrices[::-1]\n # Note: we do NOT use .reverse() which would reverse matrices\n # even outside this function\n\n start = ord('a')\n common_dim = 'z'\n target = ''.join(chr(start + i) for i in range(n_factors))\n source = ','.join(i + common_dim for i in target)\n operation = source + '->' + target + common_dim\n return np.einsum(operation, *matrices).reshape((-1, n_columns))",
"def power_matrix(A, k):\n nrow = np.shape(A)[0]\n A0 = np.identity(nrow) \n for k in range(q):\n A0 = np.dot(A0, A)\n \n return A0",
"def __mul__(left, right):\n \n if isinstance(left, Plucker) and isinstance(right, Plucker):\n # reciprocal product\n return np.dot(left.uw, right.v) + np.dot(right.uw, left.v)\n elif isinstance(left, Plucker) and arg.ismatrix(right, (4,None)):\n return left.skew @ right; # postmultiply by 4xN",
"def multikron(a):\n return _reduce(_np.kron, a)",
"def khatri_rao(matrices):\n\n n_columns = matrices[0].shape[1]\n n_factors = len(matrices)\n\n start = ord('a')\n common_dim = 'z'\n target = ''.join(chr(start + i) for i in range(n_factors))\n source = ','.join(i+common_dim for i in target)\n operation = source+'->'+target+common_dim\n return np.einsum(operation, *matrices).reshape((-1, n_columns))",
"def kron(a, b):\r\n a = tensor.as_tensor_variable(a)\r\n b = tensor.as_tensor_variable(b)\r\n if (a.ndim + b.ndim <= 2):\r\n raise TypeError('kron: inputs dimensions must sum to 3 or more. '\r\n 'You passed %d and %d.' % (a.ndim, b.ndim))\r\n o = tensor.outer(a, b)\r\n o = o.reshape(tensor.concatenate((a.shape, b.shape)),\r\n a.ndim + b.ndim)\r\n shf = o.dimshuffle(0, 2, 1, * range(3, o.ndim))\r\n if shf.ndim == 3:\r\n shf = o.dimshuffle(1, 0, 2)\r\n o = shf.flatten()\r\n else:\r\n o = shf.reshape((o.shape[0] * o.shape[2],\r\n o.shape[1] * o.shape[3]) +\r\n tuple([o.shape[i] for i in range(4, o.ndim)]))\r\n return o",
"def multiply_matrices(a, b):\n try:\n x = len(b[0])\n except:\n b = make_2D(b)\n try:\n x = len(a[0])\n except:\n a = make_2D(a)\n if len(a[0]) != len(b):\n print 'error: matrices cannot be multiplied'\n return\n out = np.zeros((len(a), len(b[0])))\n for i in range(len(out)):\n for j in range(len(out[0])):\n sum = 0\n for k in range(len(a[i])):\n sum += a[i][k] * b[k][j]\n out[i][j] = sum\n return out",
"def outer_product(input_sets, axis=0):\n out = cartesian_product(input_sets)\n return np.prod(out, axis=axis)\n\n # try:\n # from pyapprox.cython.utilities import outer_product_pyx\n # # fused type does not work for np.in32, np.float32, np.int64\n # # so envoke cython cast\n # if np.issubdtype(input_sets[0][0], np.signedinteger):\n # return outer_product_pyx(input_sets, 1)\n # if np.issubdtype(input_sets[0][0], np.floating):\n # return outer_product_pyx(input_sets, 1.)\n # else:\n # return outer_product_pyx(input_sets, input_sets[0][0])\n # except ImportError:\n # print('outer_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = len(input_sets[ii])\n # num_elems *= sizes[ii]\n\n # # try:\n # # from pyapprox.weave import c_outer_product\n # # return c_outer_product(input_sets)\n # # except:\n # # print ('outer_product extension failed')\n\n # result = np.empty((num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # result[ii] = 1.0\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # result[ii] *= input_sets[jj][multi_index[jj]]\n\n # return result",
"def khatri_rao(matrices, weights=None, skip_matrix=None, mask=None):\n if skip_matrix is not None:\n matrices = [matrices[i] for i in range(len(matrices)) if i != skip_matrix]\n\n # Khatri-rao of only one matrix: just return that matrix\n if len(matrices) == 1:\n return matrices[0]\n\n if T.ndim(matrices[0]) == 2:\n n_columns = matrices[0].shape[1]\n else:\n n_columns = 1\n matrices = [T.reshape(m, (-1, 1)) for m in matrices]\n warnings.warn(\n \"Khatri-rao of a series of vectors instead of matrices. \"\n \"Considering each as a matrix with 1 column.\"\n )\n\n # Testing whether the matrices have the proper size\n for i, matrix in enumerate(matrices):\n if T.ndim(matrix) != 2:\n raise ValueError(\n \"All the matrices must have exactly 2 dimensions!\"\n f\"Matrix {i} has dimension {T.ndim(matrix)} != 2.\"\n )\n if matrix.shape[1] != n_columns:\n raise ValueError(\n \"All matrices must have same number of columns!\"\n f\"Matrix {i} has {matrix.shape[1]} columns != {n_columns}.\"\n )\n\n shared_dim = \"a\"\n start = ord(\"b\")\n individual_dims = [chr(start + i) for i in range(len(matrices))]\n equation = \",\".join(f\"{i}{shared_dim}\" for i in individual_dims)\n\n if weights is not None:\n equation += f\",{shared_dim}\"\n matrices = matrices + [weights]\n\n if mask is not None:\n equation += \",\" + \"\".join(individual_dims)\n matrices.append(mask)\n\n equation += \"->\" + \"\".join(individual_dims) + shared_dim\n\n return T.reshape(T.einsum(equation, *matrices), (-1, n_columns))",
"def multiM(*args):\r\n filas_1,filas_2 = len(args[0]),len(args[1])\r\n columnas_1,columnas_2 = len(args[0][0]),len(args[1][0])\r\n matriz_r = []\r\n for k in range(filas_1):\r\n matriz_r.append([0]*columnas_2)\r\n for i in range(columnas_2):\r\n matriz_r[k][i] = 0\r\n for i in range(filas_1):\r\n for j in range(columnas_1):\r\n for k in range(columnas_2):\r\n matriz_r[i][k] = matriz_r[i][k] + args[0][i][j] * args[1][j][k]\r\n return matriz_r",
"def _canonical_kr(B, C):\n n, p = B.shape\n m, pC = C.shape\n A = np.zeros((n * m, p))\n for k in range(B.shape[-1]):\n A[:, k] = np.kron(B[:, k], C[:, k])\n return A",
"def _kp(a, b):\n if a.shape != b.shape or a.shape[-1] != 1:\n raise(ValueError)\n N = a.shape[0]\n # take the outer product over the last two axes, then reshape:\n return np.einsum('ijk,ilk->ijkl', a, b).reshape(N, -1, 1)",
"def jordan_wigner_ladder_sparse(n_qubits, tensor_factor, ladder_type):\n parities = tensor_factor * [pauli_z_csc]\n identities = [\n scipy.sparse.identity(2**(n_qubits - tensor_factor - 1),\n dtype=complex,\n format='csc')\n ]\n if ladder_type:\n operator = kronecker_operators(parities + [q_raise_csc] + identities)\n else:\n operator = kronecker_operators(parities + [q_lower_csc] + identities)\n return operator",
"def kr(B, C):\n if B.ndim != 2 or C.ndim != 2:\n raise ValueError(\"B and C must have 2 dimensions\")\n\n n, p = B.shape\n m, pC = C.shape\n\n if p != pC:\n raise ValueError(\"B and C must have the same number of columns\")\n\n return np.einsum('ij, kj -> ikj', B, C).reshape(m * n, p)",
"def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass",
"def matrix_mult(m1, m2):\n pass",
"def outer_product(x):\n return keras.backend.batch_dot(\n x[0]\n , x[1]\n , axes=[1,1]\n ) / x[0].get_shape().as_list()[1]",
"def matrixPowers(S,K):\n # S can be either a single GSO (N x N) or a collection of GSOs (E x N x N)\n if len(S.shape) == 2:\n N = S.shape[0]\n assert S.shape[1] == N\n E = 1\n S = S.reshape(1, N, N)\n scalarWeights = True\n elif len(S.shape) == 3:\n E = S.shape[0]\n N = S.shape[1]\n assert S.shape[2] == N\n scalarWeights = False\n\n # Now, let's build the powers of S:\n thisSK = np.tile(np.eye(N, N).reshape(1,N,N), [E, 1, 1])\n SK = thisSK.reshape(E, 1, N, N)\n for k in range(1,K):\n thisSK = thisSK @ S\n SK = np.concatenate((SK, thisSK.reshape(E, 1, N, N)), axis = 1)\n # Take out the first dimension if it was a single GSO\n if scalarWeights:\n SK = SK.reshape(K, N, N)\n\n return SK",
"def kronecker_graph(g, k, add_self_edges=True, strip_self_edges=True):\n\n adj = nx.adjacency_matrix(g).todense()\n if add_self_edges:\n for i in range(len(adj)):\n adj[i, i] = 1\n mat = adj\n for i in range(k - 1):\n mat = np.kron(mat, adj)\n if strip_self_edges:\n for i in range(len(mat)):\n mat[i, i] = 0\n name = \"kronecker(%s, %s, %s, %s)\" % (\n g.name if g.name else hash(g), k, add_self_edges, strip_self_edges)\n return nx.Graph(mat, name=name)",
"def nkron(*args):\n return reduce(np.kron, args, np.array([1]))",
"def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here",
"def matmul():\n\n if RESULT_IN_NVRAM:\n matrix_c = ResultMatrixInDaos()\n else:\n matrix_c = ResultMatrixInMemory()\n\n # This could be trivially optimized by reordering indexes\n # and caching either a_block or b_block (assuming C in-memory).\n # *However* it would result in unfair comparisons with the \n # previous implementation used elsewhere.\n # Using the naive algorithm makes sense for a raw comparison.\n for i in range(MATRIXSIZE):\n for j in range(MATRIXSIZE):\n partial_result_block = np.zeros((BLOCKSIZE, BLOCKSIZE))\n\n for k in range(MATRIXSIZE):\n a_block = np.fromstring(\n DAOS_KV[\"A%02d%02d\" % (i, k)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n b_block = np.fromstring(\n DAOS_KV[\"B%02d%02d\" % (k, j)],\n dtype=NP_FROMSTRING_DTYPE\n ).reshape((BLOCKSIZE, BLOCKSIZE))\n\n partial_result_block += a_block @ b_block\n \n matrix_c[i,j] = partial_result_block\n\n return matrix_c"
] | [
"0.74108386",
"0.7209864",
"0.7069195",
"0.69637865",
"0.6961648",
"0.68957955",
"0.6489303",
"0.61083525",
"0.60522455",
"0.5999372",
"0.5961622",
"0.59576",
"0.5849226",
"0.581817",
"0.57675886",
"0.57403356",
"0.5626816",
"0.5555709",
"0.5546035",
"0.55225235",
"0.55220044",
"0.54923695",
"0.5492368",
"0.54688114",
"0.5453334",
"0.5449206",
"0.5445028",
"0.54397184",
"0.54141784",
"0.53985"
] | 0.7603846 | 0 |
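The record above folds `np.kron` over a sequence of matrices, seeded with a 1x1 identity; a small NumPy check of that pattern (illustrative only, not part of the dataset):

```python
import numpy as np
from functools import reduce

def kron_all(*matrices):
    # Fold np.kron over the sequence, starting from the 1x1 identity,
    # mirroring the loop in the document above.
    return reduce(np.kron, matrices, np.eye(1))

X = np.array([[0.0, 1.0], [1.0, 0.0]])
I2 = np.eye(2)
# Kronecker products multiply shapes: (2,2) with (2,2) gives (4,4).
assert kron_all(X, I2).shape == (4, 4)
# Seeding with eye(1) leaves a single argument unchanged.
assert np.allclose(kron_all(X), X)
```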
r""" Matches any differential equation that nth_algebraic can solve. Uses `sympy.solve` but teaches it how to integrate derivatives. This involves calling `sympy.solve` and does most of the work of finding a solution (apart from evaluating the integrals). | def _matches(self):
eq = self.ode_problem.eq
func = self.ode_problem.func
var = self.ode_problem.sym
# Derivative that solve can handle:
diffx = self._get_diffx(var)
# Replace derivatives wrt the independent variable with diffx
def replace(eq, var):
def expand_diffx(*args):
differand, diffs = args[0], args[1:]
toreplace = differand
for v, n in diffs:
for _ in range(n):
if v == var:
toreplace = diffx(toreplace)
else:
toreplace = Derivative(toreplace, v)
return toreplace
return eq.replace(Derivative, expand_diffx)
# Restore derivatives in solution afterwards
def unreplace(eq, var):
return eq.replace(diffx, lambda e: Derivative(e, var))
subs_eqn = replace(eq, var)
try:
# turn off simplification to protect Integrals that have
# _t instead of fx in them and would otherwise factor
# as t_*Integral(1, x)
solns = solve(subs_eqn, func, simplify=False)
except NotImplementedError:
solns = []
solns = [simplify(unreplace(soln, var)) for soln in solns]
solns = [Equality(func, soln) for soln in solns]
self.solutions = solns
return len(solns) != 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solve_differential_equation(f_derivatives, initial, oldest=120):\n bunch = solve_ivp(f_derivatives, t_span=(0, oldest), y0=initial, vectorized=True, dense_output=True)\n return bunch.sol",
"def get_equations(self):\n dyn = self.dynamics\n tds = dyn.time_derivatives\n eqs = []\n _flags=[]\n if(tds):\n for t in tds:\n ex = Expression(t.value)\n self.get_refractory()\n for i in self.uf:\n if(self.uf==t):\n _flags=['unless refractory']\n s = SingleEquation('differential equation', t.variable, self.timederivative_dimension(t), 'float', ex, flags=_flags)\n eqs.append(s)\n\n\n rgs = dyn.regimes\n for r in rgs:\n rtds = r.time_derivatives\n if(rtds):\n for rtd in rtds:\n ex = Expression(rtd.value)\n self.get_refractory()\n for i in self.uf :\n if(i==rtd):\n _flags=['unless refractory']\n s = SingleEquation('differential equation', rtd.variable, self.timederivative_dimension(rtd), 'float', ex, flags=_flags)\n eqs.append(s)\n dvs = dyn.derived_variables\n for d in dvs:\n if(d.value):\n ex = Expression(d.value)\n s = SingleEquation('subexpression', d.name, self.getBrianSIUnits(d.dimension), 'float', ex)\n eqs.append(s)\n return Equations(eqs)",
"def solve(self, d, t, x0 = None, **kwargs):\n if x0 == None:\n x0 = np.array([0., d])\n #x0 = np.array([self.Om*self.A0/np.sqrt(self.Q**2*(1-self.Om**2)**2 + self.Om**2), d])\n sol = solve_ivp(self.get_ode(d), (t[0], t[-1]), x0, t_eval = t, vectorized = True, **kwargs)\n\n return sol",
"def solve(self, niter):\n E = self.list_PDE[0]\n I = self.list_PDE[1]\n\n # ...\n un = E.unknown\n unew = I.unknown\n\n# un.set(E.rhs)\n # ...\n\n # ...\n for i in range(0,niter):\n\n rhs = E.dot(un)\n I.solve(rhs)\n\n un.set(unew)\n # ...",
"def check_solutions(eq):\n s = diophantine(eq)\n\n factors = Mul.make_args(eq)\n\n var = list(eq.free_symbols)\n var.sort(key=default_sort_key)\n\n while s:\n solution = s.pop()\n for f in factors:\n if diop_simplify(f.subs(zip(var, solution))) == 0:\n break\n else:\n return False\n return True",
"def symbolic_solve(expr, x, y, xvals, varsol, bound_expr):\n\n # return function from expression\n fun = lambdify((x, y), expr, 'numpy')\n max_fun = lambdify((x, y), bound_expr, 'numpy')\n\n # solutions over varsol\n match = fun(np.expand_dims(xvals, axis=1), varsol)\n\n # closest match to ~ 0. (i.e. supply ~ demand)\n idx = bn.nanargmin(abs(match), axis=1)\n\n # solution with approximate minimizing\n sol = np.asarray([varsol[e, idx[e]] for e in range(len(xvals))])\n\n # deal with mismatches by only allowing up to 5% variation around An\n up = abs(max_fun(xvals, sol))\n mismatch = bn.nanmin(abs(match), axis=1) <= 0.05 * up\n mismatch = mismatch.astype(int)\n\n if all(mismatch) == 0: # no precise enough match\n mismatch[1] = 1 # pick 1st valid value\n\n sol = np.ma.masked_where(idx == 0, sol)\n sol = np.ma.masked_where(mismatch == 0, sol)\n\n return sol",
"def dismod_solution(iota, rho, chi, omega):\n f_b = build_derivative_full(iota, rho, chi, omega)\n bunch = solve_differential_equation(f_b, initial=np.array([1.0 - 1e-6, 1e-6], dtype=np.float))\n S = lambda t: bunch(t)[0]\n C = lambda t: bunch(t)[1]\n return S, C",
"def solve_ode(self, n_evals, atol = 1e-6, rtol = 1e-4, print_time = False,max_step = np.inf):\n st = time.time()\n t_eval = np.linspace(0,self.t_ode,int(n_evals * self.t_ode) + 1 ).ravel()\n solution = solve_ivp(self.f, self.t_span,self.s0[:3*self.N], t_eval = t_eval, rtol = rtol, atol = atol, max_step = max_step )\n end = time.time()\n if print_time:\n print(\"Solution of ODE with n_evals: \" + str(n_evals) +\", t_ode: \" + str(self.t_ode) \n + \", took: \" + str(end-st) + \" s.\")\n return solution.y.T",
"def test_solver():\n # Choice of nonlinear coefficient\n m = 2\n\n def q(u):\n return (1+u)**m\n\n def Dq(u):\n return m*(1+u)**(m-1)\n\n u_exact = Expression(\n 'pow((pow(2, m+1)-1)*x[0] + 1, 1.0/(m+1)) - 1', m=m)\n linear_solver = 'direct'\n errors = []\n for method in 'alg_Newton', 'pde_Newton':\n for J_comp in 'manual', 'automatic':\n for degree in 1, 2, 3:\n error_prev = -1\n for divisions in [(10, 10), (20, 20), (40, 40)]:\n u = solver(\n q, Dq, f, divisions, degree,\n method, J_comp,\n linear_solver,\n abs_tol_Krylov=1E-10,\n rel_tol_Krylov=1E-10,\n abs_tol_Newton=1E-10,\n rel_tol_Newton=1E-10)\n\n # Find max error\n u_e = interpolate(u_exact, u.function_space())\n import numpy as np\n error = np.abs(u_e.vector().array() -\n u.vector().array()).max()\n # Expect convergence as h**(degree+1)\n if error_prev > 0:\n frac = abs(error - error_prev/2**(degree+1))\n errors.append(frac)\n error_prev = error\n tol = 4E-5\n for error_reduction in errors:\n assert error_reduction < tol, error_reduction",
"def integrator(find=\"v_o\", printEq=False, **kwargs):\n eq = list()\n eq.append(\"Eq(i_R, i_C\")\n eq.append(\"Eq(i_R, vi/R\")\n eq.append(\"Eq(i_C, -C*d_vo/dt\")\n eq.append(\"Eq(i_C, -C*diff(vo,t)\")\n eq.append(\"Eq(vo, -1/(R*C) * integrate(vi,(t,0,tf)))\")\n return solveEqs(eq, find, printEq=printEq, **kwargs)",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,\n notifications = False\n ):\n\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just one element. When we put (par_est), the parenteses won't\n indicate a tuple\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def test_solve_ex_2_11(self):\n\n def f_a(x):\n return x - x ** 3 - 4 * x ** 2 + 10\n\n def f_b(x):\n inner = 10 / x - 4 * x\n logger.info(\"Performing sqrt({})\".format(inner))\n return math.sqrt(inner)\n\n logger.info('-' * 40)\n # f_a(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as it diverges and oscillates.\n iterate.solve(f_a, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)\n\n with self.assertRaises(ValueError):\n # f_b(x) cannot be used to solve x^3 + 4x^2 - 10 = 0 as the 3rd iteration attempts to root a -ve number.\n iterate.solve(f_b, estimate=1.5, iterations=5, logger=logger)\n logger.info('-' * 40)",
"def test_solve_quadratic_fixed(self):\n iden1 = Identity()\n iden2 = Identity()\n iden3 = Identity()\n iden1.x.val = 4\n iden2.x.val = 5\n iden3.x.val = 6\n iden1.x.name = 'x1'\n iden2.x.name = 'x2'\n iden3.x.name = 'x3'\n iden2.x.fixed = False\n term1 = LeastSquaresTerm(iden1.target, 1, 1)\n term2 = LeastSquaresTerm(iden2.target, 2, 2)\n term3 = LeastSquaresTerm(iden3.target, 3, 3)\n prob = LeastSquaresProblem([term1, term2, term3])\n prob.solve()\n self.assertAlmostEqual(prob.objective, 10)\n self.assertAlmostEqual(iden1.x.val, 4)\n self.assertAlmostEqual(iden2.x.val, 2)\n self.assertAlmostEqual(iden3.x.val, 6)",
"def rhs(y, t, l, m, g):\n # Unpack the states so you can use the variable names in the\n # sympy.physics.mechanics equations\n q1 = y[0]\n q2 = y[1]\n u1 = y[2]\n u2 = y[3]\n # or you can make use of python's tuple unpacking for a one liner\n # q1, q2, u1, u2 = y\n\n # Initialize a vector for the derivatives.\n dydt = zeros((len(y)))\n\n # Compute the derivatives, these are pasted in from the\n # sympy.physics.mechanics results.\n dydt[0] = u1\n dydt[1] = u2\n dydt[2] = (-g*sin(q1)*sin(q2)**2 + 2*g*sin(q1) -\n g*sin(q2)*cos(q1)*cos(q2) + 2*l*u1**2*sin(q1)*cos(q1)*cos(q2)**2 -\n l*u1**2*sin(q1)*cos(q1) - 2*l*u1**2*sin(q2)*cos(q1)**2*cos(q2) +\n l*u1**2*sin(q2)*cos(q2) + l*u2**2*sin(q1)*cos(q2) -\n l*u2**2*sin(q2)*cos(q1))/(l*(sin(q1)**2*sin(q2)**2 +\n 2*sin(q1)*sin(q2)*cos(q1)*cos(q2) + cos(q1)**2*cos(q2)**2 - 2))\n dydt[3] = (-sin(q1)*sin(q2)/2 - cos(q1)*cos(q2)/2)*(2*g*l*m*sin(q1) -\n l**2*m*(-sin(q1)*cos(q2) +\n sin(q2)*cos(q1))*u2**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2)) -\n l**2*m) + (g*l*m*sin(q2) - l**2*m*(sin(q1)*cos(q2) -\n sin(q2)*cos(q1))*u1**2)/(l**2*m*(sin(q1)*sin(q2)/2 +\n cos(q1)*cos(q2)/2)*(sin(q1)*sin(q2) + cos(q1)*cos(q2))\n - l**2*m)\n\n # Return the derivatives.\n return dydt",
"def getExactSoln(phi_ic, c, nt):\n \n dimx = len(phi_ic) \n\n lag = int(c*nt)\n\n # phiExact stores the exact solution phi(x-ut)\n phiExact = np.roll(phi_ic, lag)\n \n #impose periodic boundary condn\n phiExact[dimx-1] = phiExact[0]\n \n return phiExact",
"def solver(equation, verbose, short, round_to):\n\tnoms = normalize(equation)\n\tequat_orig = convert_to_list(noms)\n\tif not equat_orig:\n\t\treturn\n\tif verbose:\n\t\tcount_terms(equat_orig)\n\tequat_reduced = equat_reduce(equat_orig)\n\tpolyn_deg = get_polyn_degree(equat_reduced)\n\tprint(f'\\033[93mReduced form is: \\033[0m{format_equation_output(equat_reduced, polyn_deg, short)}')\n\tcompute_roots(equat_reduced, polyn_deg, verbose, round_to)\n\treturn",
"def solve_1rst_degree_poly(expr):\n eq = sympy.Eq(expr)\n # `sympy.Eq` returns True (sympy-type True)\n # when the the generated expression is 2x-2x=0 or 0x=0,\n # and False when 0x=4.\n if eq is sympy.sympify(True):\n return AnyNumber\n elif eq is sympy.sympify(False):\n return NoSolution\n else:\n return sympy.solve(eq, x)[0]",
"def fun_unit_sol(d, N = 100):\n\n contFrac = cont_frac_sqrt(d)\n\n if d%4 == 2 or d%4 == 3:\n # solution to x^2 - dy^2 = 1 or -1\n # use Rosen, Theorem 11.5 page 404\n\n period = 0\n\n for k in range(1, N):\n if check_for_period(contFrac, k) == True:\n period = k\n break\n if k == N-1:\n raise ValueError(\"Period is larger than N, increase N.\")\n\n x = convergent_num(contFrac, period)\n y = convergent_den(contFrac, period)\n return [x,y]\n\n else:\n # solution to x^2 - dy^2 = 4 or -4\n # must search for solution\n # use Rosen, Theorems 11.3 (page 402) and 11.4 (page 403)\n\n # hardwire these\n if d == 5:\n return [1,1]\n if d == 13:\n return [3, 1]\n\n period = 0\n\n for k in range(1, N):\n if check_for_period(contFrac, k) == True:\n period = k\n break\n if k == N-1:\n raise ValueError(\"Period is larger than N, increase N.\")\n\n x = convergent_num(contFrac, period)\n y = convergent_den(contFrac, period)\n sols = [[2*x,2*y]]\n\n # search for a solution\n for k in range(1, period + 1):\n # convergents are an increasing sequence\n # so this will produce minimal solution\n x = convergent_num(contFrac, k)\n y = convergent_den(contFrac, k)\n\n if x**2 - d*y**2 == 4:\n sols.append([x,y])\n if x**2 - d*y**2 == -4:\n sols.append([x,y])\n\n minX = sols[0][0]\n ansIndex = 0\n for i in range(1, len(sols)):\n if sols[i][0] < minX:\n minX = sols[i][0]\n ansIndex = i\n return sols[ansIndex]",
"def test_exact_numerical_solution():\n a = 0.2\n b = 3\n\n def f(t, u):\n return a # + (u - u_exact(t))**5\n\n def u_exact(t):\n \"\"\"Exact u(t) corresponding to f above.\"\"\"\n return a * t + b\n\n u0 = u_exact(0)\n T = 8\n N = 10\n tol = 1E-15\n #t_points = np.linspace(0, T, N)\n t_span = (0, T)\n for solver_class in registered_solver_classes:\n solver = solver_class(f)\n solver.set_initial_condition(u0)\n t, u = solver.solve(t_span, N)\n u_e = u_exact(t)\n max_error = (u_e - u).max()\n msg = f'{solver.__class__.__name__} failed with max_error={max_error}'\n assert max_error < tol, msg",
"def solve(self):\n\n if self.degree > 2:\n return \"The polynomial degree is strictly greater than 2, I can't solve.\"\n \n elif self.degree == 0:\n \"\"\"a * X^0 = 0\"\"\" \n a = self.all_terms[0].coefficient\n if a != 0:\n return \"The eqution has no solution\"\n else:\n return \"Every real number is a solution\"\n\n elif self.degree == 1:\n \"\"\"a * X^1 + b * X^0 = 0\"\"\"\n a = self.all_terms[1].coefficient\n b = self.all_terms[0].coefficient\n return formula.linear(a, b)\n\n elif self.degree == 2:\n \"\"\"a * X^2 + b * X^1 + c * X^0 = 0\"\"\"\n a = self.all_terms[2].coefficient\n b = self.all_terms[1].coefficient\n c = self.all_terms[0].coefficient\n discriminant = (b ** 2) - (4 * a * c)\n two_a = 2 * a\n if discriminant == 0:\n return formula.linear(two_a, b)\n else:\n if discriminant > 0:\n return formula.quadratic(two_a, b, discriminant)\n else:\n return formula.quadratic(two_a, b, discriminant, simple=False)",
"def solve_part2(input, verbose=False):\n equations = parse(input)\n\n result = []\n for eq in equations:\n result.append(solve_equation_addition_precendence(eq, verbose))\n\n if verbose:\n print(f\"results: {result}\")\n\n return sum(result)",
"def solve(self, times: np.ndarray):\n # Should probably use solve_ivp instead\n return odeint(self._eqns, self._initial_conditions, times).T",
"def _solve_scalar_quadratic_equation(z, d):\n a = d.T @ d\n b = 2 * z.T @ d\n c = z.T @ z - 1\n sqrt_discriminant = np.sqrt(b * b - 4 * a * c)\n\n aux = b + np.copysign(sqrt_discriminant, b)\n ta = -aux / (2 * a)\n tb = -2 * c / aux\n\n return sorted([ta, tb])",
"def solve(self):\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n '''\n Its important to note that (par_est,) is the way to define a tuple\n with just ode element. When we put (par_est), the parenteses won't\n indicate a typle\n '''\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def solve(self,notifications = False):\n if notifications:\n print('[info]: Solving differential equations for '+self.name+' model. ')\n \n # getting the time values\n self.days_list = np.linspace(self.tbeg,self.tend,self.npoints)\n\n # calling the odeint method to solve the diff. equations\n self.x = odeint(self.diff_eq,self.x0,self.days_list,args = (self.par,))\n \n #setting the variables\n self.confirmed_list = self.x[:,1] + self.x[:,2] + self.x[:,3]\n self.recovered_list = self.x[:,2]\n self.death_list = self.x[:,3]",
"def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. - sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res",
"def differentiator(find=\"v_o\", printEq=False, **kwargs):\n eq = list()\n eq.append(\"Eq(i_R, i_C\")\n eq.append(\"Eq(i_R, -vo/R\")\n eq.append(\"Eq(i_C, C*d_vi/dt\")\n eq.append(\"Eq(i_C, C*diff(vi,t)\")\n eq.append(\"Eq(vo, -R*C*d_vi/dt)\")\n eq.append(\"Eq(vo, -R*C*diff(vi,t))\")\n return solveEqs(eq, find, printEq=printEq, **kwargs)",
"def parse_simple_eqn(equation=\"\"):\n # Define replacement rules.\n simple_replacements = [[' ', ''],\n ['**', '^'], ['*', ' \\\\cdot '],\n ['math.', ''], ['np.', ''],\n ['pi', '\\\\pi'] , ['tan', '\\\\tan'],\n ['cos', '\\\\cos'], ['sin', '\\\\sin'],\n ['sec', '\\\\sec'], ['csc', '\\\\csc']]\n complex_replacements = [['^', '{{{i1}}}^{{{i2}}}'],\n ['_', '{{{i1}}}_{{{i2}}}'],\n ['/', '\\\\frac{{{i1}}}{{{i2}}}'],\n ['sqrt','\\\\sqrt{{{i2}}}']]\n # Carry out simple replacements\n for pair in simple_replacements:\n equation = equation.replace(pair[0], pair[1])\n # Now complex replacements\n for item in ['*', '/', '+', '-', '^', '_', ',', 'sqrt']:\n equation = equation.replace(item, ' ' + item + ' ')\n q_split = equation.split()\n for index, item in enumerate(q_split):\n for pair in complex_replacements:\n if item == pair[0]:\n if item == 'sqrt':\n match_str = \" \".join(q_split[index:index+2])\n else:\n match_str = \" \".join(q_split[index-1:index+2])\n equation = equation.replace(match_str, pair[1].format(\n i1=q_split[index-1], i2=q_split[index+1]))\n return equation"
] | [
"0.5892715",
"0.58814234",
"0.5792888",
"0.5772103",
"0.57659477",
"0.5739992",
"0.5614944",
"0.56034535",
"0.5599466",
"0.5571074",
"0.5567494",
"0.5567494",
"0.5477489",
"0.54729176",
"0.545909",
"0.5450426",
"0.5439577",
"0.5438245",
"0.54256886",
"0.5413706",
"0.54007506",
"0.5381577",
"0.5381247",
"0.5376448",
"0.53700507",
"0.5369105",
"0.5369105",
"0.5336284",
"0.53103644",
"0.53024215"
] | 0.6641433 | 0 |
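The record above is SymPy's internal matcher for "algebraic" ODEs; a short sketch of the same idea through the public `dsolve` entry point using the `nth_algebraic` hint (the printed solutions are indicative and may differ slightly between SymPy versions):

```python
from sympy import Eq, Function, dsolve, symbols

x = symbols("x")
f = Function("f")

# The equation is algebraic in f'(x): solve() isolates the derivative,
# and each branch is then integrated to produce a solution.
eq = Eq(f(x) * (f(x).diff(x)**2 - 1), 0)
solutions = dsolve(eq, f(x), hint="nth_algebraic")
# Expected, roughly: [Eq(f(x), 0), Eq(f(x), C1 - x), Eq(f(x), C1 + x)]
print(solutions)
```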
r""" Helper function to match hint ``linear_coefficients``. Matches the expression to the form `(a_1 x + b_1 f(x) + c_1)/(a_2 x + b_2 | def _linear_coeff_match(self, expr, func):
f = func.func
x = func.args[0]
def abc(eq):
r'''
Internal function of _linear_coeff_match
that returns Rationals a, b, c
if eq is a*x + b*f(x) + c, else None.
'''
eq = _mexpand(eq)
c = eq.as_independent(x, f(x), as_Add=True)[0]
if not c.is_Rational:
return
a = eq.coeff(x)
if not a.is_Rational:
return
b = eq.coeff(f(x))
if not b.is_Rational:
return
if eq == a*x + b*f(x) + c:
return a, b, c
def match(arg):
r'''
Internal function of _linear_coeff_match that returns Rationals a1,
b1, c1, a2, b2, c2 and a2*b1 - a1*b2 of the expression (a1*x + b1*f(x)
+ c1)/(a2*x + b2*f(x) + c2) if one of c1 or c2 and a2*b1 - a1*b2 is
non-zero, else None.
'''
n, d = arg.together().as_numer_denom()
m = abc(n)
if m is not None:
a1, b1, c1 = m
m = abc(d)
if m is not None:
a2, b2, c2 = m
d = a2*b1 - a1*b2
if (c1 or c2) and d:
return a1, b1, c1, a2, b2, c2, d
m = [fi.args[0] for fi in expr.atoms(Function) if fi.func != f and
len(fi.args) == 1 and not fi.args[0].is_Function] or {expr}
m1 = match(m.pop())
if m1 and all(match(mi) == m1 for mi in m):
a1, b1, c1, a2, b2, c2, denom = m1
return (b2*c1 - b1*c2)/denom, (a1*c2 - a2*c1)/denom | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_linear_coef(p1, p2):\n slope = (p1[1] - p2[1]) / (p1[0] - p2[0])\n intercept = p1[1] - slope * p1[0]\n return slope, intercept",
"def _parse_linear(expr: Expr, symb_only=False):\n\n coeff, factors = expr.as_coeff_mul()\n\n if len(factors) == 0:\n return coeff, None\n elif len(factors) == 1:\n factor = factors[0]\n if symb_only and not isinstance(factor, Symbol):\n return None, None\n return coeff, factor\n else:\n return None, None",
"def linear(force_zero=None, **kwargs):\n if force_zero is None and len(kwargs) == 0:\n # noinspection PyUnusedLocal\n def lf(x, params, const_list, const_dict):\n a, b = params[0:2]\n return a * x + b\n return FitFunction(\n func=lf, num_fit_params=2, force_zero=force_zero,\n name='linear', code='p1', **kwargs\n )\n else:\n # noinspection PyUnusedLocal\n def lf(x, params, const_list, const_dict):\n a = params[0]\n return a * x\n return FitFunction(\n func=lf, num_fit_params=1, force_zero=force_zero,\n name='linear', code='p1', **kwargs\n )",
"def c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_matrix(x1,x2,x3)\n\ty = y_vector(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tCCoefficients = np.dot(inv(C),y)\n\treturn(CCoefficients)",
"def linearRegression(x,y,f1,l1,f2,l2):\n n = len(x)\n sumx = 0.0 # sum_n(x_i)\n sumy = 0.0 # sum_n(y_i)\n sumxx = 0.0 # sum_n(x_i*x_i)\n sumxy = 0.0 # sum_n(x_i*y_i)\n count = 0\n for i2 in range(f2,l2+1):\n for i1 in range(f1,l1+1):\n xi = x[i2][i1]\n yi = y[i2][i1]\n sumx += xi\n sumy += yi\n sumxx += xi*xi\n sumxy += xi*yi\n count += 1\n beta = (sumxy-sumx*sumy/count)/(sumxx-sumx*sumx/count)\n alpha = (sumy-beta*sumx)/count\n #z = zerofloat(n)\n #for i in range(n):\n # if null is None or x[i]!=null:\n # z[i] = alpha+beta*x[i]\n #print 'slope =',beta\n #return z\n return beta",
"def linear_function(x, y):\n\n return x + y / 2.",
"def polynomial_equation(funct):\n coeff = str(differentiation.parse_coefficient(funct))\n if \"^\" not in funct:\n divisor = \"1\"\n else:\n divisor_location = str(funct.index(\"^\") + 1)\n divisor = funct[divisor_location:]\n if divisor == \"-1\":\n pass\n else:\n divisor = str(int(divisor) + 1)\n coeff += \"/\" + divisor\n return coeff + \"x^\" + str(divisor)",
"def linearfit(x,y):\n fit = np.polyfit(x,y,1)\n fit_fn = np.poly1d(fit)\n yy = fit_fn(x) \n \n return yy",
"def claret_linear(mu, coeff):\n return 1.0 - coeff * (1.0 - mu)",
"def spline_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope):\n\tC = c_coefficients(x1,x2,x3,y1,y2,y3,initial_slope,final_slope)\n\tD = d_coefficients(x1,x2,x3,C)\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,C,D)\n\tA = a_coefficients(y1,y2)\n\treturn(A,B,C[:2],D)",
"def d_coefficients(x1,x2,x3,CCoefficients):\n\tDCoefficients = np.array([\t(CCoefficients[1]-CCoefficients[0])/(3*(x2-x1)), \\\n\t\t\t\t\t\t\t\t(CCoefficients[2]-CCoefficients[1])/(3*(x3-x2))\t], \\\n\t\t\t\t\t\t\t\tfloat)\n\treturn(DCoefficients)",
"def coefA(x0,y0,x1,y1):\n return -(y1-y0)/(x1-x0)",
"def linear_regression(data):\n x_values = [x for x, y in data] #Get x values\n y_values = [y for x, y in data] #Get y values\n x_mean = sum(x_values) / len(x_values) #Compute mean value of x\n y_mean = sum(y_values) / len(y_values) #Compute mean value of y\n # Compute\n coefficient = sum([(x - x_mean) * (y-y_mean) for x,y in data]) / sum([(x - x_mean) ** 2 for x in x_values])\n intercept = y_mean - coefficient * x_mean # Compute Intercept\n return((coefficient,intercept))",
"def linear_attenuation_coefficient(self, lines):\n wl = lines.to(\"nm\", \"spectroscopy\").magnitude\n if isarray(wl):\n return [self.getExtinctionCoefficient(l) for l in wl]\n else:\n return self.getExtinctionCoefficient(wl)",
"def canonical_linear_form(self, *vars):\n arg0, arg1 = self.args\n if arg0.has(*vars) and arg1.has(*vars):\n arg0 = arg0.canonical_linear_form(*vars)\n arg1 = arg1.canonical_linear_form(*vars)\n return arg0 + arg1\n elif arg0.has(*vars) or arg1.has(*vars):\n raise NonlinearOperatorError(\"Cannot add dependent and independent terms.\")\n else:\n return self",
"def linear_expression(A, b, x, tol=1.e-9):\n\n # linear term (explicitly state that it is a LinExpr since it can be that A[i] = 0)\n exprs = [gp.LinExpr(sum([A[i,j]*x[j] for j in range(A.shape[1]) if np.abs(A[i,j]) > tol])) for i in range(A.shape[0])]\n\n # offset term\n exprs = [expr+b[i] if np.abs(b[i]) > tol else expr for i, expr in enumerate(exprs)]\n\n return exprs",
"def a_coefficients(y1,y2):\n\tACoefficients = np.array([\ty1, \\\n\t\t\t\t\t\t\t\ty2 ]).astype(float)\n\treturn(ACoefficients)",
"def match(expr):\n\n expr_type = all_functions.detect_expr_type(expr)\n if not isinstance(expr_type, sympy.Poly):\n matcher = noevals.noevalify(coeff0 * expr_type(x0) + coeff3)\n\n # we need to make sure that the expression is actually noevalified,\n # removing this causes issues with certain cases, like the one in this function's docstring\n noevalified_transformed_equation = noevals.noevalify(expr)\n match = noevalified_transformed_equation.match(matcher)\n\n noevaled_interior = noevals.noevalify(match[x0])\n interior_matcher = noevals.noevalify(coeff1 * x + coeff2)\n interior_match = noevaled_interior.match(interior_matcher)\n\n overall_match = dict(itertools.chain(match.items(), interior_match.items()))\n overall_match.pop(x0) # we don't want the \"interior\" wildcard to be included in the output to students\n else:\n matcher = coeff0 * x ** 2 + coeff1 * x + coeff2\n overall_match = expr.match(matcher)\n\n return MatrixLinearTransformation.order_match(overall_match)",
"def constant_equation(funct):\n return funct + \"x\"",
"def model_linear(train_x, train_y, test_x):\n train_x = sm.add_constant(train_x)\n model_fit = sm.OLS(train_y, train_x).fit()\n model_info = {'model': 'linear', 'R2': model_fit.rsquared, 'f_pvalue': model_fit.f_pvalue,\n 'const': model_fit.params.const, 'beta': model_fit.params.values[1]}\n predictions = model_fit.predict(sm.add_constant(test_x))\n return predictions, model_info",
"def b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients):\n\tBCoefficients = np.array([\t((y2-y1)/(x2-x1)-CCoefficients[0]*(x2-x1) - DCoefficients[0]*((x2-x1)**2)), \\\n\t\t\t\t\t\t\t\t((y3-y2)/(x3-x2)-CCoefficients[1]*(x3-x2) - DCoefficients[1]*((x3-x2)**2)) \t]).astype(float)\n\treturn(BCoefficients)",
"def linear_regression(x, y):\n #print(\"Fitting\", y, \"\\nagainst\", x)\n matrix = np.vstack( [x, np.ones_like(x)] ).T\n slope, intercept = np.linalg.lstsq(matrix,y)[0]\n #print(\"gives slope=\", slope, \"intercept=\", intercept)\n return (slope, intercept)",
"def coefC(x0,y0,x1,y1):\n return (x1*y0-x0*y1)/(x1-x0)",
"def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y",
"def linear_regression(features, values):\n x_values = sm.add_constant(features)\n model = sm.OLS(values, x_values, hasconst=True)\n results = model.fit()\n all_params = results.params\n intercept = all_params[0]\n params = all_params[1:]\n return intercept, params",
"def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly",
"def __init__(self, coefficients, name=None):\n coefficients = {v: str(c) for v,c in coefficients.iteritems()}\n expr = ' + '.join('%s*%s' % (c,v) for v,c in coefficients.iteritems())\n Function.__init__(self, expr, variables=set(coefficients),\n first_derivatives=coefficients, name=name)\n self.coefficients = coefficients",
"def get_polyfit_function(self):\n N = len(self.coefficients)\n return lambda x: np.dot( self.get_poly(x).T , self.coefficients.reshape(N, 1) )",
"def calc_poly_linear_regression(independent, dependent):\n # Reshape for sklearn\n independent = independent.values.reshape(-1,1)\n dependent = dependent.values.reshape(-1,1)\n # Make the whole thing poly\n poly = PolynomialFeatures(degree=2)\n independent_ = poly.fit_transform(independent)\n # Do the linear regression\n model = LinearRegression()\n model.fit(independent_, dependent)\n # Calculate R2\n return model.score(independent_, dependent)",
"def linear(self, verbose=0):\n\n # Output linear regression summary with coefficients and p-values\n # if desired\n if verbose != 0:\n model = sm.OLS(self.y_train, sm.add_constant(self.X_train)).fit()\n print(model.summary())\n\n linear_regressor = LinearRegression(fit_intercept=True, normalize=False,\n copy_X=True)\n linear_score = np.mean(cross_val_score(\n estimator=linear_regressor, X=self.X_train, y=self.y_train,\n cv=5, scoring=self.scorer))\n print('Linear score: ' + str(linear_score))\n return linear_regressor"
] | [
"0.6206099",
"0.6202251",
"0.6075639",
"0.59964895",
"0.5825655",
"0.5821187",
"0.58134663",
"0.5804364",
"0.5800974",
"0.58002186",
"0.57740015",
"0.57720554",
"0.5732264",
"0.570375",
"0.5698317",
"0.56869346",
"0.5648885",
"0.5631185",
"0.56305605",
"0.56204075",
"0.5614729",
"0.5592127",
"0.5583624",
"0.5576216",
"0.5571138",
"0.55546",
"0.55495423",
"0.5543561",
"0.5531215",
"0.55291843"
] | 0.72550875 | 0 |
Initializes a new input item behaviour. model requires an input item model. | def __init__(self, model):
super(InputItemBehaviour, self).__init__(model)
logging.log(1, "Trace: InputItemBehaviour(%s)" % model)
# self._register_events()
eventsManager.registerEvent(
'onInputClick-' + self._model.itemName, (pygame.MOUSEBUTTONDOWN),
self.onInputClick)
self.write(
self._model.text if not self._model.empty else self._model.placeHolder,
self._model.color if not self._model.empty else
(self._model.color[0] / 2, self._model.color[1] / 2, self._model.color[2] / 2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self,name,value,*args,**kargs):\n \n kargs['text'] = '' # Force no label\n self.input = value\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)",
"def set_inputs(self, item_data):\n self.item_type = item_data[0]\n self.size = item_data[1]\n self.toppings = item_data[2]",
"def init_model(self):\n pass",
"def __init__(self, model_item=None, prefix=\"\", parent=None):\n QAbstractGraphicsShapeItem.__init__(self, parent)\n self.setFlags(QGraphicsItem.ItemIsSelectable |\n QGraphicsItem.ItemIsMovable |\n QGraphicsItem.ItemSendsGeometryChanges |\n QGraphicsItem.ItemSendsScenePositionChanges)\n\n self._model_item = model_item\n if self._model_item is not None:\n self._model_item.model().dataChanged.connect(self.onDataChanged)\n\n # initialize members\n self._prefix = prefix\n self._auto_text_keys = self.defaultAutoTextKeys[:]\n self._text = \"\"\n self._text_bg_brush = None\n self._text_item = QGraphicsTextItem(self)\n self._text_item.setPos(0, 0)\n self._text_item.setAcceptHoverEvents(False)\n self._text_item.setFlags(QGraphicsItem.ItemIgnoresTransformations)\n self._text_item.setHtml(self._compile_text())\n self._valid = True\n\n if len(self.cycleValuesOnKeypress) > 0:\n print(\"cycleValueOnKeypress is deprecated and will be removed in the future. \" +\n \"Set BaseItem.hotkeys instead with cycleValue()\")\n self.changeColor()",
"def initialize_model(self):\n pass",
"def init_items(self):\r\n raise NotImplementedError()",
"def __init__(self, meta_model, input_spec):\r\n\r\n # Check if the input specifications are correct\r\n RM.check_if_type(input_spec, list, 'The input specifications')\r\n for i in range(len(input_spec)):\r\n RM.check_if_type(input_spec[i], str, 'Input specification %x' % i)\r\n RM.check_if_poss_input_spec(input_spec[i], i)\r\n\r\n InputDecorator.__init__(self, meta_model)\r\n\r\n self.input_spec = input_spec",
"def _add_init(self, p_model):\r\n\r\n raise NotImplementedError",
"def initialize(self, model):\n pass",
"def __init__(self, inputId=\"\", intype=INPUT_TEXT, placeHolderContent=\"\", selections = [], defaultInput=\"\"):\n\t\tself.inputId = inputId\n\t\tself.intype = intype\n\t\tself.placeHolderContent = placeHolderContent\n\t\tself.selections = selections\n\t\tself.defaultInput = defaultInput",
"def __init__(self):\n self.model = None",
"def __init__(self):\n self.model = None",
"def __init__(self, model):\n self._model = model",
"def __init__(self,name,value,*args,**kargs):\n self.input = CoordsBox()\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)\n self.setValue(value)",
"def __init__(self):\n self._data = PositionalList() # list of Item instances",
"def __init__(self):\n self.inputs = {}",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self, model):\n self.model = model",
"def __init__(self):\n self.item_list = []",
"def __init__(self, model):\n\t\tself.model = model",
"def __init__(self, **kwargs):\n self.item = Item(**kwargs)\n self._working_dir = None",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, model: Type[ModelType]):\n self.model = model",
"def __init__(self, interpreter, items):\n self.interpreter = interpreter\n self.items = items",
"def __init__(self, items):\n self.items = items",
"def __init__(self, controller):\n super().__init__(controller)\n\n # The hovered input when entering this View.\n self.first_inp = \"s\"\n\n # Initialize selected variable.\n self.selected = None\n\n # Make background graphics.\n self.make_background_graphics()\n\n # Make Buttons.\n self.make_buttons()\n\n # Make the information box. This explains each Button.\n self.make_info_box()\n\n # Initializes popup.\n self.make_popup()\n\n # Map of input to functions.\n enter = self.graphics.ENTER_KEY\n self.controls = {\n # Pressing \"q\" will go back to the main menu.\n \"q\": lambda: Action(\"goto main menu view\", []),\n\n # Movement keys.\n \"w\": lambda: self.move_cursor(Direction.U),\n \"a\": lambda: self.move_cursor(Direction.L),\n \"s\": lambda: self.move_cursor(Direction.D),\n \"d\": lambda: self.move_cursor(Direction.R),\n\n # Repeat the last valid input.\n enter: self.repeat_last_valid_input,\n\n # Click the selected UIElement.\n \"m\": self.click\n }",
"def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()",
"def __init__(self):\n # Item Dictionary (key: call number, value: Item object)\n self.item_list = {}\n\n # Add some items manually for testing purposes.\n book1 = Book(\"In praise of Idleness\", \"B-1\", 3, \"bertrand russell\")\n book2 = Book(\"Breaking the Code\", \"B-2\", 1, \"Pat Matter\")\n dvd = DVD(\"Breaking Bad\", \"D-1\", 2, \"2019-01-05\", \"CA\")\n self._add_item_by_item(book1)\n self._add_item_by_item(book2)\n self._add_item_by_item(dvd)"
] | [
"0.64909756",
"0.6275057",
"0.6250221",
"0.6131106",
"0.6091335",
"0.60677344",
"0.6066739",
"0.6032842",
"0.6030022",
"0.59932554",
"0.5961387",
"0.5961387",
"0.5958153",
"0.58987296",
"0.58704364",
"0.58674127",
"0.5828547",
"0.5828547",
"0.5828547",
"0.5828547",
"0.58174175",
"0.58137685",
"0.57844746",
"0.5780743",
"0.5780743",
"0.5775311",
"0.5761775",
"0.57546085",
"0.5727724",
"0.5716022"
] | 0.78237134 | 0 |
Returns the requested custom interest in full detail. | def GetCustomInterest(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None",
"def get_loan_info():\n\n try:\n principal = int(request.get_json()[\"amount\"])\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'amount' and 'tenure'\"}), 400\n\n interest_rate = get_interest_rate(tenure)\n emi = calculate_emi(principal, interest_rate, tenure)\n total = round(emi * tenure, 2)\n interest = round(total - principal, 2)\n\n output = {\n \"principal\": principal,\n \"tenure\": tenure,\n \"interest\": interest,\n \"interest_rate\": interest_rate,\n \"emi\": emi,\n \"total\": total,\n }\n\n return jsonify({\"loan_info\": output})",
"def interest(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n\n t = PrettyTable([\n \"Account\", \"Last Interest Payment\", \"Next Payment\"\n ])\n t.align = \"r\"\n for a in account:\n a = Account(a, morphene_instance=stm)\n i = a.interest()\n t.add_row([\n a[\"name\"],\n i[\"last_payment\"],\n \"in %s\" % (i[\"next_payment_duration\"])\n ])\n print(t)",
"def get(self, list_id, category_id, interest_id):\n return self._mc_client._get(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id))",
"def total_interest(self):\n return sum(self.table[\"interest\"])",
"def get_percent_interest(self):\n return self.__percentage_interest",
"def get_user_interests():\n data = request_verification(request.get_json())\n if isinstance(data, int):\n return codes[data], data\n else:\n try:\n user_data = r.table('interests').filter(r.row['user_id'] == data).run(g.rdb_conn)\n except Exception as e:\n return e, 500\n else:\n interests = user_data.items[0].get('interests')\n return json.dumps(interests), 200",
"def prompt_user_account_to_get_interest():\n print('What account do you want 0.5% automatic interest?:')\n return input()",
"def to_effective(self) -> EffectiveInterest:\n pass",
"def get_point_of_interest(self):\n if self.point_of_interest == \"centroid\":\n return (round(float(self.kf.x[0][0]), 2), round(float(self.kf.x[1][0]), 2))\n elif self.point_of_interest == \"botmid\":\n x1, y1, x2, y2 = convert_x_to_bbox(self.kf.x)[0]\n x = (x2 + x1) / 2\n y = y2\n return (round(float(x), 2), round(float(y), 2))\n elif self.point_of_interest == \"topmid\":\n x1, y1, x2, y2 = convert_x_to_bbox(self.kf.x)[0]\n x = (x2 + x1) / 2\n y = y1\n return (round(float(x), 2), round(float(y), 2))\n else:\n print(\"point of interest not devised\")\n exit()",
"def suggested_retirement_income(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response(1234)",
"def compute_interest(self) -> float:\n interest = self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir",
"def get_details(self):",
"def get_region_of_interest(self) -> UserRoi:\n result = UserRoiStructure()\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_GetUserROI(self.dev, byref(result)))\n return UserRoi.from_struct(result)",
"def pinterest(self):\n return self._pinterest",
"def listNotificationInterests(self):\n return []",
"def add_interest(self, interest_node):\n user_interest_relationship = Relationship(start_node=self.user_node,\n rel=AgoraRelationship.INTERESTED_IN,\n end_node=interest_node)\n self.graph_db.create_unique(user_interest_relationship)\n return self.user_interests",
"def __repr__(self):\n name = \"Investor: %s\" % self.name\n cash = \"Cash: %s\" % self.cash\n risk_money = \"Risk Money: %s\" % self.risk_money\n portfolio = \"Portfolio: %s\" % self.portfolio\n info = name + cash + risk_money + portfolio\n return info",
"def getByInterest(database,interest):\n correspondant=[]\n for key,usr in database.items():\n if interest in usr.interest:\n correspondant.append(usr)\n if len(correspondant)==0:\n print(f\"there is no user interested in {interest}\")\n return correspondant, False\n else:\n return correspondant,True",
"def get_interest_rates():\n\n try:\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'tenure' in months\"}), 400\n\n if tenure <= 5:\n return jsonify({\"interest_rate\": 10}), 200\n elif tenure > 5 and tenure <= 24:\n return jsonify({\"interest_rate\": 12}), 200\n else:\n return jsonify({\"interest_rate\": 15}), 200",
"def calculate_income(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response(2345)",
"def _ebit(self):\n return self.net_income + self.tax_expense + self.interest_expense",
"def total_interest(self) -> Decimal:\n return self._quantize(self.schedule(int(self.term / self.term_multiplier * self.n_periods)).total_interest)",
"def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))",
"def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }",
"def detail(self):\n info = self.info()\n return info",
"def GetInsight(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12",
"def incomeBar(self):\r\n return self._createTextProfile(self.income)"
] | [
"0.63655275",
"0.58175296",
"0.5700428",
"0.5637553",
"0.5635685",
"0.5514969",
"0.5504437",
"0.5473622",
"0.54700804",
"0.54615504",
"0.5434457",
"0.5407035",
"0.53967106",
"0.53456295",
"0.53062636",
"0.5304055",
"0.53011596",
"0.5287489",
"0.527064",
"0.51641273",
"0.51545066",
"0.51514244",
"0.5121042",
"0.5116196",
"0.50478375",
"0.50413877",
"0.5040204",
"0.5028663",
"0.50042546",
"0.49804917"
] | 0.680991 | 0 |
Creates or updates custom interests. Operation statuses are returned. | def MutateCustomInterests(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interests(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for interest in extracted:\n self.interests.add(interest)",
"def post(self):\n try:\n data = request.get_json()\n user_interests = InterestService.create_or_update_user_interests(\n token_auth.current_user(), data[\"interests\"]\n )\n return user_interests.to_primitive(), 200\n except (ValueError, KeyError) as e:\n return {\"Error\": str(e)}, 400",
"def update(self, list_id, category_id, interest_id, data):\n return self._mc_client._patch(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id),\n data=data)",
"def add_interest(self, interest_node):\n user_interest_relationship = Relationship(start_node=self.user_node,\n rel=AgoraRelationship.INTERESTED_IN,\n end_node=interest_node)\n self.graph_db.create_unique(user_interest_relationship)\n return self.user_interests",
"def GetCustomInterest(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def compute_interest(self) -> float:\n interest = self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"def add_interest_paper(date, paperURI, user):\n paper = diversion_for_week_paper(date, paperURI)\n not_interested = paper['NotInterested']\n if user in not_interested:\n not_interested.remove(user)\n interested = paper['Interested']\n if user not in interested:\n interested.append(user)\n resp = table.update_item(\n Key={\"WeekOf\": date, \"Paper\": paperURI},\n ExpressionAttributeNames={\n \"#interested\": \"Interested\",\n \"#notInterested\": \"NotInterested\"\n },\n ExpressionAttributeValues={\n \":interested\": interested,\n \":notInterested\": not_interested\n },\n UpdateExpression=\"SET #interested = :interested, #notInterested = :notInterested\"\n )",
"def update_interest(origin_matrix, interest_calculation_type, interest_payment_type, scaled_interest, grace_period_interest_calculate, grace_period_interest_pay, grace_period_balloon):\n balance = origin_matrix[BALANCE_IDX]\n new_interest_paid_arr = np.zeros(len(origin_matrix[0]))\n\n for idx in range(1, len(balance)):\n new_interest_paid_arr[idx] = balance[idx-1] * scaled_interest\n\n if interest_calculation_type == 'initial amount or flat':\n new_interest_paid_arr[1:] = balance[0] * scaled_interest\n\n # for grace period interest calculation\n for idx in range(1, grace_period_interest_calculate+1):\n new_interest_paid_arr[idx] = 0\n\n # for grace period interest payment\n for idx in range(1, grace_period_interest_pay+1):\n new_interest_paid_arr[grace_period_interest_pay+1] += new_interest_paid_arr[idx]\n new_interest_paid_arr[idx] = 0\n\n # for grace ballon\n for idx in range(len(balance)-grace_period_balloon, len(balance)):\n new_interest_paid_arr[idx] = 0\n\n # CHANGE\n if interest_payment_type == 'single end-term payment':\n new_interest_paid_arr[-1-grace_period_balloon] = sum(new_interest_paid_arr)\n for idx in range(len(new_interest_paid_arr)-1-grace_period_balloon):\n new_interest_paid_arr[idx] = 0\n return new_interest_paid_arr",
"def interest(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n\n t = PrettyTable([\n \"Account\", \"Last Interest Payment\", \"Next Payment\"\n ])\n t.align = \"r\"\n for a in account:\n a = Account(a, morphene_instance=stm)\n i = a.interest()\n t.add_row([\n a[\"name\"],\n i[\"last_payment\"],\n \"in %s\" % (i[\"next_payment_duration\"])\n ])\n print(t)",
"def data_processing():\n data = request.get_json()\n user_id = request_verification(data)\n\n if isinstance(user_id, int):\n return codes[user_id], user_id\n else:\n interest = {\n 'user_id': user_id,\n 'interests': data['interests']\n }\n try:\n r.table('interests').insert(interest).run(g.rdb_conn)\n except Exception as e:\n add_to_log(e)\n return e, 500\n else:\n return 'The record is created', 201",
"def create(self, list_id, category_id, data):\n return self._mc_client._post(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests'), data=data)",
"def MarkInsightAccepted(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def add_margin_interest(self, interest_time: int, asset: str, interest: float, interest_type: str,\n isolated_symbol: Optional[str] = None, auto_commit: bool = True):\n if isolated_symbol is None:\n table = tables.CROSS_MARGIN_INTEREST_TABLE\n row = (interest_time, asset, interest, interest_type)\n else:\n table = tables.ISOLATED_MARGIN_INTEREST_TABLE\n row = (interest_time, isolated_symbol, asset, interest, interest_type)\n\n self.add_row(table, row, auto_commit=auto_commit)",
"def interests(self):\n if \"interests\" in self._prop_dict:\n return self._prop_dict[\"interests\"]\n else:\n return None",
"def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)",
"def get_user_interests():\n data = request_verification(request.get_json())\n if isinstance(data, int):\n return codes[data], data\n else:\n try:\n user_data = r.table('interests').filter(r.row['user_id'] == data).run(g.rdb_conn)\n except Exception as e:\n return e, 500\n else:\n interests = user_data.items[0].get('interests')\n return json.dumps(interests), 200",
"def on_interest_change(origin_matrix, changes_on_interest, interest_payment_type, grace_period_balloon):\n new_interest = origin_matrix[INTEREST_PAID_IDX]\n for idx in range(len(new_interest)):\n if changes_on_interest[idx] != None:\n # check if the change is made on last row\n # CHANGE\n if idx != len(new_interest)-1-grace_period_balloon:\n if interest_payment_type == 'single end-term payment':\n new_interest[len(new_interest)-1-grace_period_balloon] -= changes_on_interest[idx]\n else:\n new_interest[idx+1] += new_interest[idx] - changes_on_interest[idx]\n\n new_interest[idx] = changes_on_interest[idx]\n return new_interest",
"def open_interest(self, open_interest):\n\n self._open_interest = open_interest",
"def api_asset_add(char_code: str, name: str, capital: str, interest: str):\n capital, interest = float(capital), float(interest)\n asset = Asset(char_code=char_code, name=name, capital=capital, interest=interest)\n\n if app.bank.contains(asset):\n return f\"Asset '{name}' already exists\", 403\n\n app.bank.add(asset)\n return f\"Asset '{name}' was successfully added\", 200",
"def create(username: str, password: str, firstname: str, lastname: str, \\\n email: str, state: str, city: str, street: str, \\\n street2: str, interests: str) -> dict:\n validation_check = {}\n if not username:\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username cannot be left blank.\"\n elif sql.is_username_taken(username):\n validation_check[\"success\"] = False\n validation_check[\"username\"] = \"Username is already taken\"\n if not password:\n validation_check[\"success\"] = False\n validation_check[\"password\"] = \"Password cannot be left blank\"\n\n if not interests:\n validation_check[\"success\"] = False\n validation_check[\"interests\"] = \"You are required to have at least one interest\"\n\n if not validation_check.get(\"success\", True):\n return validation_check\n\n interests = interests.split(',') # Interests should be comma seperated\n registration_response = sql.register_account(username, password, firstname, lastname, \\\n email, state, city, street, street2, interests)\n\n # If we were registered successfully\n # update the interest associations on another thread\n if registration_response['success']:\n EXECUTOR.submit(sql.update_interests, interests)\n\n return registration_response",
"def test_edit_interest_list_success(self):\n id = self.list_1.id\n url = reverse('xds_api:interest-list', args=(id,))\n _, token = AuthToken.objects.create(self.user_1)\n new_name = \"edited name\"\n empty_list = []\n new_list = {\"name\": new_name,\n \"description\": self.list_1.description,\n \"experiences\": empty_list}\n response = \\\n self.client.patch(url,\n data=json.dumps(new_list),\n HTTP_AUTHORIZATION='Token {}'.format(token),\n content_type=\"application/json\")\n responseDict = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(responseDict[\"name\"], new_name)\n self.assertEqual(responseDict[\"experiences\"], [])",
"def get_interest_rates():\n\n try:\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'tenure' in months\"}), 400\n\n if tenure <= 5:\n return jsonify({\"interest_rate\": 10}), 200\n elif tenure > 5 and tenure <= 24:\n return jsonify({\"interest_rate\": 12}), 200\n else:\n return jsonify({\"interest_rate\": 15}), 200",
"async def get_coins_of_interest(\n self,\n ) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:\n all_pending = []\n pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)\n pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)\n pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)\n all_pending.extend(pending_accept)\n all_pending.extend(pending_confirm)\n all_pending.extend(pending_cancel)\n removals = {}\n additions = {}\n\n for trade in all_pending:\n for coin in trade.removals:\n removals[coin.name()] = coin\n for coin in trade.additions:\n additions[coin.name()] = coin\n\n return removals, additions",
"def get(self, list_id, category_id, interest_id):\n return self._mc_client._get(\n url=self._build_path(list_id, 'interest-categories', category_id, 'interests', interest_id))",
"def total_interest(self):\n return sum(self.table[\"interest\"])",
"def test_update_risk_profile_using_put(self):\n pass",
"def repaid_interest(self, repaid_interest):\n\n self._repaid_interest = repaid_interest",
"def save(self, *args):\n self.party_name, self.office_name, self.user_id, self.date_created, self.status = args\n format_str = f\"\"\"\n INSERT INTO public.applications (party_name,office_name,user_id,date_created,status)\n VALUES ('{args[0]}','{args[1]}','{args[2]}','{(datetime.now())}','pending');\n \"\"\"\n cursor.execute(format_str)",
"def ModifyIncr(self):\n if self.force_auto_sync:\n self.get('ModifyIncr')\n return self._ModifyIncr",
"def add_interest(self, namespace, jid=None):\n if not isinstance(namespace, set) and not isinstance(namespace, list):\n namespace = [namespace]\n\n for ns in namespace:\n self.xmpp['xep_0030'].add_feature('%s+notify' % ns,\n jid=jid)\n self.xmpp['xep_0115'].update_caps(jid)"
] | [
"0.64842814",
"0.63414025",
"0.58531773",
"0.5730995",
"0.5729617",
"0.51918006",
"0.518725",
"0.5175335",
"0.50396997",
"0.49845538",
"0.49697483",
"0.49449798",
"0.4870831",
"0.48692632",
"0.48253375",
"0.48198012",
"0.4756726",
"0.46992794",
"0.4693637",
"0.46757862",
"0.46729645",
"0.45582408",
"0.45147523",
"0.44987875",
"0.44945675",
"0.44711685",
"0.44691846",
"0.44478327",
"0.44289356",
"0.44247222"
] | 0.6584547 | 0 |
create_unexisted_dir(directory, element) -> create a directory that does not exist. This function creates the directory if it does not already exist in the path. | def create_unexisted_dir(directory, element):
directory += "/" + element
if get_file_type(directory) == 0:
mkdir(directory)
return directory | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_not_existing_directory(\n directory: str\n):\n p = pathlib.Path(directory)\n if not p.is_dir():\n print(f'Creating directory: {directory} as it does not exist')\n p.mkdir(parents=True, exist_ok=True)",
"def ifnotexistmkdir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n return Path(directory)",
"def create_directory_if_not_exists(directory_path):\n os.makedirs(directory_path, exist_ok=True)",
"def create_dir_if_doesnt_exist(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n return",
"def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)",
"def create_or_clean_directory(dir):\n\tif not os.path.exists(dir):\n\t\tprint(\"The path \\\"\" + dir + \"\\\" does not exist\")\n\t\tprint(\"creating directory \\\"\" + dir + \"\\\"\")\n\t\tos.makedirs(dir)\n\telse: #Directory exists, but we want to clean it before use\n\t\tprint(dir + \" already exists. Cleaning before use...\")\n\t\tshutil.rmtree(dir)\n\t\tos.makedirs(dir)",
"def _create_dir_if_not_exists(dir: str):\n\n if os.path.exists(dir) and not os.path.isdir(dir):\n raise ValueError(f'Provided path {dir} was not a directory')\n\n if not os.path.exists(dir):\n _log.info(f'Creating directory {dir}')\n os.mkdir(dir)",
"def create_empty_dir(dir_path):\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path)\n\n os.mkdir(dir_path)\n if not os.path.isdir(dir_path):\n raise OSError()",
"def _ensure_dir_exists(self, directory):\n directory = directory.strip()\n if not Path(directory).exists():\n os.mkdir(directory)",
"def remove_and_create_dir(path):\n dir = os.path.dirname(path)\n print('attempting to delete ', dir, ' path ', path)\n if os.path.exists(path):\n os.system(\"rm -rf \" + path)\n os.system(\"mkdir -p \" + path)",
"def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)",
"def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)",
"def mkdir_if_missing(directory, delete_if_exist=False):\n\n if delete_if_exist and os.path.exists(directory): shutil.rmtree(directory)\n\n # check if not exist, then make\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def mkdir_if_missing(directory, delete_if_exist=False):\n\n if delete_if_exist and os.path.exists(directory): shutil.rmtree(directory)\n\n # check if not exist, then make\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def create_file_directory():\n\n # Verify if directory exist.\n # If yes, delete it and every thing inside and create it again.\n # If not, just create it.\n\n if os.path.isdir('./file'):\n\n shutil.rmtree('./file')\n\n os.mkdir('./file')",
"def make_empty_dir(d):\n\n if os.path.exists(d):\n shutil.rmtree(d)\n os.makedirs(d, exist_ok=True)\n else:\n os.makedirs(d, exist_ok=True)\n\n return",
"def create_directory(dir_path):\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path, exist_ok=True)",
"def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def mkdir_if_not_exists(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def makeDirIfNotExists(pathToDir, isCleanIfExists=True):\n if os.path.isdir(pathToDir) and isCleanIfExists:\n shutil.rmtree(pathToDir)\n if not os.path.isdir(pathToDir):\n os.makedirs(pathToDir)",
"def prepare_dir(path, empty=False):\n\n def create_dir(path):\n \"\"\"\n Creates a directory\n :param path: string\n :return: nothing\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n if not os.path.exists(path):\n create_dir(path)",
"def create_dir(_dir):\n if not os.path.exists(_dir):\n os.makedirs(_dir)",
"def ensure_dir_exists(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)",
"def ensure_dir_exists(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)",
"def mkdir_if_missing(d):\n if not os.path.exists(d):\n os.makedirs(d)",
"def EnsureDirExists(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n pass",
"def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def mkDir(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n # In a race between two threads, this thread may have lost,\n # in which case the directory will now exist. Otherwise this\n # is a real exception.\n if not os.path.exists(path):\n raise",
"def make_dir_if_needed(dir) :\n\tif not exists(dir) :\n\t\tos.makedirs(dir)"
] | [
"0.76600796",
"0.7608169",
"0.7521542",
"0.73711544",
"0.72699314",
"0.72553796",
"0.7234705",
"0.71181333",
"0.70318043",
"0.7030892",
"0.7011069",
"0.70085305",
"0.70078796",
"0.70078796",
"0.70018446",
"0.6988016",
"0.6964505",
"0.69333845",
"0.69333845",
"0.692276",
"0.69148636",
"0.6889903",
"0.68588954",
"0.6855163",
"0.6822951",
"0.68106294",
"0.67795175",
"0.6767186",
"0.6753696",
"0.67494047"
] | 0.90142894 | 0 |
Checks if the parameter contains only numbers and has a length of at least 3. | def validate_phone_number(val):
if not val.isdigit() or len(val) < 3:
raise argparse.ArgumentTypeError("Invalid phone number")
return val | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate(number):\n number = compact(number)\n if len(number) != 10:\n raise InvalidLength()\n if not _nipt_re.match(number):\n raise InvalidFormat()\n return number",
"def is_int3(items):\n return len(items) == 3 and all(isinstance(item, int) for item in items)",
"def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number",
"def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True",
"def validate(number):\n number = compact(number)\n if not isdigits(number):\n raise InvalidFormat()\n if len(number) != 10:\n raise InvalidLength()\n if checksum(number) != 0:\n raise InvalidChecksum()\n return number",
"def _formatMatriculaValid(np):\n return len(np)==7 and np[:4].isdigit() and np[4:].isalpha()",
"def test_non_numberic_validation(self):",
"def test_non_numberic_validation(self):",
"def check_input_digits_count(self):\n check = len(str(self.input)) == 4\n return check",
"def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None",
"def _check_value(self):\n value = str(self._value_field.toPlainText())\n if value=='': return True\n ACCEPTABLES_CHARS = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '0',\n '.', ',', ';', ' ', '\\n', '-')\n\n for char in value:\n if not char in ACCEPTABLES_CHARS:\n return False\n if Variable.is_acceptable_arg(value):\n rows, columns = np.matrix(value).shape\n return 1 <= rows <= 4 and 1 <= columns <= 4\n else:\n return False",
"def validate(number):\n number = compact(number)\n if len(number) != 11:\n raise InvalidLength()\n if not isdigits(number):\n raise InvalidFormat()\n if number.startswith('0'):\n raise InvalidFormat()\n # In the first 10 digits exactly one digit must be repeated two or\n # three times and other digits can appear only once.\n counter = defaultdict(int)\n for n in number[:10]:\n counter[n] += 1\n counts = [c for c in counter.values() if c > 1]\n if len(counts) != 1 or counts[0] not in (2, 3):\n raise InvalidFormat()\n return mod_11_10.validate(number)",
"def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True",
"def isNumber(txt):\r\n if not isinstance(txt, str) or len(txt)==0:\r\n return \"error: isNumber\"\r\n # --- YOU CODE STARTS HERE\r\n else: \r\n try: \r\n m = float(txt)\r\n return True\r\n except ValueError: \r\n return False",
"def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False",
"def validate_parameters(side_1, side_2, side_3):\n if side_1 > 0 and side_2 > 0 and side_3 > 0 and (side_1 + side_2 > side_3) and \\\n (side_1 + side_3 > side_2) and (side_3 + side_2 > side_1):\n return True\n else:\n return False",
"def is_three_channeled(value):\n return len(value) == 3",
"def validate_chunk_width(chunk_width):\n if not isinstance(chunk_width, str):\n return False\n a = chunk_width.split(\",\")\n assert len(a) != 0 # would be code error\n for elem in a:\n try:\n i = int(elem)\n if i < 1 and i != -1:\n return False\n except:\n return False\n return True",
"def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())",
"def test_check_name_is_3_parts():\n check_name_length()",
"def is_valid_number(self, text, widget):\n if len(text) > 2:\n return False\n for char in text:\n if not char.isdigit():\n return False\n if text != '' and int(text) == 0:\n return False\n return True",
"def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)",
"def validate_n_digits(n: int = 4) -> bool:\n def func(s: str):\n if len(s) != n:\n return False\n if not s.isdigit():\n return False\n return True\n return func",
"def _multiple_choice_validate(s: str, len_options: int):\n if not s:\n raise ValueError('Please enter a value between {} and {}'.format(\n 1, len_options + 1))\n\n if not str.isnumeric(s):\n raise ValueError('Please enter a numeric value')\n\n if 1 <= int(s) <= (len_options + 1):\n return\n else:\n raise ValueError('Please enter a value between {} and {}'.format(\n 1, len_options + 1))",
"def check_params_length(params, length=5):\n # TODO: generalise this for emulators with some parameters fixed\n def check(array):\n if len(array) != length:\n raise ValueError(\"'params' must specify each of (acc, x, z, qb, mass)\")\n\n if len(params.shape) == 1:\n check(params)\n elif len(params.shape) == 2:\n check(params[0])",
"def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False",
"def check_for_float_and_int(check):",
"def sanitize_input(ll):\n p = sum([l[1] for l in ll])\n if not all([l[0] == int(l[0]) for l in ll]):\n if round(p, 5) != 1:\n return \"It's not a valid distribution and furthermore, one or more variable value are not integers\"\n else:\n return \"All the variable values should be integers\"\n if round(p, 5) != 1:\n return \"It's not a valid distribution\"",
"def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass"
] | [
"0.6559074",
"0.63855785",
"0.63790244",
"0.6328673",
"0.6307101",
"0.6269353",
"0.6255046",
"0.6255046",
"0.62493193",
"0.6218361",
"0.620253",
"0.6170183",
"0.61353946",
"0.6122875",
"0.6059919",
"0.6043937",
"0.6032656",
"0.6031185",
"0.60122234",
"0.60106295",
"0.6006608",
"0.59639364",
"0.5951324",
"0.59245354",
"0.5903377",
"0.58766586",
"0.5872651",
"0.5847164",
"0.58411276",
"0.58354133"
] | 0.6658412 | 0 |
Either a direct url or a file is required. | def clean(self):
if not self.direct_url and not self.file:
raise ValidationError('File or direct url required.') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def is_url_requirement(ireq):\n return bool(ireq.original_link)",
"def local(self):\r\n return self._url.scheme in ('', 'file')",
"def file_url(self, url):\n return self.is_regex_url(url, self.is_file_regex)",
"def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return",
"def direct_url(self) -> Optional[DirectUrl]:\n try:\n content = self.read_text(DIRECT_URL_METADATA_NAME)\n except FileNotFoundError:\n return None\n try:\n return DirectUrl.from_json(content)\n except (\n UnicodeDecodeError,\n json.JSONDecodeError,\n DirectUrlValidationError,\n ) as e:\n logger.warning(\n \"Error parsing %s for %s: %s\",\n DIRECT_URL_METADATA_NAME,\n self.canonical_name,\n e,\n )\n return None",
"def validate_url(self):\n pass",
"def get_url(self):\n try:\n return self._file.url\n except AttributeError:\n raise NotImplementedError(\"Underlying file does not have a URL.\")",
"def direct_url(self):\n #return '%s/getDownloadableFile' % self.absolute_url()\n return self.context.absolute_url()",
"def get_as_source(self):\n if self.as_source:\n return True\n if self.allow_source_from_get and self.request.GET.get('src', False):\n return True\n if self.allow_source_from_post and self.request.POST.get('src', False):\n return True\n return False",
"def get_datafile_url(self):\n try:\n return self.datafile.url\n except ValueError:\n if core.utils.is_absolute_url(self.source):\n if self.source.startswith('s3://'):\n return None # file is in the UPLOAD_BUCKET\n return self.source\n logger.error(\"File not found at '%s'\", self.datafile.name)\n return None",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def url_type(verifield, required):\n return verifield is None or urlparse(verifield) is not None",
"def is_file(self):\n return self.tipo == 'file' or self.tipo is None",
"def is_valid_file(args):\n if args.file is not None:\n return True\n return False",
"def check_file_open(filename: str, err_string: str, required: bool = False) -> None:\n if required or filename is not None:\n if filename is None:\n print('\\n' + err_string + '\\n')\n sys.exit(1)\n else:\n try:\n pathlib.Path(filename).resolve(strict=True)\n except FileNotFoundError:\n print('\\n' + err_string + '\\n')\n sys.exit(1)",
"def _access_file(pagename, request):\n _ = request.getText\n\n error = None\n if not request.values.get('target'):\n error = _(\"Filename of attachment not specified!\")\n else:\n filename = wikiutil.taintfilename(request.values['target'])\n fpath = getFilename(request, pagename, filename)\n\n if os.path.isfile(fpath):\n return (pagename, filename, fpath)\n error = _(\"Attachment '%(filename)s' does not exist!\") % {'filename': filename}\n\n error_msg(pagename, request, error)\n return (pagename, None, None)",
"def _get_file_helper(self):\n page = self.course.moodle.fetch(\n self._download_url % self.id,\n None\n )\n # The resource URL should magically 303 across to the actual file\n if page.history and page.history[0].status_code == 303:\n return page, page.content\n\n # If it doesn't 303 to the actual file then there might be a download\n # link to try\n bs = bs4.BeautifulSoup(page.text, 'lxml')\n\n div = bs.find('div', class_='resourceworkaround')\n\n if div: # it's a link to the resource\n link = div.find('a').href\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n # Perhaps it's an embedded object\n obj = bs.find('object', id='resourceobject')\n if obj:\n link = obj['data']\n\n page = self.course.moodle.fetch(\n link,\n None\n )\n return page, page.content\n\n raise ValueError(\"No idea how to get that resource\")",
"def get_redirect_url(self, *args, **kwargs):\n return self.document.file.url",
"def get_absolute_url(self):\n\n file_url = settings.MEDIA_URL + str(self.file_link.url)\n filelist_url = self.file_list.get_absolute_url() if self.file_list else \"\"\n contentmodel_url = super(File, self).get_absolute_url()\n\n # otherwise return the url for its list of files or its content model url\n return (file_url or filelist_url or contentmodel_url or \"\")",
"def _get_url(self, absolute):",
"def is_file(self):\n return self.type == \"file\"",
"def test_get_object_link_file(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n url = reverse(\n 'filesfolders:file_serve',\n kwargs={'file': self.file.sodar_uuid, 'file_name': self.file.name},\n )\n ret = plugin.get_object_link('File', self.file.sodar_uuid)\n self.assertEqual(ret['url'], url)\n self.assertEqual(ret['label'], self.file.name)\n self.assertEqual(ret['blank'], True)",
"def __isUrl(self, url):\n if type(url)==str:\n return url.startswith('http://') or url.startswith('https://')\n return False",
"def _check_source (fileurl, path_unzip, outfile) :\n if outfile is not None and os.path.splitext (outfile)[1].lower () == os.path.splitext (fileurl)[1].lower () :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = outfile)\n return file\n else :\n file = _check_url_file (fileurl, path_download = path_unzip, outfile = None)\n txt = _check_zip_file (file, path_unzip = path_unzip, outfile = outfile)\n if not os.path.exists (txt):\n message = \"hal_core._check_source: unable to find file \" + txt + \" source (\" + fileurl + \")\"\n raise PQHException (message)\n return txt",
"def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)",
"def is_separate_file(self):\n return self.uri is not None and not self.has_data_uri",
"def filename(self) -> Optional[str]:\n ...",
"def _file_url(self, fid):\n base = self.tq.threatq_host + '/files/'\n return base + str(fid) + '/details'"
] | [
"0.60759676",
"0.5991341",
"0.5933003",
"0.58513236",
"0.58213466",
"0.58213407",
"0.5801431",
"0.5788176",
"0.5755739",
"0.57088137",
"0.5697783",
"0.5672512",
"0.5672512",
"0.56354654",
"0.5618201",
"0.5613041",
"0.5595372",
"0.55869484",
"0.5542433",
"0.5506076",
"0.54980326",
"0.5492456",
"0.54752046",
"0.546597",
"0.54581225",
"0.53922707",
"0.5384985",
"0.5382046",
"0.53786534",
"0.53733414"
] | 0.61261535 | 0 |
Basically Object.assign(GAN_PARAMS, params). See stackoverflow 38987. | def step6_set_gan_params(params):
global GAN_PARAMS
GAN_PARAMS = {**GAN_PARAMS, **params} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value",
"def assign_params(sess, params, network):\n for idx, param in enumerate(params):\n assign_op = network.all_params[idx].assign(param)\n sess.run(assign_op)",
"def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params",
"def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='inception3', batch_size=64, distortions=False)",
"def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict",
"def copy_params(self):\n tf.get_default_session().run(self.copy_ops)",
"def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='alexnet', batch_size=512, distortions=False)",
"def load_parameters(self, params):\n # load (aka. deep copy) parameters in params into network\n c=0\n self.params = []\n names = ['W_i']\n for n,p in zip(names, params):\n self.params.append(theano.shared(name = p.name,\n value = p.get_value(borrow=True)))\n \n setattr(self, n, self.params[c])\n c+=1\n assert( len(self.params) == c )",
"def make_params(config):\n params = copy.deepcopy(config.view.params)\n params.t2bins = np.arange(0, params.t2bin_max + 1e-4, params.t2bin_stepsize)\n params.out = make_Bunch(\"State and output of detection processing\") # outputs are not parameters, maybe separate \n return params",
"def _assign_model_params(self, sess):\n with self.graph.as_default():\n for nn in range(self.num_networks):\n self.networks[nn].assign_model_params(sess)",
"def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_kvhh', 'g_cav', 'g_kca', 'g_nap']\n gX_log: np.ndarray = 4 * np.random.rand(5) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(5)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(tCa_dict)\n return param_dict",
"def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1, model='resnet50', distortions=False, forward_only=True)",
"def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='vgg16', batch_size=128, distortions=False)",
"def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)] - learning_rate * grads['dW' + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)] - learning_rate * grads['db' + str(l+1)]\n \n return parameters",
"def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(9)) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_log: np.ndarray = 4 * np.random.rand(3) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(3)) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n param_dict.update(tCa_dict)\n return param_dict",
"def update_parameters(parameters, grads, learning_rate):\n pass",
"def update_parameters_with_gd(parameters, grads, learning_rate):\n\n L = len(parameters) // 2 # number of layers in the neural networks\n\n # Update rule for each parameter\n for l in range(L):\n ### START CODE HERE ### (approx. 2 lines)\n parameters[\"W\" + str(l+1)] = parameters[\"W\" + str(l+1)]-learning_rate* grads[\"dW\" + str(l+1)]\n parameters[\"b\" + str(l+1)] = parameters[\"b\" + str(l+1)]-learning_rate* grads[\"db\" + str(l+1)]\n ### END CODE HERE ###\n \n return parameters",
"def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params",
"def _shared_params(self):\n params = {}\n if _NUM_BATCHES.value is not None:\n params['num_batches'] = _NUM_BATCHES.value\n if self.output_dir is not None:\n params['benchmark_log_dir'] = self.output_dir\n return benchmark_cnn.make_params(**params)",
"def vanilla_gan_model(params):\n # Instantiate generator and discriminator objects.\n network_dict = instantiate_network_objects(params)\n\n # Instantiate generator optimizer.\n generator_optimizer = instantiate_optimizer(params, scope=\"generator\")\n\n # Instantiate discriminator optimizer.\n discriminator_optimizer = instantiate_optimizer(\n params, scope=\"discriminator\"\n )\n\n return (\n network_dict,\n {\n \"generator\": generator_optimizer,\n \"discriminator\": discriminator_optimizer\n }\n )",
"def initialize_adam(parameters) :\n\n L = len(parameters) // 2\n v = {}\n s = {}\n for l in range(L):\n v[\"dW\" + str(l + 1)] = np.zeros_like(parameters[\"W\" + str(l + 1)])\n v[\"db\" + str(l + 1)] = np.zeros_like(parameters[\"b\" + str(l + 1)])\n\n s[\"dW\" + str(l+1)] = np.zeros_like(parameters[\"W\" + str(l + 1)])\n s[\"db\" + str(l+1)] = np.zeros_like(parameters[\"b\" + str(l + 1)])\n\n return v, s",
"def update_parameters(params, grads, alpha):\n n_layers = len(params) // 2\n for i in range(n_layers):\n params['w%s' % (i+1)] = (\n params['w%s' % (i+1)] - alpha * grads['dw%s' % (i+1)])\n params['b%s' % (i+1)] = (\n params['b%s' % (i+1)] - alpha * grads['db%s' % (i+1)])\n return params",
"def set_params(self, params: Dict) -> None:\n self.leak.set_g(params[\"g_leak\"])\n self.kvhh.set_g(params[\"g_kvhh\"])\n self.cav.set_g(params[\"g_cav\"])\n self.kca.set_g(params[\"g_kca\"])\n self.nap.set_g(params[\"g_nap\"])\n self.tau_ca = params[\"t_ca\"]",
"def create_hparams(experiment):\n hparams = {}\n\n # General parameters.\n hparams['batch_size'] = 64\n hparams['eval_batch_size'] = 64\n hparams['learning_rate_warmup_steps'] = 2000\n hparams['learning_rate_constant'] = 1\n hparams['learning_rate'] = 0.001\n hparams['train_epoches'] = 200\n hparams['steps_per_epoch'] = 30\n hparams['train_steps'] = 1000 * 1000\n hparams['eval_steps'] = 100\n hparams['caption_optimizer'] = 't2t'\n hparams['clip_norm'] = 5.0\n hparams['train_files'] = ''\n hparams['eval_files'] = ''\n hparams['train_buffer_size'] = 2000\n hparams['eval_buffer_size'] = 500\n hparams['train_pixel_encoder'] = True\n hparams['debug'] = False\n hparams['distribution_strategy'] = 'mirrored'\n\n # Embedding parameters.\n hparams['embedding_file'] = ''\n hparams['word_vocab_path'] = ''\n hparams['glove_trainable'] = True\n hparams['vocab_size'] = 10000\n\n # View hierarchy encoder parameters.\n hparams['max_pixel_pos'] = 100\n hparams['max_dom_pos'] = 500\n hparams['screen_encoder'] = 'pixel_transformer'\n hparams['screen_embedding_feature'] = ['text', 'type', 'pos', 'click', 'dom']\n hparams['obj_text_aggregation'] = 'max'\n hparams['synthetic_screen_noise'] = 0.\n\n # General parameters.\n hparams['num_hidden_layers'] = 2\n hparams['hidden_size'] = 2\n hparams['filter_size'] = 2\n hparams['num_heads'] = 2\n hparams['dropout'] = 0.2\n hparams['layer_prepostprocess_dropout'] = 0.2\n hparams['attention_dropout'] = 0.2\n hparams['relu_dropout'] = 0.2\n\n transformer_hparams = model_params.BASE_PARAMS\n\n # Add parameters from transformer model.\n hparams.update(transformer_hparams)\n\n # Rewrite all the parameters from command-line flags.\n config = screen2words_experiment_config.experiments[experiment]\n hparams.update(config)\n\n return hparams",
"def init_params(options):\n params = OrderedDict()\n\n # event embedding, shape = (n_events, dim_proj)\n randn = np.random.randn(options['n_events'],\n options['dim_proj'])\n params['Eemb'] = (0.1 * randn).astype(config.floatX)\n\n # shape = dim_proj * dim_proj\n gru_Wz = ortho_weight(options['dim_proj'])\n params['gru_Wz'] = gru_Wz\n gru_Wh = ortho_weight(options['dim_proj'])\n params['gru_Wh'] = gru_Wh\n gru_Wr = ortho_weight(options['dim_proj'])\n params['gru_Wr'] = gru_Wr\n\n # shape = dim_proj * dim_proj\n gru_Uz = ortho_weight(options['dim_proj'])\n params['gru_Uz'] = gru_Uz\n gru_Uh = ortho_weight(options['dim_proj'])\n params['gru_Uh'] = gru_Uh\n gru_Ur = ortho_weight(options['dim_proj'])\n params['gru_Ur'] = gru_Ur\n\n # shape = dim_proj\n gru_bz = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_bz'] = gru_bz\n gru_bh = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_bh'] = gru_bh\n gru_br = np.random.rand(options['dim_proj']).astype(config.floatX)-0.5\n params['gru_br'] = gru_br\n\n # for attention\n attp_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['attp_q'] = attp_q\n attp_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['attp_b'] = attp_b\n attp_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['attp_eta'] = attp_eta\n\n atts_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['atts_q'] = atts_q\n atts_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atts_b'] = atts_b\n atts_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atts_eta'] = atts_eta\n\n atti_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['atti_q'] = atti_q\n atti_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atti_b'] = atti_b\n atti_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atti_eta'] = atti_eta\n\n atta_q = init_params_weight(options['dim_proj'], options['dim_att'])\n params['atta_q'] = atta_q\n atta_b = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atta_b'] = atta_b\n atta_eta = np.random.rand(options['dim_att'], ).astype(config.floatX) - 0.5\n params['atta_eta'] = atta_eta\n\n # decoding matrix for external influences\n W_ext = init_params_weight(options['dim_proj'],\n options['n_events'])\n params['W_ext'] = W_ext\n dec_b = np.random.rand(options['n_events']).astype(config.floatX)-0.5\n params['b_ext'] = dec_b.astype(config.floatX)\n\n return params",
"def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)",
"def initialize(self):\n\n\t\tparameters = {}\n\t\tL = len(self.layer_dims) # number of layers in the network\n\n\t\tfor l in range(1, L):\n\t\t\tparameters['W' + str(l)] = np.random.randn(self.layer_dims[l], self.layer_dims[l-1]) * 0.01\n\t\t\tparameters['b' + str(l)] = np.zeros((self.layer_dims[l], 1))\n\n\t\t\tassert(parameters['W' + str(l)].shape == (self.layer_dims[l], self.layer_dims[l-1]))\n\t\t\tassert(parameters['b' + str(l)].shape == (self.layer_dims[l], 1))\n\n\t\treturn parameters",
"def merged_parameters(self, parameters):\n result = self.__params.copy()\n for k, v in parameters.iteritems():\n result[k] = v\n return result",
"def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50', batch_size=128, distortions=False,\n optimizer='momentum')",
"def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)"
] | [
"0.6051539",
"0.60158926",
"0.58936363",
"0.5835516",
"0.582533",
"0.57827276",
"0.5763045",
"0.57321995",
"0.5730178",
"0.5729452",
"0.56903327",
"0.5668459",
"0.56559336",
"0.56548595",
"0.56519294",
"0.55942523",
"0.5588376",
"0.55822974",
"0.55491924",
"0.55464953",
"0.5525608",
"0.552214",
"0.5514944",
"0.54950523",
"0.54917157",
"0.54756707",
"0.5466441",
"0.5458843",
"0.5452666",
"0.5443564"
] | 0.74238956 | 0 |
Cut a map into many chunks based on the chunk_size variable (note_group_size). | def cut_map_chunks(c):
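# Slide a window of length chunk_size over the map in steps of step_size (both assumed to be module-level constants defined elsewhere in the file) and stack the resulting chunks into one tensor.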
r = []
for i in range(0, (c.shape[0] - chunk_size) // step_size):
chunk = c[i * step_size:i * step_size + chunk_size]
r.append(chunk)
return tf.stack(r) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def chunks(self, list_to_chunk, size):\n for i in range(0, len(list_to_chunk), size):\n yield list_to_chunk[i:i + size]",
"def split_chunk(list, chunk_size):\n for i in range(0, len(list), chunk_size):\n yield list[i:i + chunk_size]",
"def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)",
"def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]",
"def get_chunks(size):\n chunk_start = 0\n chunk_size = 0x20000\n\n while chunk_start + chunk_size < size:\n yield (chunk_start, chunk_size)\n chunk_start += chunk_size\n if chunk_size < 0x100000:\n chunk_size += 0x20000\n\n if chunk_start < size:\n yield (chunk_start, size - chunk_start)",
"def chunks(items, size):\n return [items[i:i+size] for i in range(0, len(items), size)]",
"def chunk(items, chunk_size):\n start_index = 0\n for start_index in xrange(0, len(items), chunk_size):\n end_index = min(start_index+chunk_size, len(items))\n yield items[start_index:end_index]",
"def split_to_chunks(of_list, chunk_size):\n assert of_list is not None\n\n for i in range(0, len(of_list), chunk_size):\n yield of_list[i:i + chunk_size]",
"def split_chunks(\n key: core.ChunkKey,\n dataset: xarray.Dataset,\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n # This function splits consolidated arrays into blocks of new sizes, e.g.,\n # ⌈x_00 x_01 ...⌉ ⌈⌈x_00⌉ ⌈x_01⌉ ...⌉\n # X = |x_10 x_11 ...| = ||x_10| |x_11| ...|\n # |x_20 x_21 ...| |⌊x_20⌋ ⌊x_21⌋ ...|\n # ⌊ ... ... ...⌋ ⌊ ... ... ...⌋\n # and emits them as (ChunkKey, xarray.Dataset) pairs.\n all_bounds = []\n for dim, chunk_size in target_chunks.items():\n start = key.get(dim, 0)\n stop = start + dataset.sizes[dim]\n all_bounds.append(_split_chunk_bounds(start, stop, chunk_size))\n\n for bounds in itertools.product(*all_bounds):\n offsets = dict(key)\n slices = {}\n for dim, (start, stop) in zip(target_chunks, bounds):\n base = key.get(dim, 0)\n offsets[dim] = start\n slices[dim] = slice(start - base, stop - base)\n\n new_key = core.ChunkKey(offsets)\n new_chunk = dataset.isel(slices)\n yield new_key, new_chunk",
"def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]",
"def perform_chunking(self, data_size, chunk_size):\r\n\r\n chunks, i = [], 0\r\n while True:\r\n chunks.append((i * (chunk_size - self.overlap / 2), i * (chunk_size - self.overlap / 2) + chunk_size))\r\n i += 1\r\n if chunks[-1][1] > data_size:\r\n break\r\n\r\n n_count = len(chunks)\r\n chunks[-1] = tuple(x - (n_count * chunk_size - data_size - (n_count - 1) * self.overlap / 2) for x in chunks[-1])\r\n chunks = [(int(x), int(y)) for x, y in chunks]\r\n return chunks",
"def chunks(items, chunk_size):\r\n items = list(items)\r\n return (items[i:i + chunk_size] for i in xrange(0, len(items), chunk_size))",
"def build_map(chunk_start, result, total_chunks, start_id, end_id):\n size = len(chunk_start)\n for i in prange(size):\n beg = chunk_start[i]\n end = chunk_start[i + 1] if i < size - 1 else total_chunks\n if start_id < end and beg < end_id: # [beg, end) intersect [start_id, end_id)\n result[max(beg - start_id, 0) : (end - start_id), 0] = beg\n result[max(beg - start_id, 0) : (end - start_id), 1] = end",
"def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end",
"def in_memory_rechunk(\n inputs: List[Tuple[core.ChunkKey, xarray.Dataset]],\n target_chunks: Mapping[str, int],\n) -> Iterator[Tuple[core.ChunkKey, xarray.Dataset]]:\n key, dataset = consolidate_chunks(inputs)\n yield from split_chunks(key, dataset, target_chunks)",
"def chunks(seq, size):\n for i in range(0, len(seq), size):\n yield seq[i:i + size]",
"def get_chunks(indivs, k):\r\n\tpair_chunk_collection=[]\r\n\tfor i in xrange(0, len(indivs[0])-k+1, k):\r\n\t\tchunks=[]\r\n\t\tfor x in indivs:\r\n\t\t\tchunks.append(x[i:i+k])\r\n\t\tpartial_phase_pairs=tune_em(chunks, 5)[1]\r\n\t\tprint partial_phase_pairs\r\n\t\tpair_chunk_collection.append(partial_phase_pairs)\r\n\treturn pair_chunk_collection",
"def get_chunks(sequence, chunk_size):\n seq_length = len(sequence)\n seq_list = []\n treshold = int(seq_length) // int(chunk_size)\n if treshold <4:\n raise ValueError(\"Change chunk size\")\n for i in range(treshold):\n seq = sequence[i*chunk_size:(i+1)*chunk_size]\n seq_list.append(seq)\n return seq_list",
"def chunk(seq, size, groupByList=True):\n func = tuple\n if groupByList:\n func = list\n return [func(seq[i:i + size]) for i in range(0, len(seq), size)]",
"def _shrink_large_groups_if_needed(testcase_map):\n\n def _key_func(testcase):\n weight = 0\n if not testcase.one_time_crasher_flag:\n weight |= 2**1\n if testcase.issue_id:\n weight |= 2**2\n return weight\n\n group_id_with_testcases_map = {}\n for testcase in six.itervalues(testcase_map):\n if not testcase.group_id:\n continue\n\n if testcase.group_id not in group_id_with_testcases_map:\n group_id_with_testcases_map[testcase.group_id] = [testcase]\n else:\n group_id_with_testcases_map[testcase.group_id].append(testcase)\n\n for group_id in group_id_with_testcases_map:\n testcases_in_group = group_id_with_testcases_map[group_id]\n if len(testcases_in_group) <= GROUP_MAX_TESTCASE_LIMIT:\n continue\n\n testcases_in_group = sorted(testcases_in_group, key=_key_func)\n for testcase in testcases_in_group[:-GROUP_MAX_TESTCASE_LIMIT]:\n try:\n testcase_entity = data_handler.get_testcase_by_id(testcase.id)\n except errors.InvalidTestcaseError:\n # Already deleted.\n continue\n\n if testcase_entity.bug_information:\n continue\n\n logs.log_warn(('Deleting testcase {testcase_id} due to overflowing group '\n '{group_id}.').format(\n testcase_id=testcase.id, group_id=testcase.group_id))\n testcase_entity.key.delete()",
"def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group",
"def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r",
"def chunkify(list,size):\n for i in range (0, len(list), size):\n yield list[i:i+size]",
"def _chunker(self, seq, size):\n return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))",
"def _get_chunks(l, n = 10):\n \n for i in range(0, len(l), n): yield l[i: i + n]",
"def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))",
"def chunks(lst, size):\n for i in range(0, len(lst), size):\n yield lst[i:i + size]",
"def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")",
"def chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]",
"def chunks(cipher, size):\n\treturn [cipher[i*size:(i+1)*size] for i in range(int(math.ceil(len(cipher)*1.0/size)))]"
] | [
"0.6285758",
"0.6224028",
"0.61347896",
"0.6081203",
"0.60160613",
"0.6003113",
"0.5989309",
"0.597895",
"0.596816",
"0.59547454",
"0.5915846",
"0.5897652",
"0.58658063",
"0.5865107",
"0.58630854",
"0.5845289",
"0.584348",
"0.5822176",
"0.58202684",
"0.58082134",
"0.5794542",
"0.5762553",
"0.5761553",
"0.5750741",
"0.57308227",
"0.5718923",
"0.5703445",
"0.56952083",
"0.5680681",
"0.5676635"
] | 0.71322733 | 0 |
The biggest function here. It takes a tensor of random numbers as input, plus extra variables in extvar (for extvar see the KerasCustomMappingLayer class). var_tensor shape is (batch_size(None), 4 * note_group_size); the first dimension is "None", or "?" if you print the shape, and is filled with batch_size at training time. Output shape is (batch_size(None), note_group_size, 6), where the last dimension is (x_start, y_start, x_vector, y_vector, x_end, y_end), all mapped to the [-1, 1] range; the vector in the middle is supposed to be the direction of the cursor after hitting the note. The reason this function is this big is that TensorFlow rewrites functions used in the training loop, including this one as a "mapping layer". That rewriting is impressively done, but I have still run into trouble with it many times, which is why I didn't dare to break this function up into smaller ones. You might notice I didn't use any np calls in this function at all; they would cause problems here, so everything needs to be converted to tf calls instead. Keep that in mind if you're editing it. | def construct_map_with_sliders(var_tensor, extvar=[]):
var_tensor = tf.cast(var_tensor, tf.float32)
var_shape = var_tensor.shape
wall_l = 0.15
wall_r = 0.85
x_max = 512
y_max = 384
out = []
cp = tf.constant([256, 192, 0, 0])
phase = 0
# Should be equal to note_group_size
half_tensor = var_shape[1]//4
# length multiplier
if "length_multiplier" in extvar:
length_multiplier = extvar["length_multiplier"]
else:
length_multiplier = 1
# notedists
if "begin" in extvar:
begin_offset = extvar["begin"]
else:
begin_offset = 0
# note_distances_now = length_multiplier * np.expand_dims(note_distances[begin_offset:begin_offset+half_tensor], axis=0);
# note_angles_now = np.expand_dims(note_angles[begin_offset:begin_offset+half_tensor], axis=0);
# Load external arrays as tensors
relevant_tensors = extvar["relevant_tensors"]
relevant_is_slider = relevant_tensors["is_slider"]
relevant_slider_lengths = relevant_tensors["slider_lengths"]
relevant_slider_types = relevant_tensors["slider_types"]
relevant_slider_cos = relevant_tensors["slider_cos_each"]
relevant_slider_sin = relevant_tensors["slider_sin_each"]
relevant_note_distances = relevant_tensors["note_distances"]
note_distances_now = length_multiplier * \
tf.expand_dims(relevant_note_distances, axis=0)
# init
l = tf.convert_to_tensor(note_distances_now, dtype="float32")
sl = l * 0.7
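# Split the random tensor into cos/sin halves (2 * half_tensor columns each) and normalize every (cos, sin) pair below so it can be used as a unit direction vector.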
cos_list = var_tensor[:, 0:half_tensor * 2]
sin_list = var_tensor[:, half_tensor * 2:]
len_list = tf.sqrt(tf.square(cos_list) + tf.square(sin_list))
cos_list = cos_list / len_list
sin_list = sin_list / len_list
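# Playfield margins: keep a 5% border plus half of the current jump distance l, so a distance-snapped note cannot be pushed outside the 512x384 playfield.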
wall_l = 0.05 * x_max + l * 0.5
wall_r = 0.95 * x_max - l * 0.5
wall_t = 0.05 * y_max + l * 0.5
wall_b = 0.95 * y_max - l * 0.5
# rerand = tf.cast(tf.greater(l, y_max / 2), tf.float32);
# not_rerand = tf.cast(tf.less_equal(l, y_max / 2), tf.float32);
tick_diff = extvar["tick_diff"]
# max_ticks_for_ds is an int variable, converted to float to avoid potential type error
use_ds = tf.expand_dims(tf.cast(tf.less_equal(
tick_diff, extvar["max_ticks_for_ds"]), tf.float32), axis=0)
# rerand = not use distance snap
rerand = 1 - use_ds
not_rerand = use_ds
next_from_slider_end = extvar["next_from_slider_end"]
# Starting position
if "start_pos" in extvar:
_pre_px = extvar["start_pos"][0]
_pre_py = extvar["start_pos"][1]
_px = tf.cast(_pre_px, tf.float32)
_py = tf.cast(_pre_py, tf.float32)
else:
_px = tf.cast(256, tf.float32)
_py = tf.cast(192, tf.float32)
# this is not important since the first position starts at _ppos + Δpos
_x = tf.cast(256, tf.float32)
_y = tf.cast(192, tf.float32)
# Use a buffer to save output
outputs = tf.TensorArray(tf.float32, half_tensor)
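# Walk through the note group one note at a time, carrying the previous position (_px, _py) forward into the next iteration.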
for k in range(half_tensor):
# r_max = 192, r = 192 * k, theta = k * 10
rerand_x = 256 + 256 * var_tensor[:, k]
rerand_y = 192 + 192 * var_tensor[:, k + half_tensor*2]
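# Fallback position drawn directly from the random tensor; it is used when distance snap is disabled for this note (rerand == 1).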
# Distance snap start
# If the starting point is close to the wall, use abs() to make sure it doesn't go outside the boundaries
delta_value_x = l[:, k] * cos_list[:, k]
delta_value_y = l[:, k] * sin_list[:, k]
# This is a tensor calculation batched over 8~32 samples per call, so Python if/else does not work here.
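# Instead, build mutually exclusive 0/1 masks (left of / right of / between the walls) and blend the three cases arithmetically: near a wall the step is forced back toward the centre, otherwise the original delta is kept.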
wall_value_l = tf.cast(tf.less(_px, wall_l[:, k]), tf.float32)
wall_value_r = tf.cast(tf.greater(_px, wall_r[:, k]), tf.float32)
wall_value_xmid = tf.cast(tf.greater(
_px, wall_l[:, k]), tf.float32) * tf.cast(tf.less(_px, wall_r[:, k]), tf.float32)
wall_value_t = tf.cast(tf.less(_py, wall_t[:, k]), tf.float32)
wall_value_b = tf.cast(tf.greater(_py, wall_b[:, k]), tf.float32)
wall_value_ymid = tf.cast(tf.greater(
_py, wall_t[:, k]), tf.float32) * tf.cast(tf.less(_py, wall_b[:, k]), tf.float32)
x_delta = tf.abs(delta_value_x) * wall_value_l - tf.abs(delta_value_x) * \
wall_value_r + delta_value_x * wall_value_xmid
y_delta = tf.abs(delta_value_y) * wall_value_t - tf.abs(delta_value_y) * \
wall_value_b + delta_value_y * wall_value_ymid
# rerand_* if not using distance snap, (_p* + *_delta) if using distance snap
_x = rerand[:, k] * rerand_x + not_rerand[:, k] * (_px + x_delta)
_y = rerand[:, k] * rerand_y + not_rerand[:, k] * (_py + y_delta)
# _x = rerand_x;
# _y = rerand_y;
# _x = _px + x_delta;
# _y = _py + y_delta;
# Distance snap end
# calculate output vector
# slider part
sln = relevant_slider_lengths[k]
slider_type = relevant_slider_types[k]
scos = relevant_slider_cos[k]
ssin = relevant_slider_sin[k]
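# (_a, _b) below is the slider body direction taken from the random tensor; (scos, ssin) encodes a per-slider rotation (derived from the slider type) that is applied to get the out-going direction at the slider end.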
_a = cos_list[:, k + half_tensor]
_b = sin_list[:, k + half_tensor]
# cos(a+θ) = cosa cosθ - sina sinθ
# sin(a+θ) = cosa sinθ + sina cosθ
_oa = _a * scos - _b * ssin
_ob = _a * ssin + _b * scos
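# Slider row: head position, out-going direction (_oa, _ob), and tail position sln pixels along the body direction (_a, _b), with coordinates divided by the playfield size.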
cp_slider = tf.transpose(tf.stack(
[_x / x_max, _y / y_max, _oa, _ob, (_x + _a * sln) / x_max, (_y + _b * sln) / y_max]))
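# next_from_slider_end decides whether the next note is placed relative to the slider tail or to its head.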
_px_slider = tf.cond(next_from_slider_end,
lambda: _x + _a * sln, lambda: _x)
_py_slider = tf.cond(next_from_slider_end,
lambda: _y + _b * sln, lambda: _y)
# circle part
_a = rerand[:, k] * cos_list[:, k + half_tensor] + \
not_rerand[:, k] * cos_list[:, k]
_b = rerand[:, k] * sin_list[:, k + half_tensor] + \
not_rerand[:, k] * sin_list[:, k]
# _a = cos_list[:, k + half_tensor];
# _b = sin_list[:, k + half_tensor];
cp_circle = tf.transpose(
tf.stack([_x / x_max, _y / y_max, _a, _b, _x / x_max, _y / y_max]))
_px_circle = _x
_py_circle = _y
# Outputs are scaled to [0,1] region
outputs = outputs.write(k, tf.where(
relevant_is_slider[k], cp_slider, cp_circle))
# Set starting point for the next circle/slider
_px = tf.where(
tf.cast(relevant_is_slider[k], tf.bool), _px_slider, _px_circle)
_py = tf.where(
tf.cast(relevant_is_slider[k], tf.bool), _py_slider, _py_circle)
return tf.transpose(outputs.stack(), [1, 0, 2]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generator(real_init): #-1, 5, 3\n with tf.variable_scope('rnn_gen', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n x_init = real_init[:,:,0] #-1, 8\n y_init = real_init[:,:,1] #-1, 8\n z_init = real_init[:,:,2] #-1, 8\n \n with tf.variable_scope('x_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n x_w1 = tf.get_variable('x_w1', [6, 3])\n x_b1 = tf.get_variable('x_b1', [3])\n x_w2 = tf.get_variable('x_w2', [9, 3])\n x_b2 = tf.get_variable('x_b2', [3])\n x_w3 = tf.get_variable('x_w3', [3, 1])\n x_b3 = tf.get_variable('x_b3', [1])\n x_w4 = tf.get_variable('x_w4', [3, 1])\n x_b4 = tf.get_variable('x_b4', [1])\n \n x_full = x_init#-1, 8\n for i in range(104):\n x_layer1 = tf.nn.elu(tf.matmul(x_init, x_w1) + x_b1)#-1, 4\n x_layer2 = tf.concat([x_init, x_layer1], 1)#-1, 12\n x_layer3 = tf.nn.elu(tf.matmul(x_layer2, x_w2) + x_b2)#-1, 4\n \n x_mean = tf.sigmoid(tf.matmul(x_layer3, x_w3) + x_b3)#-1, 1\n x_gamma = tf.sigmoid(tf.matmul(x_layer3, x_w4) + x_b4)#-1, 1\n x_delta = x_mean + x_gamma * tf.random_normal(tf.shape(x_gamma), dtype = tf.float32)#-1, 1\n x_delta_denorm = (x_delta * 2 - 1) / 80\n x_output = tf.expand_dims(x_full[:,-1], 1) + x_delta_denorm\n x_full = tf.concat([x_full, x_output], axis = 1)#-1, 10 + 1\n x_init = x_full[:,-6:]\n \n with tf.variable_scope('y_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n y_w1 = tf.get_variable('y_w1', [6, 3])\n y_b1 = tf.get_variable('y_b1', [3])\n y_w2 = tf.get_variable('y_w2', [9, 3])\n y_b2 = tf.get_variable('y_b2', [3])\n y_w3 = tf.get_variable('y_w3', [3, 1])\n y_b3 = tf.get_variable('y_b3', [1])\n y_w4 = tf.get_variable('y_w4', [3, 1])\n y_b4 = tf.get_variable('y_b4', [1])\n \n y_full = y_init#-1, 8\n for i in range(104):\n y_layer1 = tf.nn.elu(tf.matmul(y_init, y_w1) + y_b1)\n y_layer2 = tf.concat([y_init, y_layer1], 1)#-1, 12\n y_layer3 = tf.nn.elu(tf.matmul(y_layer2, y_w2) + y_b2)#-1, 4\n \n y_mean = tf.sigmoid(tf.matmul(y_layer3, y_w3) + y_b3) #-1, 1\n y_gamma = tf.sigmoid(tf.matmul(y_layer3, y_w4) + y_b4) #-1, 1\n y_delta = y_mean + y_gamma * tf.random_normal(tf.shape(y_gamma), dtype = tf.float32)#-1, 1\n y_delta_denorm = (y_delta * 5 - 2.7) / 1700\n y_output = tf.expand_dims(y_full[:,-1], 1) + y_delta_denorm\n y_full = tf.concat([y_full, y_output], axis = 1)#-1, 10 + 1, 1\n y_init = y_full[:,-6:]\n \n with tf.variable_scope('z_rw', reuse = tf.AUTO_REUSE, initializer=tf.contrib.layers.xavier_initializer()) as scope:\n z_w1 = tf.get_variable('z_w1', [6, 3])\n z_b1 = tf.get_variable('z_b1', [3])\n z_w2 = tf.get_variable('z_w2', [9, 3])\n z_b2 = tf.get_variable('z_b2', [3])\n z_w3 = tf.get_variable('z_w3', [3, 1])\n z_b3 = tf.get_variable('z_b3', [1])\n z_w4 = tf.get_variable('z_w4', [3, 1])\n z_b4 = tf.get_variable('z_b4', [1])\n \n z_full = z_init#-1, 8\n for i in range(104):\n z_layer1 = tf.nn.elu(tf.matmul(z_init, z_w1) + z_b1)\n z_layer2 = tf.concat([z_init, z_layer1], 1)#-1, 12\n z_layer3 = tf.nn.elu(tf.matmul(z_layer2, z_w2) + z_b2)#-1, 4\n \n z_mean = tf.sigmoid(tf.matmul(z_layer3, z_w3) + z_b3) #-1, 1\n z_gamma = tf.sigmoid(tf.matmul(z_layer3, z_w4) + z_b4) #-1, 1\n z_delta = z_mean + z_gamma * tf.random_normal(tf.shape(z_gamma), dtype = tf.float32)#-1, 1\n z_delta_denorm = (z_delta * 3 - 2.2) / 300\n z_output = tf.expand_dims(z_full[:,-1], 1) + z_delta_denorm\n z_full = tf.concat([z_full, z_output], axis = 1)#-1, 10 + 1, 1\n z_init = z_full[:,-6:]\n \n rand_layer1 = tf.concat([tf.expand_dims(x_full, 2), 
tf.expand_dims(y_full, 2), tf.expand_dims(z_full, 2)], axis = 2)#-1, 200, 3\n rand_layer1_reshape = tf.reshape(rand_layer1, shape = (-1, 110, 3, 1))\n output_filt = tf.ones(shape = (11, 1, 1, 1)) / 11\n \n rand_layer2 = tf.nn.conv2d(rand_layer1_reshape, output_filt, strides = [1,1,1,1], padding = 'VALID')\n \n return tf.reshape(rand_layer2, shape = (-1, 100, 3)) #-1, 110, 3",
"def build_rnn(x, h, output_size, scope, n_layers, size, gru_size, activation=tf.tanh, output_activation=None, regularizer=None):\n #====================================================================================#\n # ----------PROBLEM 2----------\n #====================================================================================#\n # YOUR CODE HERE\n # raise NotImplementedError\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n ## step 1 - inputs are first embedded by an MLP\n x = build_mlp(x, output_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)\n # print(\"after build_mlp x = \", x)\n ## step 2 - passed to a GRU cell\n # as \"sy_hidden = tf.placeholder(shape=[None, self.gru_size], name=\"hidden\", dtype=tf.float32)\" -> h, so we can only pass for 1 single GRUCell, if multiple CRUCell, hidden state will be agumented.\n # as x is the size of (?, output_size) in output of build_mlp, the rnn must be equal with size\n x, h = tf.nn.dynamic_rnn(tf.nn.rnn_cell.GRUCell(output_size, activation=activation), x, initial_state=h)\n # after build_mlp x = Tensor(\"continuous_logits/continuous_logits/fc1/BiasAdd:0\", shape=(?, 1, 32), dtype=float32)\n # x = tf.squeeze(x, axis=1)\n x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))\n # print(\"dynamic_rnn build_mlp x = \", x, \" h = \", h)\n return x, h",
"def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n logger.debug(\"Defining Inputs\")\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = tf.keras.layers.Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = tf.keras.layers.Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = tf.keras.layers.Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = tf.keras.layers.Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n logger.debug(\"Defining the sequential models\")\n\n # Encoder GRU\n encoder_gru = tf.keras.layers.GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = tf.keras.layers.GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n logger.debug(\"Defining the attention layer\")\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = tf.keras.layers.Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n logger.debug(\"Defining the dense layers\")\n # Dense layer\n dense = tf.keras.layers.Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = tf.keras.layers.TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n logger.debug(\"Defining the full model\")\n # Full model\n full_model = tf.keras.models.Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n logger.debug(\"Defining the inference model\")\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = tf.keras.models.Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = tf.keras.layers.Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = tf.keras.layers.Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = tf.keras.layers.Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = tf.keras.layers.Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = tf.keras.layers.TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = tf.keras.models.Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return 
full_model, encoder_model, decoder_model",
"def tensor_network_aug(inputs, states, output_size, rank_vals, bias, bias_start=0.0):\n # each coordinate of hidden state is independent- parallel\n num_orders = len(rank_vals)+1\n num_lags = len(states)\n batch_size = tf.shape(inputs)[0]\n state_size = states[0].get_shape()[1].value #hidden layer size\n inp_size = inputs.get_shape()[1].value\n total_state_size = (inp_size + state_size * num_lags + 1 )\n\n mat_dims = np.ones((num_orders,)) * total_state_size\n mat_ranks = np.concatenate(([1], rank_vals, [output_size]))\n mat_ps = np.cumsum(np.concatenate(([0], mat_ranks[:-1] * mat_dims * mat_ranks[1:])),dtype=np.int32)\n mat_size = mat_ps[-1]\n mat = vs.get_variable(\"weights\", mat_size) # h_z x h_z... x output_size\n\n states = (inputs,) + states # concatenate the [x, h] \n \n states_tensor = nest.flatten(states)\n #total_inputs = [inputs]\n states_vector = tf.concat(states, 1)\n states_vector = tf.concat( [states_vector, tf.ones([batch_size, 1])], 1)\n \"\"\"form high order state tensor\"\"\"\n states_tensor = states_vector\n for order in range(num_orders-1):\n states_tensor = _outer_product(batch_size, states_tensor, states_vector)\n \n # states_tensor= tf.reshape(states_tensor, [-1,total_state_size**num_orders] )\n\n cores = []\n for i in range(num_orders):\n # Fetch the weights of factor A^i from our big serialized variable weights_h.\n mat_core = tf.slice(mat, [mat_ps[i]], [mat_ps[i + 1] - mat_ps[i]])\n mat_core = tf.reshape(mat_core, [mat_ranks[i], total_state_size, mat_ranks[i + 1]]) \n cores.append(mat_core)\n \n res = tensor_train_contraction(states_tensor, cores)\n if not bias:\n return res\n biases = vs.get_variable(\"biases\", [output_size])\n return nn_ops.bias_add(res,biases)",
"def call(self, inputs, training=None, mask=None):\n \"\"\"\n We would like to proceed with a batching point of view.\n The problem here, is that tf.map_fn creates a graph for each realisation, making us loose the initialization on the current graph...\n Thus we cannot use it here, while this has not been fixed in tensorflow!\n \"\"\"\n inputs = tf.cast(tf.convert_to_tensor(inputs),dtype=tf.float64)\n\n if training:\n self.verifyMask()\n inputs = inputs/self.rescaleFactor\n\n if self.usingLog:\n inputs = tf.exp(inputs)\n\n gatheredCps = tf.stop_gradient(tf.fill([tf.shape(inputs)[0]],tf.reshape(self._obtainCp(inputs[0]),())))\n gatheredCps = tf.reshape(gatheredCps,((tf.shape(inputs)[0],1)))\n tf.assert_equal(tf.shape(gatheredCps),(tf.shape(inputs)[0],1))\n #\n # gatheredCps = tf.stop_gradient(self.obtainCp(inputs))\n # gatheredCps = tf.fill([tf.shape(inputs)[0]],tf.reshape(self.mycps,()))\n # gatheredCps = tf.reshape(gatheredCps,((tf.shape(inputs)[0],1)))\n\n #self.meanGatheredCps.assign(tf.reduce_mean(gatheredCps))\n #tf.summary.scalar(\"mean_cp\",data=tf.reduce_mean(gatheredCps),step=tf.summary.experimental.get_step())\n\n x = self.layerList[0](inputs,cps=gatheredCps,isFirstLayer=True)\n for l in self.layerList[1:]:\n if self.usingLog:\n x = l(tf.exp(x),cps=gatheredCps)\n else:\n x = l(x,cps=gatheredCps)\n if self.usingSoftmax:\n if self.usingLog:\n s = tf.keras.activations.softmax(tf.exp(x))\n else:\n s = tf.keras.activations.softmax(x)\n else:\n s = x\n return s",
"def build_mlp(x, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):\n i = 0\n for i in range(n_layers):\n x = tf.layers.dense(inputs=x,units=size, activation=activation, name='fc{}'.format(i), kernel_regularizer=regularizer, bias_regularizer=regularizer)\n\n x = tf.layers.dense(inputs=x, units=output_size, activation=output_activation, name='fc{}'.format(i + 1), kernel_regularizer=regularizer, bias_regularizer=regularizer)\n return x",
"def add_dense_layer(self):\n output = self.cnn_layer\n # weight_shape is dimension 1 of the first weights in the dense layer. We\n # update it accordingly as we keep adding intermediate layers\n weight_shape = self.out_channels_2\n if self.hybrid:\n with tf.variable_scope(\"lstm_layer\"):\n print(\"adding Recurrent layer\")\n if self.rnn:\n print(\"Adding Basic RNN cell\")\n cell_fw = tf.contrib.rnn.BasicRNNCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.BasicRNNCell(self.lstm_size)\n elif self.gru:\n print(\"Adding GRU cell\")\n cell_fw = tf.contrib.rnn.GRUCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.lstm_size)\n elif self.elman:\n print(\"Adding ELMAN cell\")\n cell_fw = ElmanRNNCell(self.lstm_size)\n cell_bw = ElmanRNNCell(self.lstm_size)\n else:\n print(\"Adding LSTM cell\")\n cell_fw = tf.contrib.rnn.LSTMCell(self.lstm_size)\n cell_bw = tf.contrib.rnn.LSTMCell(self.lstm_size)\n if self.use_window_rnn:\n cnn_values = self.cnn_values\n print(\"using window_rnn\")\n else:\n print(\"not using window_rnn\")\n cnn_values = self.cnn_layer\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, cnn_values,\n sequence_length=self.sequence_lengths, dtype=tf.float32\n )\n output = tf.concat([output_fw, output_bw], axis=-1)\n output = tf.nn.dropout(output, self.dropout_holder)\n self.word_reps = output\n weight_shape = 2*self.lstm_size\n with tf.variable_scope(\"dense_layer\"):\n print(\"adding dense layer\")\n # weight_shape_2 is dimension 2 of the first weights in the dense layer.\n # we update it accordingly if we add more layers at the end\n weight_shape_2 = self.tag_count\n if self.pos and (self.single_ann or self.pos_ann):\n # this if block is for the cases when we have to concatenate the output\n # from prevoius layer to pos_embeddings\n print(\"Concatenating pos vecs with lstm/cnn output\")\n output = tf.concat([output, self.pos_vecs], axis=-1)\n weight_shape = weight_shape + self.pos_embedding_size\n output = tf.reshape(output, [-1, weight_shape])\n if self.single_ann:\n # This means we will have a hidden layer i.e two weights.\n print(\"Setting up hidden layer for single ann\")\n weight_shape_2 = self.ann_size\n if self.pos and not self.concat_pos and self.bi_ann:\n # this if block si for the case when we want to implement a bi-partite\n # ann, as such we will get weights for pos embeddings\n print(\"setting up network for bi ann..\")\n weight_shape_2 = self.ann_size\n w_pos = tf.get_variable(\"w_pos\", dtype=tf.float32,\n shape=[self.pos_embedding_size,\n self.pos_ann_count])\n b_pos = tf.get_variable(\"b_pos\", dtype=tf.float32,\n shape=[self.pos_ann_count],\n initializer=tf.zeros_initializer())\n output_pos = tf.reshape(self.pos_vecs, [-1, self.pos_embedding_size])\n pred_pos = tf.matmul(output_pos, w_pos) + b_pos\n output = tf.reshape(output, [-1, weight_shape])\n if len(output.shape) == 3:\n print(\"Setting up a network with no ann, only a fully connected dense \"\n \"layer\")\n # we have to reshape our output so that we can multiply it with weights.\n # if we reach here, it means we haven't reshaped it properly.\n output = tf.reshape(output, [-1, weight_shape])\n w1 = tf.get_variable(\"w1\", dtype=tf.float32,\n shape=[weight_shape, weight_shape_2])\n b1 = tf.get_variable(\"b1\", dtype=tf.float32, shape=[weight_shape_2],\n initializer=tf.zeros_initializer())\n pred = tf.matmul(output, w1) + b1\n # the pred above has weight_shape_2 as it's second dimension. 
our vars\n # have been defined in such a way that weight_shape_2 = self.tag_count if\n # we don't have to care about adding another layer.\n if self.pos and not self.concat_pos and self.bi_ann:\n # This block deals with concatenating output of pos hidden layer to\n # pred. We change weight_shape_2 accordingly.\n print(\"setting up network for bi ann..\")\n pred = tf.concat([pred, pred_pos], axis=-1)\n weight_shape_2 = weight_shape_2 + self.pos_ann_count\n\n if self.pos and (self.single_ann or self.bi_ann):\n # now, these are our final sets of weights in case we have an ann. so\n # the second dimension of these weights is self.tag_count!\n print(\"setting up network for ann\")\n w2 = tf.get_variable(\"w2\", dtype=tf.float32, shape=[weight_shape_2,\n self.tag_count])\n b2 = tf.get_variable(\"b2\", shape=[self.tag_count], dtype=tf.float32,\n initializer=tf.zeros_initializer())\n pred = tf.matmul(pred, w2) + b2\n self.logits = tf.reshape(pred, [-1, self.max_len, self.tag_count])",
"def add_prediction_op(self):\n\n #x_dropout = tf.keras.layers.SpatialDropout1D(0.4).apply(self.input_placeholder)\n layer_1_size = 150\n layer_2_size = 25\n num_aux_feats = 4\n\n rnn_cell_layer_1_fwd = tf.nn.rnn_cell.GRUCell(\n layer_1_size,\n activation='relu',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer(),\n name=\"gru_1\",\n dtype=tf.float32\n )\n\n rnn_cell_layer_1_bwd = tf.nn.rnn_cell.GRUCell(\n layer_1_size,\n activation='relu',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer(),\n name=\"gru_1\",\n dtype=tf.float32\n )\n\n rnn_cell_layer_2_fwd = tf.nn.rnn_cell.GRUCell(\n layer_2_size,\n activation='relu',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer(),\n name=\"gru_2\",\n dtype=tf.float32\n )\n\n rnn_cell_layer_2_bwd = tf.nn.rnn_cell.GRUCell(\n layer_2_size,\n activation='relu',\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n bias_initializer=tf.zeros_initializer(),\n name=\"gru_2\",\n dtype=tf.float32\n )\n\n outputs_1, states_1 = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=rnn_cell_layer_1_fwd,\n cell_bw=rnn_cell_layer_1_bwd,\n inputs=self.input_placeholder,\n sequence_length=self.batch_seq_length_placeholder,\n dtype=tf.float32\n )\n h1 = tf.concat(outputs_1, axis=2, name=\"concat1\")\n\n outputs_2, states_2 = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=rnn_cell_layer_2_fwd,\n cell_bw=rnn_cell_layer_2_bwd,\n inputs=h1,\n dtype=tf.float32\n )\n h2 = tf.concat(states_2, axis=1, name=\"concat2\")\n\n h3 = tf.concat(\n [\n h2,\n tf.reshape(self.batch_unique_count_placeholder, (-1, 1)),\n tf.reduce_max(h2, axis=1, keepdims=True)\n ],\n axis=1,\n name=\"concat3\"\n )\n num_aux_feats = 2\n\n #h_drop = tf.nn.dropout(state, keep_prob=0.8)\n\n with tf.name_scope(\"classifier\"):\n self.W_ho = tf.get_variable(\n \"W_ho\",\n (layer_2_size * 2 + num_aux_feats, self.config.n_classes),\n tf.float32,\n tf.contrib.layers.xavier_initializer(),\n trainable=True\n )\n self.b_o = tf.get_variable(\n \"bo\",\n (1, self.config.n_classes),\n tf.float32, tf.zeros_initializer(),\n trainable=True\n )\n pred = tf.matmul(h3, self.W_ho) + self.b_o\n\n return pred",
"def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n # TODO: Implement function\n # we use an L2 regularizer to prevent weight overfitting. \n # ... kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3)) ...\n \n # 1x1 conv of vgg layer 7\n l7_out = tf.layers.conv2d( vgg_layer7_out, num_classes, 1, padding='same', \n kernel_initializer = tf.random_normal_initializer(stddev=0.02),\n kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3));\n\n # upsampling layer 7 output\n l4a_input_1 = tf.layers.conv2d_transpose( l7_out, num_classes, 4, strides=(2,2), padding='same',\n kernel_initializer = tf.random_normal_initializer(stddev=0.02),\n kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3));\n\n # 1x1 conv of vgg layer 4 from encoder\n l4a_input_2 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.01), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n \n # connect layer 4 and previous layer 4 by adding them\n layer4dec_output = tf.add(l4a_input_1, l4a_input_2)\n\n \n\n # upsample layer4dec output\n l3dec_input_1 = tf.layers.conv2d_transpose(layer4dec_output, num_classes, 4, strides= (2, 2), padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.02), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n \n # 1x1 conv of vgg layer 3 encoder output\n l3dec_input_2 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, \n padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.02), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n\n\n # connect layer 3s by adding them\n layer3dec_output = tf.add(l3dec_input_1, l3dec_input_2)\n\n # upsample\n out_layer = tf.layers.conv2d_transpose(layer3dec_output, num_classes, 16, strides= (8, 8), padding= 'same', \n kernel_initializer= tf.random_normal_initializer(stddev=0.02), \n kernel_regularizer= tf.contrib.layers.l2_regularizer(1e-3))\n\n return out_layer",
"def decode(yolo_output, num_of_anchor_bbox, classes, strides, anchors, index):\n \"\"\" takes in tensor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) \"\"\"\n \"\"\" returns tesnor of shape (batch_size, gridsize_x, gridsize_y, number of anchor boxes, number of classes) \"\"\"\n \n # takes in original anchors and process to scaled anchors based on strides for respective scales\n anchors_scaled = (np.array(anchors).T/strides).T\n \n # obtain dimensions from yolo_output\n conv_shape = tf.shape(yolo_output)\n batch_size = conv_shape[0]\n grid_size = conv_shape[1:3]\n\n # reshape yolo_output\n yolo_output = tf.reshape(yolo_output, (batch_size, grid_size[0], grid_size[1], num_of_anchor_bbox, 5 + classes))\n\n # split yolo_output along last axis to extract features\n raw_dx_dy, raw_dw_dh, raw_objectiveness, raw_class_probs = tf.split(yolo_output, (2, 2, 1, classes), axis = -1)\n\n # create grid where grid[x][y] == (y, x)\n xy_grid = tf.meshgrid(tf.range(grid_size[1]), tf.range(grid_size[0]))\n\n # reshape to [gx, gy, 1, 2] and cast to float32 data type\n xy_grid = tf.expand_dims(tf.stack(xy_grid, axis = -1), axis = 2) \n xy_grid = tf.cast(xy_grid, tf.float32)\n\n # calculate the center position of the prediction box (train_input_size):\n pred_xy = (tf.sigmoid(raw_dx_dy) + xy_grid) * strides[index]\n\n # calculate the length and width of the prediction box (train_input_size):\n pred_wh = (tf.exp(raw_dw_dh) * anchors_scaled[index]) * strides[index]\n\n # concatenate pred_xy and pred_wh\n pred_xywh = tf.concat([pred_xy, pred_wh], axis = -1)\n\n # objectiveness score\n pred_objectiveness = tf.sigmoid(raw_objectiveness) \n\n # class probabilities\n pred_prob = tf.sigmoid(raw_class_probs) \n\n # concatenate decoded results\n pred = tf.concat([pred_xywh, pred_objectiveness, pred_prob], axis = -1)\n\n return pred",
"def get_model_tweetonly(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size)\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n outputs_fin = outputs[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs]",
"def train_step(\n return_var, updates, x_unlabeled, inputs, batch_sizes, batches_per_epoch=100\n):\n return_vars_ = np.zeros(shape=(len(return_var)))\n # train batches_per_epoch batches\n for batch_num in range(0, batches_per_epoch):\n feed_dict = {K.learning_phase(): 1}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n batch_ids = np.random.choice(\n len(x_unlabeled[0]), size=batch_sizes[input_type], replace=False\n )\n for i in range(len(input_placeholder)):\n feed_dict[input_placeholder[i]] = x_unlabeled[i][batch_ids]\n\n all_vars = return_var + updates\n return_vars_ += np.asarray(\n K.get_session().run(all_vars, feed_dict=feed_dict)[: len(return_var)]\n )\n\n return return_vars_",
"def input_fn_builder(features, seq_length, is_training, drop_remainder):\n \"\"\"This is used to make the proper format of the prediction variable\"\"\"\n\n all_input_ids = []\n all_input_mask = []\n all_segment_ids = []\n all_label_ids = []\n\n for feature in features:\n all_input_ids.append(feature.input_ids)\n all_input_mask.append(feature.input_mask)\n all_segment_ids.append(feature.segment_ids)\n all_label_ids.append(feature.label_id)\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n print(params)\n batch_size = 500\n\n num_examples = len(features)\n\n d = tf.data.Dataset.from_tensor_slices({\n \"input_ids\":\n tf.constant(\n all_input_ids, shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"input_mask\":\n tf.constant(\n all_input_mask,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"segment_ids\":\n tf.constant(\n all_segment_ids,\n shape=[num_examples, seq_length],\n dtype=tf.int32),\n \"label_ids\":\n tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),\n })\n\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)\n return d\n\n return input_fn",
"def generate_batch_tf(batch_size, num_skips, skip_window):\n # define data_index to have global scope so that other functions etc. can use it\n global data_index\n assert batch_size % num_skips == 0\n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1 # [ skip_window target skip_window ]\n # a deque is like a list but with efficient reads / writes to both ends\n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n buffer.append(data[data_index])\n # this is data_index++ but it loops back to 0 when data_index = len(data)\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips): # // is the divide and floor operator\n target = skip_window # target label at the center of the buffer\n targets_to_avoid = [skip_window]\n for j in range(num_skips): # sample num_skips different labels for each input word from this buffer\n while target in targets_to_avoid: # keep sampling until we select a different value\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index]) # this appends a value to the end and pops off the first value\n data_index = (data_index + 1) % len(data)\n return batch, labels",
"def test():\n\n sess = tf.Session()\n inputs = tf.placeholder(name='inputs', shape=[16, 224, 224, 3], dtype=tf.float32)\n fcn_1, end_points = fcn_8(inputs)\n for ep in end_points.keys():\n print(ep, end_points[ep].shape)\n\n print('-' * 8)\n vars = tf.global_variables()\n for var in vars:\n print(var.name, var.shape)",
"def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):\n\n def _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n rep = tf.transpose(\n tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])\n rep = tf.cast(rep, 'int32')\n x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n return tf.reshape(x, [-1])\n\n def _interpolate(im, x, y, out_size):\n with tf.variable_scope('_interpolate'):\n # constants\n num_batch = tf.shape(im)[0]\n height = tf.shape(im)[1]\n width = tf.shape(im)[2]\n channels = tf.shape(im)[3]\n\n x = tf.cast(x, 'float32')\n y = tf.cast(y, 'float32')\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')\n max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')\n\n # scale indices from [-1, 1] to [0, width/height]\n x = (x + 1.0)*(width_f) / 2.0\n y = (y + 1.0)*(height_f) / 2.0\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = width\n dim1 = width*height\n base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)\n base_y0 = base + y0*dim2\n base_y1 = base + y1*dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n Ia = tf.gather(im_flat, idx_a)\n Ib = tf.gather(im_flat, idx_b)\n Ic = tf.gather(im_flat, idx_c)\n Id = tf.gather(im_flat, idx_d)\n\n # and finally calculate interpolated values\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)\n wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)\n wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)\n wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)\n output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])\n return output\n\n def _meshgrid(height, width):\n with tf.variable_scope('_meshgrid'):\n # This should be equivalent to:\n # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),\n # np.linspace(-1, 1, height))\n # ones = np.ones(np.prod(x_t.shape))\n # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])\n x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),\n tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))\n y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),\n tf.ones(shape=tf.stack([1, width])))\n\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n\n ones = tf.ones_like(x_t_flat)\n grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])\n return grid\n\n def _transform(theta, input_dim, out_size):\n with tf.variable_scope('_transform'):\n num_batch = tf.shape(input_dim)[0]\n height = tf.shape(input_dim)[1]\n width = tf.shape(input_dim)[2]\n num_channels = tf.shape(input_dim)[3]\n theta = tf.reshape(theta, (-1, 2, 3))\n theta = tf.cast(theta, 'float32')\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n grid = _meshgrid(out_height, out_width)\n grid = tf.expand_dims(grid, 0)\n grid = 
tf.reshape(grid, [-1])\n grid = tf.tile(grid, tf.stack([num_batch]))\n grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))\n\n # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)\n T_g = tf.matmul(theta, grid)\n x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate(\n input_dim, x_s_flat, y_s_flat,\n out_size)\n\n output = tf.reshape(\n input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))\n return output\n\n with tf.variable_scope(name):\n output = _transform(theta, U, out_size)\n return output",
"def rnn_decoder_with_attention(decoder_inputs, initial_state, cell, loop_function,attention_states,scope=None):#3D Tensor [batch_size x attn_length x attn_size]\n with tf.variable_scope(scope or \"rnn_decoder\"):\n print(\"rnn_decoder_with_attention started...\")\n state = initial_state #[batch_size x cell.state_size].\n _, hidden_size = state.get_shape().as_list() #200\n attention_states_original=attention_states\n batch_size,sequence_length,_=attention_states.get_shape().as_list()\n outputs = []\n prev = None\n #################################################\n for i, inp in enumerate(decoder_inputs):#循环解码部分的输入。如sentence_length个[batch_size x input_size]\n # 如果是训练,使用训练数据的输入;如果是test, 将t时刻的输出作为t + 1 时刻的s输入\n if loop_function is not None and prev is not None:#测试的时候:如果loop_function不为空且前一个词的值不为空,那么使用前一个的值作为RNN的输入\n with tf.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n ##ATTENTION#################################################################################################################################################\n # 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size]\n query=state\n W_a = tf.get_variable(\"W_a\", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))\n query=tf.matmul(query, W_a) #[batch_size,hidden_size]\n query=tf.expand_dims(query,axis=1) #[batch_size, 1, hidden_size]\n U_a = tf.get_variable(\"U_a\", shape=[hidden_size, hidden_size],initializer=tf.random_normal_initializer(stddev=0.1))\n U_aa = tf.get_variable(\"U_aa\", shape=[ hidden_size])\n attention_states=tf.reshape(attention_states,shape=(-1,hidden_size)) #[batch_size*sentence_length,hidden_size]\n attention_states=tf.matmul(attention_states, U_a) #[batch_size*sentence_length,hidden_size]\n #print(\"batch_size\",batch_size,\" ;sequence_length:\",sequence_length,\" ;hidden_size:\",hidden_size) #print(\"attention_states:\", attention_states) #(?, 200)\n attention_states=tf.reshape(attention_states,shape=(-1,sequence_length,hidden_size)) # TODO [batch_size,sentence_length,hidden_size]\n #query_expanded: [batch_size,1, hidden_size]\n #attention_states_reshaped: [batch_size,sentence_length,hidden_size]\n attention_logits=tf.nn.tanh(query+attention_states+U_aa) #[batch_size,sentence_length,hidden_size]. 
additive style\n\n # 2.get possibility of attention\n attention_logits=tf.reshape(attention_logits,shape=(-1,hidden_size)) #batch_size*sequence_length [batch_size*sentence_length,hidden_size]\n V_a = tf.get_variable(\"V_a\", shape=[hidden_size,1],initializer=tf.random_normal_initializer(stddev=0.1)) #[hidden_size,1]\n attention_logits=tf.matmul(attention_logits,V_a) #最终需要的是[batch_size*sentence_length,1]<-----[batch_size*sentence_length,hidden_size],[hidden_size,1]\n attention_logits=tf.reshape(attention_logits,shape=(-1,sequence_length)) #attention_logits:[batch_size,sequence_length]\n ##########################################################################################################################################################\n #attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length]\n attention_logits_max=tf.reduce_max(attention_logits,axis=1,keep_dims=True) #[batch_size x 1]\n # possibility distribution for each encoder input.it means how much attention or focus for each encoder input\n p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length]\n\n # 3.get weighted sum of hidden state for each encoder input as attention state\n p_attention=tf.expand_dims(p_attention,axis=2) #[batch_size x attn_length x 1]\n # attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length];\n attention_final=tf.multiply(attention_states_original,p_attention) #[batch_size x attn_length x attn_size]\n context_vector=tf.reduce_sum(attention_final,axis=1) #[batch_size x attn_size]\n ############################################################################################################################################################\n #inp:[batch_size x input_size].it is decoder input; attention_final:[batch_size x attn_size]\n output, state = cell(inp, state,context_vector) #attention_final TODO 使用RNN走一步\n outputs.append(output) # 将输出添加到结果列表中\n if loop_function is not None:\n prev = output\n print(\"rnn_decoder_with_attention ended...\")\n return outputs, state",
"def get_model_conditional_target_feed(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\": # continue training embeddings or not. Currently works better to continue training them.\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1),\n # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix, inputs_cond)\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder_target = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n start_state = tf.zeros([batch_size, lstm_encoder_target.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder_target(inputs_list, start_state, \"LSTM\")\n\n lstm_encoder_tweet = Encoder(rnn_cell.BasicLSTMCell, input_size + 2 * hidden_size, hidden_size, drop_prob,\n drop_prob)\n\n inputs_cond_list = [tf.concat(1, [tf.squeeze(x), states[-1]]) for x in\n tf.split(1, max_seq_length, embedded_inputs_cond)]\n\n # running a second LSTM conditioned on the last state of the first\n outputs_cond, states_cond = lstm_encoder_tweet(inputs_cond_list, states[-1],\n \"LSTMcond\")\n\n outputs_fin = outputs_cond[-1]\n\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin) # tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin) # tf.nn.softmax\n\n return model, [inputs, inputs_cond]",
"def call(self, reshaped_input):\n \"\"\"\n In Keras, there are two way to do matrix multiplication (dot product)\n 1) K.dot : AxB -> when A has batchsize and B doesn't, use K.dot\n 2) tf.matmul: AxB -> when A and B both have batchsize, use tf.matmul\n \n Error example: Use tf.matmul when A has batchsize (3 dim) and B doesn't (2 dim)\n ValueError: Shape must be rank 2 but is rank 3 for 'net_vlad_1/MatMul' (op: 'MatMul') with input shapes: [?,21,64], [64,3]\n \n tf.matmul might still work when the dim of A is (?,64), but this is too confusing.\n Just follow the above rules.\n \"\"\"\n \n ''' Computation of N_v in Equation 3 of the paper '''\n activation = K.dot(reshaped_input, self.cluster_weights)\n \n activation += self.cluster_biases\n \n activation = tf.nn.softmax(activation)\n\n activation = tf.reshape(activation,\n [-1, self.max_samples, self.cluster_size])\n\n activation = tf.transpose(activation,perm=[0,2,1])\n \n reshaped_input = tf.reshape(reshaped_input,[-1,\n self.max_samples, self.feature_size])\n\n vlad = tf.matmul(activation,reshaped_input)\n vlad = tf.transpose(vlad,perm=[0,2,1])\n vlad = tf.nn.l2_normalize(vlad,1)\n vlad = tf.reshape(vlad,[-1, self.cluster_size*self.feature_size])\n Nv = tf.nn.l2_normalize(vlad,1)\n \n # Equation 3 in the paper\n # \\hat{y} = W_N N_v\n vlad = K.dot(Nv, self.Wn)\n\n return vlad",
"def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):\n output_placeholder = input_placeholder\n with tf.variable_scope(scope):\n for _ in range(n_layers):\n output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) # HINT: use tf.layers.dense (specify <input>, <size>, activation=<?>)\n output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) # HINT: use tf.layers.dense (specify <input>, <size>, activation=<?>)\n return output_placeholder",
"def inference(x):\n print(type(x))\n print(np.shape(x))\n print(x)\n \n with tf.variable_scope(\"hidden_layer_1\"):\n hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])\n #print([input_size, n_hidden_1])\n \n with tf.variable_scope(\"hidden_layer_2\"):\n hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])\n #print([n_hidden_1, n_hidden_2])\n \n with tf.variable_scope(\"hidden_layer_3\"):\n hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])\n #print([n_hidden_2, n_hidden_3])\n \n with tf.variable_scope(\"hidden_layer_4\"):\n hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])\n #print([n_hidden_3, n_hidden_4])\n \n with tf.variable_scope(\"hidden_layer_5\"):\n hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])\n #print([n_hidden_4, n_hidden_5])\n \n with tf.variable_scope(\"output\"):\n output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])\n #print([n_hidden_5, output_size])\n\n return output",
"def GraphFn(self, inp):\n tensor = inp * 2.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[1])\n tensor = tensor + 3.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[2])\n tensor = tensor * 4.0\n tensor = array_ops.reshape(tensor, self.tensor_shapes[3])\n tensor += tensor + 5.0\n return array_ops.identity(tensor, name='output_0')",
"def tgn(\n # Settings\n n_nodes: int,\n memory_size: int,\n time_embedding_size: int,\n dropout: float,\n learning_rate: float,\n target: utils.Target,\n is_training: bool,\n # Inputs\n node_ids: tf.Tensor,\n batch_idx: tf.Tensor,\n batch_times: tf.Tensor,\n batch_features: tf.Tensor,\n batch_most_recent: tf.Tensor,\n edge_idx: tf.Tensor,\n edge_times: tf.Tensor,\n edge_features: tf.Tensor,\n) -> Dict[str, tf.Tensor]:\n\n memory = tgn_memory(\n n_nodes=n_nodes,\n memory_size=memory_size,\n time_embedding_size=time_embedding_size,\n node_ids=node_ids,\n write_idx=batch_idx[:2],\n write_mask=batch_most_recent,\n write_features=batch_features,\n write_times=batch_times,\n )\n\n hidden = tgn_gnn(\n time_embedding_size=time_embedding_size,\n dropout=is_training * dropout,\n input=memory.output,\n last_update=memory.last_update,\n edge_idx=edge_idx,\n edge_times=edge_times,\n edge_features=edge_features,\n )\n\n logits = tgn_link_predictor(\n tf.gather(hidden, tf.tile(batch_idx[0][tf.newaxis], (2, 1))),\n tf.gather(hidden, batch_idx[1:]),\n )\n\n # Masks any batch padding\n batch_mask = tf.not_equal(batch_idx[0], node_ids.shape[0] - 1)\n count = tf.reduce_sum(tf.cast(batch_mask, tf.int32))\n labels = tf.tile(tf.constant([[1], [0]], dtype=logits.dtype),\n (1, logits.shape[1]))\n # *2 because the reference uses mean(pos_loss) + mean(neg_loss)\n loss = 2 * tf.reduce_mean(\n tf.cast(batch_mask, logits.dtype) *\n tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))\n\n if is_training:\n if target is utils.Target.IPU:\n step = optimiser.Adam(\n learning_rate=learning_rate).minimize_with_global_step(loss)\n else:\n # Allows AMP with TF_ENABLE_AUTO_MIXED_PRECISION=1\n step = tf.train.AdamOptimizer(\n learning_rate=learning_rate).minimize(loss)\n with tf.control_dependencies(memory.updates + (step, )):\n return dict(loss=tf.identity(loss), count=count)\n else:\n with tf.control_dependencies(memory.updates):\n return dict(loss=tf.identity(loss),\n count=count,\n probs=tf.nn.sigmoid(logits))",
"def make_attention_lstm():\n from tensorflow.keras import activations\n from tensorflow.keras import backend as K\n from tensorflow.keras import constraints, initializers, regularizers\n\n # from keras.legacy import interfaces\n from tensorflow.keras.layers import RNN, InputSpec, Layer\n\n def _time_distributed_dense(\n x,\n w,\n b=None,\n dropout=None,\n input_dim=None,\n output_dim=None,\n timesteps=None,\n training=None,\n ):\n \"\"\"Apply `y . w + b` for every temporal slice y of x.\n\n # Arguments\n x: input tensor.\n w: weight matrix.\n b: optional bias vector.\n dropout: wether to apply dropout (same dropout mask\n for every temporal slice of the input).\n input_dim: integer; optional dimensionality of the input.\n output_dim: integer; optional dimensionality of the output.\n timesteps: integer; optional number of timesteps.\n training: training phase tensor or boolean.\n # Returns\n Output tensor.\n \"\"\"\n if not input_dim:\n input_dim = K.shape(x)[2]\n if not timesteps:\n timesteps = K.shape(x)[1]\n if not output_dim:\n output_dim = K.int_shape(w)[1]\n\n if dropout is not None and 0.0 < dropout < 1.0:\n # apply the same dropout pattern at every timestep\n ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))\n dropout_matrix = K.dropout(ones, dropout)\n expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)\n x = K.in_train_phase(x * expanded_dropout_matrix, x, training=training)\n\n # collapse time dimension and batch dimension together\n x = K.reshape(x, (-1, input_dim))\n x = K.dot(x, w)\n if b is not None:\n x = K.bias_add(x, b)\n # reshape to 3D tensor\n if K.backend() == \"tensorflow\":\n x = K.reshape(x, K.stack([-1, timesteps, output_dim]))\n x.set_shape([None, None, output_dim])\n else:\n x = K.reshape(x, (-1, timesteps, output_dim))\n return x\n\n class AttentionLSTMCell(Layer):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n return_attention: Returns the attention vector instead of\n the internal state.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014),\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n 
attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n return_attention=False,\n implementation=1,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.input_spec = [InputSpec(ndim=2)]\n self.units = units\n self.activation = activations.get(activation)\n self.recurrent_activation = activations.get(recurrent_activation)\n self.attention_activation = activations.get(attention_activation)\n self.use_bias = use_bias\n\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.recurrent_initializer = initializers.get(recurrent_initializer)\n self.attention_initializer = initializers.get(attention_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.unit_forget_bias = unit_forget_bias\n\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.recurrent_regularizer = regularizers.get(recurrent_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.attention_regularizer = regularizers.get(attention_regularizer)\n\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.recurrent_constraint = constraints.get(recurrent_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.attention_constraint = constraints.get(attention_constraint)\n\n self.dropout = min(1.0, max(0.0, dropout))\n self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))\n self.return_attention = return_attention\n self._dropout_mask = None\n self._recurrent_dropout_mask = None\n self.implementation = implementation\n self.state_spec = [\n InputSpec(shape=(None, self.units)),\n InputSpec(shape=(None, self.units)),\n ]\n self.state_size = (self.units, self.units)\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTMCell object.\"\"\"\n if hasattr(self, \"timesteps\") and self.timesteps is not None:\n self.timestep_dim = self.timesteps\n else:\n self.timestep_dim = 1 # input_shape[0]\n\n self.input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"kernel\",\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint,\n )\n self.recurrent_kernel = self.add_weight(\n shape=(self.units, self.units * 4),\n name=\"recurrent_kernel\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n # add attention kernel\n self.attention_kernel = self.add_weight(\n shape=(self.input_dim, self.units * 4),\n name=\"attention_kernel\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n # add attention weights\n # weights for attention model\n self.attention_weights = self.add_weight(\n shape=(self.input_dim, self.units),\n name=\"attention_W\",\n initializer=self.attention_initializer,\n regularizer=self.attention_regularizer,\n constraint=self.attention_constraint,\n )\n\n self.attention_recurrent_weights = self.add_weight(\n 
shape=(self.units, self.units),\n name=\"attention_U\",\n initializer=self.recurrent_initializer,\n regularizer=self.recurrent_regularizer,\n constraint=self.recurrent_constraint,\n )\n\n if self.use_bias:\n if self.unit_forget_bias:\n\n def bias_initializer(shape, *args, **kwargs):\n return K.concatenate(\n [\n self.bias_initializer((self.units,), *args, **kwargs),\n initializers.Ones()((self.units,), *args, **kwargs),\n self.bias_initializer(\n (self.units * 2,), *args, **kwargs\n ),\n ]\n )\n\n else:\n bias_initializer = self.bias_initializer\n self.bias = self.add_weight(\n shape=(self.units * 4,),\n name=\"bias\",\n initializer=bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_bias = self.add_weight(\n shape=(self.units,),\n name=\"attention_b\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n\n self.attention_recurrent_bias = self.add_weight(\n shape=(self.units, 1),\n name=\"attention_v\",\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint,\n )\n else:\n self.bias = None\n self.attention_bias = None\n self.attention_recurrent_bias = None\n\n self.kernel_i = self.kernel[:, : self.units]\n self.kernel_f = self.kernel[:, self.units : self.units * 2]\n self.kernel_c = self.kernel[:, self.units * 2 : self.units * 3]\n self.kernel_o = self.kernel[:, self.units * 3 :]\n\n self.recurrent_kernel_i = self.recurrent_kernel[:, : self.units]\n self.recurrent_kernel_f = self.recurrent_kernel[\n :, self.units : self.units * 2\n ]\n self.recurrent_kernel_c = self.recurrent_kernel[\n :, self.units * 2 : self.units * 3\n ]\n self.recurrent_kernel_o = self.recurrent_kernel[:, self.units * 3 :]\n\n self.attention_i = self.attention_kernel[:, : self.units]\n self.attention_f = self.attention_kernel[:, self.units : self.units * 2]\n self.attention_c = self.attention_kernel[:, self.units * 2 : self.units * 3]\n self.attention_o = self.attention_kernel[:, self.units * 3 :]\n\n if self.use_bias:\n self.bias_i = self.bias[: self.units]\n self.bias_f = self.bias[self.units : self.units * 2]\n self.bias_c = self.bias[self.units * 2 : self.units * 3]\n self.bias_o = self.bias[self.units * 3 :]\n else:\n self.bias_i = None\n self.bias_f = None\n self.bias_c = None\n self.bias_o = None\n\n self.built = True\n\n def _generate_dropout_mask(self, inputs, training=None):\n if 0 < self.dropout < 1:\n ones = K.ones_like(K.squeeze(inputs[:, 0:1, :], axis=1))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._dropout_mask = None\n\n def _generate_recurrent_dropout_mask(self, inputs, training=None):\n if 0 < self.recurrent_dropout < 1:\n ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))\n ones = K.tile(ones, (1, self.units))\n\n def dropped_inputs():\n return K.dropout(ones, self.dropout)\n\n self._recurrent_dropout_mask = [\n K.in_train_phase(dropped_inputs, ones, training=training)\n for _ in range(4)\n ]\n else:\n self._recurrent_dropout_mask = None\n\n def call(self, inputs, states, training=None):\n \"\"\"Call the AttentionLSTMCell.\"\"\"\n # dropout matrices for input units\n dp_mask = self._dropout_mask\n # dropout matrices for recurrent units\n rec_dp_mask = self._recurrent_dropout_mask\n\n h_tm1 = states[0] # previous memory state\n c_tm1 = states[1] # previous carry state\n\n # 
alignment model\n h_att = K.repeat(h_tm1, self.timestep_dim)\n att = _time_distributed_dense(\n inputs,\n self.attention_weights,\n self.attention_bias,\n input_dim=self.input_dim,\n output_dim=self.units,\n timesteps=self.timestep_dim,\n )\n attention_ = self.attention_activation(\n K.dot(h_att, self.attention_recurrent_weights) + att\n ) # energy\n attention_ = K.squeeze(\n K.dot(attention_, self.attention_recurrent_bias), 2\n ) # energy\n\n alpha = K.exp(attention_)\n\n if dp_mask is not None:\n alpha *= dp_mask[0]\n\n alpha /= K.sum(alpha, axis=1, keepdims=True)\n alpha_r = K.repeat(alpha, self.input_dim)\n alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))\n\n # make context vector (soft attention after Bahdanau et al.)\n z_hat = inputs * alpha_r\n # context_sequence = z_hat\n z_hat = K.sum(z_hat, axis=1)\n\n if self.implementation == 1:\n if 0 < self.dropout < 1.0:\n inputs_i = inputs * dp_mask[0]\n inputs_f = inputs * dp_mask[1]\n inputs_c = inputs * dp_mask[2]\n inputs_o = inputs * dp_mask[3]\n else:\n inputs_i = inputs\n inputs_f = inputs\n inputs_c = inputs\n inputs_o = inputs\n x_i = K.dot(inputs_i, self.kernel_i)\n x_f = K.dot(inputs_f, self.kernel_f)\n x_c = K.dot(inputs_c, self.kernel_c)\n x_o = K.dot(inputs_o, self.kernel_o)\n if self.use_bias:\n x_i = K.bias_add(x_i, self.bias_i)\n x_f = K.bias_add(x_f, self.bias_f)\n x_c = K.bias_add(x_c, self.bias_c)\n x_o = K.bias_add(x_o, self.bias_o)\n\n if 0 < self.recurrent_dropout < 1.0:\n h_tm1_i = h_tm1 * rec_dp_mask[0]\n h_tm1_f = h_tm1 * rec_dp_mask[1]\n h_tm1_c = h_tm1 * rec_dp_mask[2]\n h_tm1_o = h_tm1 * rec_dp_mask[3]\n else:\n h_tm1_i = h_tm1\n h_tm1_f = h_tm1\n h_tm1_c = h_tm1\n h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i\n + K.dot(h_tm1_i, self.recurrent_kernel_i)\n + K.dot(z_hat, self.attention_i)\n )\n f = self.recurrent_activation(\n x_f\n + K.dot(h_tm1_f, self.recurrent_kernel_f)\n + K.dot(z_hat, self.attention_f)\n )\n c = f * c_tm1 + i * self.activation(\n x_c\n + K.dot(h_tm1_c, self.recurrent_kernel_c)\n + K.dot(z_hat, self.attention_c)\n )\n o = self.recurrent_activation(\n x_o\n + K.dot(h_tm1_o, self.recurrent_kernel_o)\n + K.dot(z_hat, self.attention_o)\n )\n else:\n if 0.0 < self.dropout < 1.0:\n inputs *= dp_mask[0]\n z = K.dot(inputs, self.kernel)\n if 0.0 < self.recurrent_dropout < 1.0:\n h_tm1 *= rec_dp_mask[0]\n z += K.dot(h_tm1, self.recurrent_kernel)\n z += K.dot(z_hat, self.attention_kernel)\n\n if self.use_bias:\n z = K.bias_add(z, self.bias)\n\n z0 = z[:, : self.units]\n z1 = z[:, self.units : 2 * self.units]\n z2 = z[:, 2 * self.units : 3 * self.units]\n z3 = z[:, 3 * self.units :]\n\n i = self.recurrent_activation(z0)\n f = self.recurrent_activation(z1)\n c = f * c_tm1 + i * self.activation(z2)\n o = self.recurrent_activation(z3)\n\n h = o * self.activation(c)\n if 0 < self.dropout + self.recurrent_dropout:\n if training is None:\n h._uses_learning_phase = True\n return h, [h, c]\n\n class AttentionLSTM(RNN):\n \"\"\"Long-Short Term Memory unit - with Attention.\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](keras/activations.md)).\n If you pass None, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n recurrent_activation: Activation function to use\n for the recurrent step\n (see [activations](keras/activations.md)).\n attention_activation: Activation function to use\n for the attention step. If you pass None, no activation is applied\n (ie. 
\"linear\" activation: `a(x) = x`).\n (see [activations](keras/activations.md)).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix,\n used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n recurrent_initializer: Initializer for the `recurrent_kernel`\n weights matrix,\n used for the linear transformation of the recurrent state.\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n attention_initializer: Initializer for the `attention_kernel` weights\n matrix, used for the linear transformation of the inputs.\n (see [initializers](../initializers.md)).\n use_chrono_initialization: Boolean.\n If True, add 1 to the bias of the forget gate at initialization.\n Setting it to true will also force `bias_initializer=\"zeros\"`.\n This is recommended in [Jozefowicz et al.]\n (http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n recurrent_regularizer: Regularizer function applied to\n the `recurrent_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n attention_regularizer: Regularizer function applied to\n the `attention_kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n recurrent_constraint: Constraint function applied to\n the `recurrent_kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n attention_constraint: Constraint function applied to\n the `attention_kernel` weights matrix\n (see [constraints](../constraints.md)).\n dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the inputs.\n recurrent_dropout: Float between 0 and 1.\n Fraction of the units to drop for\n the linear transformation of the recurrent state.\n implementation: Implementation mode, either 1 or 2.\n return_sequences: Boolean. Whether to return the last output.\n in the output sequence, or the full sequence.\n return_state: Boolean. Whether to return the last state\n in addition to the output.\n return_attention: Returns the attention vector instead of\n the internal state.\n go_backwards: Boolean (default False).\n If True, process the input sequence backwards and return the\n reversed sequence.\n stateful: Boolean (default False). 
If True, the last state\n for each sample at index i in a batch will be used as initial\n state for the sample of index i in the following batch.\n unroll: Boolean (default False).\n If True, the network will be unrolled,\n else a symbolic loop will be used.\n Unrolling can speed-up a RNN,\n although it tends to be more memory-intensive.\n Unrolling is only suitable for short sequences.\n # References\n - [Long short-term memory]\n (http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)\n (original 1997 paper)\n - [Learning to forget: Continual prediction with LSTM]\n (http://www.mitpressjournals.org/doi/pdf/10.1162/089976600300015015)\n - [Supervised sequence labeling with recurrent neural networks]\n (http://www.cs.toronto.edu/~graves/preprint.pdf)\n - [A Theoretically Grounded Application of Dropout\n in Recurrent Neural Networks]\n (http://arxiv.org/abs/1512.05287)\n - [Bahdanau, Cho & Bengio (2014)\n \"Neural Machine Translation by Jointly Learning to Align and Translate\"]\n (https://arxiv.org/pdf/1409.0473.pdf)\n - [Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016)\n \"Show, Attend and Tell: Neural Image Caption Generation\n with Visual Attention\"]\n (http://arxiv.org/pdf/1502.03044.pdf)\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n # '@interfaces.legacy_recurrent_support\n def __init__(\n self,\n units,\n activation=\"tanh\",\n recurrent_activation=\"hard_sigmoid\",\n attention_activation=\"tanh\",\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n recurrent_initializer=\"orthogonal\",\n attention_initializer=\"orthogonal\",\n bias_initializer=\"zeros\",\n unit_forget_bias=True,\n kernel_regularizer=None,\n recurrent_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n attention_regularizer=None,\n kernel_constraint=None,\n recurrent_constraint=None,\n bias_constraint=None,\n attention_constraint=None,\n dropout=0.0,\n recurrent_dropout=0.0,\n implementation=1,\n return_sequences=False,\n return_state=False,\n return_attention=False,\n go_backwards=False,\n stateful=False,\n unroll=False,\n **kwargs,\n ):\n import warnings\n\n if implementation == 0:\n warnings.warn(\n \"`implementation=0` has been deprecated, \"\n \"and now defaults to `implementation=1`.\"\n \"Please update your layer call.\",\n stacklevel=2,\n )\n implementation = 1\n\n if K.backend() == \"cntk\":\n if not kwargs.get(\"unroll\") and (dropout > 0 or recurrent_dropout > 0):\n warnings.warn(\n \"RNN dropout is not supported with the CNTK backend \"\n \"when using dynamic RNNs (i.e. non-unrolled). 
\"\n \"You can either set `unroll=True`, \"\n \"set `dropout` and `recurrent_dropout` to 0, \"\n \"or use a different backend.\",\n stacklevel=2,\n )\n dropout = 0.0\n recurrent_dropout = 0.0\n\n cell = AttentionLSTMCell(\n units,\n activation=activation,\n recurrent_activation=recurrent_activation,\n attention_activation=attention_activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n recurrent_initializer=recurrent_initializer,\n attention_initializer=attention_initializer,\n bias_initializer=bias_initializer,\n unit_forget_bias=unit_forget_bias,\n kernel_regularizer=kernel_regularizer,\n recurrent_regularizer=recurrent_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n attention_regularizer=attention_regularizer,\n kernel_constraint=kernel_constraint,\n recurrent_constraint=recurrent_constraint,\n bias_constraint=bias_constraint,\n attention_constraint=attention_constraint,\n dropout=dropout,\n recurrent_dropout=recurrent_dropout,\n return_attention=return_attention,\n implementation=implementation,\n )\n super().__init__(\n cell,\n return_sequences=return_sequences,\n return_state=return_state,\n go_backwards=go_backwards,\n stateful=stateful,\n unroll=unroll,\n **kwargs,\n )\n self.return_attention = return_attention\n\n def build(self, input_shape):\n \"\"\"Build the AttentionLSTM object.\"\"\"\n self.cell.timesteps = input_shape[1]\n self.cell.build(input_shape)\n\n def call(self, inputs, mask=None, training=None, initial_state=None):\n \"\"\"Call the AttentionLSTM object.\"\"\"\n self.cell._generate_dropout_mask(inputs, training=training)\n self.cell._generate_recurrent_dropout_mask(inputs, training=training)\n return super().call(\n inputs, mask=mask, training=training, initial_state=initial_state\n )\n\n @property\n def units(self):\n \"\"\"Return property units.\"\"\"\n return self.cell.units\n\n @property\n def activation(self):\n \"\"\"Return property activation.\"\"\"\n return self.cell.activation\n\n @property\n def recurrent_activation(self):\n \"\"\"Return property recurrent_activation.\"\"\"\n return self.cell.recurrent_activation\n\n @property\n def attention_activation(self):\n \"\"\"Return property attention_activation.\"\"\"\n return self.cell.attention_activation\n\n @property\n def use_bias(self):\n \"\"\"Return property use_bias.\"\"\"\n return self.cell.use_bias\n\n @property\n def kernel_initializer(self):\n \"\"\"Return property kernel_initializer.\"\"\"\n return self.cell.kernel_initializer\n\n @property\n def recurrent_initializer(self):\n \"\"\"Return property recurrent_initializer.\"\"\"\n return self.cell.recurrent_initializer\n\n @property\n def attention_initializer(self):\n \"\"\"Return property attention_initializer.\"\"\"\n return self.cell.attention_initializer\n\n @property\n def bias_initializer(self):\n \"\"\"Return property bias_initializer.\"\"\"\n return self.cell.bias_initializer\n\n @property\n def unit_forget_bias(self):\n \"\"\"Return property unit_forget_bias.\"\"\"\n return self.cell.unit_forget_bias\n\n @property\n def kernel_regularizer(self):\n \"\"\"Return property kernel_regularizer.\"\"\"\n return self.cell.kernel_regularizer\n\n @property\n def recurrent_regularizer(self):\n \"\"\"Return property recurrent_regularizer.\"\"\"\n return self.cell.recurrent_regularizer\n\n @property\n def bias_regularizer(self):\n \"\"\"Return property bias_regularizer.\"\"\"\n return self.cell.bias_regularizer\n\n @property\n def activity_regularizer(self):\n \"\"\"Return property 
activity_regularizer.\"\"\"\n return self.cell.activity_regularizer\n\n @property\n def attention_regularizer(self):\n \"\"\"Return property attention_regularizer.\"\"\"\n return self.cell.attention_regularizer\n\n @property\n def kernel_constraint(self):\n \"\"\"Return property kernel_constraint.\"\"\"\n return self.cell.kernel_constraint\n\n @property\n def recurrent_constraint(self):\n \"\"\"Return property recurrent_constraint.\"\"\"\n return self.cell.recurrent_constraint\n\n @property\n def bias_constraint(self):\n \"\"\"Return property bias_constraint.\"\"\"\n return self.cell.bias_constraint\n\n @property\n def attention_constraint(self):\n \"\"\"Return property attention_constraint.\"\"\"\n return self.cell.attention_constraint\n\n @property\n def dropout(self):\n \"\"\"Return property dropout.\"\"\"\n return self.cell.dropout\n\n @property\n def recurrent_dropout(self):\n \"\"\"Return property recurrent_dropout.\"\"\"\n return self.cell.recurrent_dropout\n\n @property\n def implementation(self):\n \"\"\"Return property implementation.\"\"\"\n return self.cell.implementation\n\n def get_config(self):\n \"\"\"Return configuration dict of the AttentionLSTM object.\"\"\"\n config = {\n \"units\": self.units,\n \"activation\": activations.serialize(self.activation),\n \"recurrent_activation\": activations.serialize(\n self.recurrent_activation\n ),\n \"attention_activation\": activations.serialize(\n self.attention_activation\n ),\n \"use_bias\": self.use_bias,\n \"kernel_initializer\": initializers.serialize(self.kernel_initializer),\n \"recurrent_initializer\": initializers.serialize(\n self.recurrent_initializer\n ),\n \"bias_initializer\": initializers.serialize(self.bias_initializer),\n \"attention_initializer\": initializers.serialize(\n self.attention_initializer\n ),\n \"use_chrono_initialization\": self.unit_forget_bias,\n \"kernel_regularizer\": regularizers.serialize(self.kernel_regularizer),\n \"recurrent_regularizer\": regularizers.serialize(\n self.recurrent_regularizer\n ),\n \"bias_regularizer\": regularizers.serialize(self.bias_regularizer),\n \"activity_regularizer\": regularizers.serialize(\n self.activity_regularizer\n ),\n \"attention_regularizer\": regularizers.serialize(\n self.attention_regularizer\n ),\n \"kernel_constraint\": constraints.serialize(self.kernel_constraint),\n \"recurrent_constraint\": constraints.serialize(\n self.recurrent_constraint\n ),\n \"bias_constraint\": constraints.serialize(self.bias_constraint),\n \"attention_constraint\": constraints.serialize(\n self.attention_constraint\n ),\n \"dropout\": self.dropout,\n \"recurrent_dropout\": self.recurrent_dropout,\n \"return_attention\": self.return_attention,\n }\n base_config = super().get_config()\n del base_config[\"cell\"]\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Create a new AttentionLSTM object from a configuration dict.\"\"\"\n if \"implementation\" in config and config[\"implementation\"] == 0:\n config[\"implementation\"] = 1\n return cls(**config)\n\n return AttentionLSTM",
"def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n\n fcn_layer7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer7_conv_1x1')\n\n fcn_layer7_deconv = tf.layers.conv2d_transpose(fcn_layer7_conv_1x1, num_classes, 4, 2, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer7_deconv')\n\n vgg_layer4_out_scale = tf.multiply(vgg_layer4_out, 0.01, name='vgg_layer4_out_scale')\n\n fcn_layer4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out_scale, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer4_conv_1x1')\n\n intermediate_1 = tf.add(fcn_layer7_deconv, fcn_layer4_conv_1x1, name='intermediate_1')\n\n vgg_layer3_out_scale = tf.multiply(vgg_layer3_out, 0.0001, name='vgg_layer3_out_scale')\n\n fcn_layer3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out_scale, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer3_conv_1x1')\n\n intermediate_1_deconv = tf.layers.conv2d_transpose(intermediate_1, num_classes, 4, 2, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='intermediate_1_deconv')\n\n intermediate_2 = tf.add(intermediate_1_deconv, fcn_layer3_conv_1x1, name='intermediate_2')\n\n fcn_output = tf.layers.conv2d_transpose(intermediate_2, num_classes, 16, 8, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_output')\n\n return fcn_output",
"def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n # TODO: Implement function\n\n #regulizer is needed to suppress high weights and prevent overfitting\n #First layer to preserve spatial information\n # Discussion on Default Kernel Initializer\n # https://stackoverflow.com/questions/43284047/what-is-the-default-kernel-initializer-in-tf-layers-conv2d-and-tf-layers-dense\n # Excellents hints and discussion from:\n # https://discussions.udacity.com/t/here-is-some-advice-and-clarifications-about-the-semantic-segmentation-project/403100\n\n\n\n #Encoding Step :\n #-----------------\n conv_1x1_L1 = tf.layers.conv2d(vgg_layer7_out, num_classes, kernel_size=(1, 1), strides=(1, 1),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n conv_1x1_L2 = tf.layers.conv2d(vgg_layer4_out, num_classes, kernel_size=(1, 1), strides=(1, 1),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n #scaling as suggested in the forum\n conv_1x1_L2 = tf.multiply(conv_1x1_L2, 0.01)\n\n conv_1x1_L3 = tf.layers.conv2d(vgg_layer3_out, num_classes, kernel_size=(1, 1), strides=(1, 1),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n #scaling as suggested in the forum\n conv_1x1_L3 = tf.multiply(conv_1x1_L3, 0.0001)\n\n # Decoding Step and Skipping:\n #Deconvolution. upsampling to the original image size each time by 2\n #-----------------------------------------------------------\n\n output_deConv_L1 = tf.layers.conv2d_transpose(conv_1x1_L1, num_classes, kernel_size=(4, 4), strides=(2, 2),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n skipping_L1 = tf.add(output_deConv_L1, conv_1x1_L2) #vgg_layer4_out + trans(vgg_layer3_out)\n\n #printing\n #tf.Print(output_deConv_L1, [tf.shape(output_deConv_L1)])\n\n #-------------------------\n\n output_deConv_L2 = tf.layers.conv2d_transpose(skipping_L1, num_classes, kernel_size=(4, 4), strides=(2, 2),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n skipping_L2 = tf.add(output_deConv_L2, conv_1x1_L3) #vgg_layer7_out + trans(skipping_L1)\n #-------------------------\n\n output_deConv_L3 = tf.layers.conv2d_transpose(skipping_L2, num_classes, kernel_size=(16, 16), strides=(8, 8),\n padding='same', kernel_regularizer = tf.contrib.layers.l2_regularizer(1e-3),\n kernel_initializer= tf.truncated_normal_initializer(stddev=0.001))\n\n #-------------------------\n return output_deConv_L3",
"def define_nmt(hidden_size, batch_size, en_timesteps, en_vsize, fr_timesteps, fr_vsize):\n\n # Define an input sequence and process it.\n if batch_size:\n encoder_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inputs')\n decoder_inputs = Input(batch_shape=(batch_size, fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n encoder_inputs = Input(shape=(en_timesteps, en_vsize), name='encoder_inputs')\n if fr_timesteps:\n decoder_inputs = Input(shape=(fr_timesteps - 1, fr_vsize), name='decoder_inputs')\n else:\n decoder_inputs = Input(shape=(None, fr_vsize), name='decoder_inputs')\n\n # Encoder GRU\n encoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='encoder_gru')\n encoder_out, encoder_state = encoder_gru(encoder_inputs)\n\n # Set up the decoder GRU, using `encoder_states` as initial state.\n decoder_gru = GRU(hidden_size, return_sequences=True, return_state=True, name='decoder_gru')\n decoder_out, decoder_state = decoder_gru(decoder_inputs, initial_state=encoder_state)\n\n # Attention layer\n attn_layer = AttentionLayer(name='attention_layer')\n attn_out, attn_states = attn_layer([encoder_out, decoder_out])\n\n # Concat attention input and decoder GRU output\n decoder_concat_input = Concatenate(axis=-1, name='concat_layer')([decoder_out, attn_out])\n\n # Dense layer\n dense = Dense(fr_vsize, activation='softmax', name='softmax_layer')\n dense_time = TimeDistributed(dense, name='time_distributed_layer')\n decoder_pred = dense_time(decoder_concat_input)\n\n # Full model\n full_model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_pred)\n full_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy'])\n\n full_model.summary()\n\n \"\"\" Inference model \"\"\"\n batch_size = 1\n\n \"\"\" Encoder (Inference) model \"\"\"\n encoder_inf_inputs = Input(batch_shape=(batch_size, en_timesteps, en_vsize), name='encoder_inf_inputs')\n encoder_inf_out, encoder_inf_state = encoder_gru(encoder_inf_inputs)\n encoder_model = Model(inputs=encoder_inf_inputs, outputs=[encoder_inf_out, encoder_inf_state])\n\n \"\"\" Decoder (Inference) model \"\"\"\n decoder_inf_inputs = Input(batch_shape=(batch_size, 1, fr_vsize), name='decoder_word_inputs')\n encoder_inf_states = Input(batch_shape=(batch_size, en_timesteps, hidden_size), name='encoder_inf_states')\n decoder_init_state = Input(batch_shape=(batch_size, hidden_size), name='decoder_init')\n\n decoder_inf_out, decoder_inf_state = decoder_gru(decoder_inf_inputs, initial_state=decoder_init_state)\n attn_inf_out, attn_inf_states = attn_layer([encoder_inf_states, decoder_inf_out])\n decoder_inf_concat = Concatenate(axis=-1, name='concat')([decoder_inf_out, attn_inf_out])\n decoder_inf_pred = TimeDistributed(dense)(decoder_inf_concat)\n decoder_model = Model(inputs=[encoder_inf_states, decoder_init_state, decoder_inf_inputs],\n outputs=[decoder_inf_pred, attn_inf_states, decoder_inf_state])\n\n return full_model, encoder_model, decoder_model",
"def predict(predict_var, x_unlabeled, inputs, batch_sizes, view_size):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n y_list = np.concatenate(y_preds, axis=1)\n\n return y_list",
"def get_model_conditional(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\": # continue training embeddings or not. Currently works better to continue training them.\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), #input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix, inputs_cond)\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n inputs_cond_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs_cond)]\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n # running a second LSTM conditioned on the last state of the first\n outputs_cond, states_cond = lstm_encoder(inputs_cond_list, states[-1],\n \"LSTMcond\")\n\n outputs_fin = outputs_cond[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin) # tf.nn.softmax\n\n return model, [inputs, inputs_cond]",
"def define_graph(glove_embeddings_arr):\n\n input_data = tf.placeholder(dtype=tf.int32,shape=[batch_size,review_word_limit],name=\"input_data\")\n labels = tf.placeholder(dtype=tf.float32,shape=[batch_size,2],name=\"labels\")\n \n dropout_keep_prob = tf.placeholder_with_default(0.85, shape=())\n\n weights = tf.Variable(tf.truncated_normal([lstm_cell_count,sentiment_classes]))\n bias = tf.Variable(tf.constant(.0,dtype=tf.float32,shape=[sentiment_classes]))\n\n\n #Embed the input words\n emb = tf.convert_to_tensor(glove_embeddings_arr,dtype=tf.float32)\n data = tf.Variable(tf.zeros([batch_size,review_word_limit,embedding_length],dtype=tf.float32))\n data = tf.nn.embedding_lookup(emb,input_data)\n\n # Define Network\n lstm = tf.contrib.rnn.BasicLSTMCell(lstm_cell_count,)\n lstm = tf.contrib.rnn.DropoutWrapper(cell = lstm, output_keep_prob = dropout_keep_prob)\n\n lstm_out , lstm_last_state = tf.nn.dynamic_rnn(lstm,data,dtype=tf.float32)\n lstm_out = tf.transpose(lstm_out,[1,0,2])\n lstm_out = tf.gather(lstm_out,int(lstm_out.get_shape()[0])-1)\n prediction = (tf.matmul(lstm_out,weights)+bias)\n\n #Define loss function\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction,labels=labels))\n\n #Define accuracy\n accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction,1),tf.argmax(labels,1)),dtype=tf.float32))\n\n #Define optimizer\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n return input_data, labels, dropout_keep_prob, optimizer, accuracy, loss"
] | [
"0.6142572",
"0.5995336",
"0.5860496",
"0.5843825",
"0.5820258",
"0.5785165",
"0.5778968",
"0.57665837",
"0.57546574",
"0.5752519",
"0.5733955",
"0.5669118",
"0.56649256",
"0.5586168",
"0.55840546",
"0.5577646",
"0.5569322",
"0.5565465",
"0.5564543",
"0.5561244",
"0.55558175",
"0.55310875",
"0.55173707",
"0.5509672",
"0.55089253",
"0.5504667",
"0.54937553",
"0.5492523",
"0.54863304",
"0.5474216"
] | 0.65570605 | 0 |
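The record below describes an alternating GAN-style training schedule: at least good_epoch big epochs, an early exit once every generated note lies inside the map boundary, and a hard stop at max_epoch, with the generator getting g_epochs sub-epochs against the classifier's c_epochs in each big epoch. The following is a minimal sketch of that schedule only; `train_generator`, `train_classifier`, `sample_notes` and `notes_in_bounds` are hypothetical stand-ins for the Keras models in the record, not part of the dataset.

```python
import numpy as np

# Hypothetical stand-ins for the real Keras models used in the record below.
def train_generator(epochs):
    """Pretend to train the generator for `epochs` sub-epochs; return a mean loss."""
    return float(np.random.rand())

def train_classifier(epochs):
    """Pretend to train the classifier for `epochs` sub-epochs; return a mean loss."""
    return float(np.random.rand())

def sample_notes(n=10):
    """Pretend to sample one group of notes as normalized (x, y) positions."""
    return np.random.rand(n, 2) * 1.2 - 0.1  # occasionally falls outside [0, 1]

def notes_in_bounds(notes):
    """Exit condition: every note lies inside the playfield."""
    return bool(np.all((notes >= 0.0) & (notes <= 1.0)))

def training_schedule(good_epoch=6, max_epoch=25, g_epochs=7, c_epochs=3):
    notes = sample_notes()
    for epoch in range(max_epoch):
        g_loss = train_generator(g_epochs)   # generator gets more sub-epochs...
        c_loss = train_classifier(c_epochs)  # ...than the classifier, to balance them
        notes = sample_notes()
        print("Epoch {}: G loss {:.3f} vs. C loss {:.3f}".format(epoch + 1, g_loss, c_loss))
        # Early exit is only allowed after the minimum number of big epochs.
        if epoch >= good_epoch - 1 and notes_in_bounds(notes):
            break
    return notes

if __name__ == "__main__":
    training_schedule()
```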
Generate one set (note_group_size) of notes. Trains each model for at least (good_epoch = 6) epochs, then continues training until all the notes satisfy the exit conditions (i.e. lie within the map boundaries). If training runs up to (max_epoch = 25) epochs, it exits anyway. Inside the training loop, each big epoch trains the generator for (g_epochs = 7) epochs and the classifier for (c_epochs = 3) epochs; these numbers are chosen to balance the strengths of the two models. The plot_map flag is only used for debugging. | def generate_set(models, begin=0, start_pos=[256, 192], group_id=-1, length_multiplier=1, plot_map=True):
extvar["begin"] = begin
extvar["start_pos"] = start_pos
extvar["length_multiplier"] = length_multiplier
extvar["next_from_slider_end"] = GAN_PARAMS["next_from_slider_end"]
note_group_size = GAN_PARAMS["note_group_size"]
max_epoch = GAN_PARAMS["max_epoch"]
good_epoch = GAN_PARAMS["good_epoch"] - 1
g_multiplier = GAN_PARAMS["g_epochs"]
c_multiplier = GAN_PARAMS["c_epochs"]
g_batch = GAN_PARAMS["g_batch"]
g_input_size = GAN_PARAMS["g_input_size"]
c_true_batch = GAN_PARAMS["c_true_batch"]
c_false_batch = GAN_PARAMS["c_false_batch"]
c_randfalse_batch = GAN_PARAMS["c_randfalse_batch"]
reset_model_weights(models)
set_extvar(models, extvar)
gmodel, mapping_layer, classifier_model, mmodel, default_weights = models
for i in range(max_epoch):
gnoise = np.random.random((g_batch, g_input_size))
glabel = [np.zeros((g_batch, note_group_size * 4)),
np.ones((g_batch,)), np.ones((g_batch,))]
ginput = conv_input(gnoise, extvar)
# fit mmodel instead of gmodel
history = mmodel.fit(ginput, glabel, epochs=g_multiplier,
validation_split=0.2, verbose=0,
callbacks=[])
pred_noise = np.random.random((c_false_batch, g_input_size))
pred_input = conv_input(pred_noise, extvar)
predicted_maps_data, predicted_maps_mapped, _predclass = mmodel.predict(
pred_input)
new_false_maps = predicted_maps_mapped
new_false_labels = np.zeros(c_false_batch)
# random numbers as negative samples
# special_train_data.shape[2] == 6
randfalse_maps = np.random.rand(
c_randfalse_batch, note_group_size, special_train_data.shape[2])
randfalse_labels = np.zeros(c_randfalse_batch)
rn = np.random.randint(0, special_train_data.shape[0], (c_true_batch,))
actual_train_data = np.concatenate(
(new_false_maps, randfalse_maps, special_train_data[rn]), axis=0)
actual_train_labels = np.concatenate(
(new_false_labels, randfalse_labels, special_train_labels[rn]), axis=0)
history2 = classifier_model.fit(actual_train_data, actual_train_labels, epochs=c_multiplier,
validation_split=0.2, verbose=0,
callbacks=[])
# calculate the losses
g_loss = np.mean(history.history['loss'])
c_loss = np.mean(history2.history['loss'])
print("Group {}, Epoch {}: G loss: {} vs. C loss: {}".format(
group_id, 1+i, g_loss, c_loss))
# delete the history to free memory
del history, history2
# make a new set of notes
res_noise = np.random.random((1, g_input_size))
res_input = conv_input(res_noise, extvar)
_resgenerated, res_map, _resclass = mmodel.predict(res_input)
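# Sample one candidate note group from fresh noise; it becomes the returned result
# if it passes the boundary check below (or once max_epoch is reached).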
if plot_map:
plot_current_map(tf.convert_to_tensor(res_map, dtype=tf.float32))
# early exit once a good solution is found;
# "good" means the whole note group lies inside the map boundary
if i >= good_epoch:
current_map = res_map
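# inblock_trueness(...) == 0 means every position lies inside the playfield: columns 0:2
# appear to hold note start coordinates and columns 4:6 slider end coordinates
# (elsewhere they are rescaled by [512, 384, 1, 1, 512, 384]).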
if inblock_trueness(current_map[:, :, 0:2]).numpy()[0] == 0 and inblock_trueness(current_map[:, :, 4:6]).numpy()[0] == 0:
# debugging options to check map integrity
# print(tf.reduce_mean(current_map));
# print("-----MAPLAYER-----")
# print(tf.reduce_mean(mapping_layer(conv_input(tf.convert_to_tensor(_resgenerated, dtype="float32"), extvar))));
# print("-----CMWS-----")
# print(tf.reduce_mean(construct_map_with_sliders(tf.convert_to_tensor(_resgenerated, dtype="float32"), extvar=mapping_layer.extvar)));
break
if plot_map:
for _ in range(3): # from our testing, any random input generates nearly the same map
plot_noise = np.random.random((1, g_input_size))
plot_input = conv_input(plot_noise, extvar)
_plotgenerated, plot_mapped, _plotclass = mmodel.predict(
plot_input)
plot_current_map(tf.convert_to_tensor(
plot_mapped, dtype=tf.float32))
# Don't do this in this version. It's for old versions where models are rebuilt each loop
# del mmodel, mapping_layer;
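# Typical call (mirroring the generate_map driver; an assumption based on related code):
# generate_set(models, begin=i * note_group_size, start_pos=pos,
#              length_multiplier=dist_multiplier, group_id=i, plot_map=False)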
return res_map.squeeze() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)",
"def generate(train_data_path, trained_model_path, num_output_files):\n # load the notes used to train the model\n\n train_data = data_preprocess.load_from_pickle(train_data_path)\n training_notes = train_data[\"data\"]\n note_translator = train_data[\"note_translator\"]\n\n net = networks.TransformerNet.load_checkpoint(trained_model_path)\n\n for i in range(num_output_files):\n prediction_output = generate_notes(net, training_notes, note_translator)\n create_midi(prediction_output, file_suffix=i)",
"def generate_map():\n o = []\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n pos = [np.random.randint(100, 412), np.random.randint(80, 304)]\n models = make_models()\n\n print(\"# of groups: {}\".format(timestamps.shape[0] // note_group_size))\n for i in range(timestamps.shape[0] // note_group_size):\n z = generate_set(models, begin=i * note_group_size, start_pos=pos, length_multiplier=dist_multiplier,\n group_id=i, plot_map=False)[:, :6] * np.array([512, 384, 1, 1, 512, 384])\n pos = z[-1, 0:2]\n o.append(z)\n a = np.concatenate(o, axis=0)\n return a",
"def _train_epoch(self, train_batches, dropout_keep_prob, data, batch_size, save_dir, save_prefix):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n eval_every_n_batch = (len(data.train_set) - 1) / (8 * batch_size)\n for bitx, batch in enumerate(train_batches, 1): \n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.pc: batch['passage_char_ids'],\n self.qc: batch['question_char_ids'],\n self.p_em: batch['passage_em'],\n self.p_pos: batch['passage_pos'],\n self.q_pos: batch['question_pos'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.pr: batch['passage_rank'],\n self.dropout_keep_prob: dropout_keep_prob}\n\n _, loss = self.sess.run([self.train_op, self.loss], \n feed_dict=feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n \n if eval_every_n_batch > 0 and bitx % eval_every_n_batch == 0:\n self.logger.info('Evaluating the model ...')\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n return 1.0 * total_loss / total_num",
"def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras",
"def generate_notes(model, training_notes, note_translator):\n\n # pick a random sequence from the input as a starting point for the prediction\n sequence_length = model.sequence_length\n note_reverse_translator = {index: note for note, index in note_translator.items()}\n init_state = data_preprocess.prepare_predict_init_state(training_notes, sequence_length)\n\n # create lists for model samples and actual notes created\n X = [note_translator[note] for note in init_state]\n prediction_output = init_state\n\n # copy model to computation device and set to evaluation mode\n device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n model.eval()\n\n # generate notes\n notes_to_generate = 512\n for i in range(notes_to_generate):\n input_tensor = torch.tensor(X).view(1, -1).to(device)\n all_next_notes_logits = model(input_tensor)\n next_note_logits = all_next_notes_logits[:, -1, :] # take the last vector of the sequence\n\n next_note_index = sample_prediction(next_note_logits, \"\")\n # _, next_note_index = torch.max(next_note_logits, dim=2)\n # next_note_index = next_note_index.cpu().item()\n\n # next_notes = [note_reverse_translator[note] for note in next_notes_index.cpu().numpy().flatten()]\n next_note_index = next_note_index.cpu().item()\n next_note = note_reverse_translator[next_note_index]\n prediction_output.append(next_note)\n X.append(next_note_index)\n X = X[1:] # advance to next prediction using generated note\n\n return prediction_output",
"def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)",
"def parallel_sequential_generation(seed_text, batch_size=10, mask_len=14, top_k=0, temperature=None, max_iter=300, burnin=200,\n cuda=False, print_every=10, verbose=True):\n seed_len = len(seed_text)\n batch = get_init_text(seed_text, mask_len, batch_size)\n \n for ii in range(max_iter):\n kk = np.random.randint(0, mask_len) if np.random.randint(0,2) == 0 else np.random.randint(seed_len + mask_len, seed_len + 2* mask_len)\n for jj in range(batch_size):\n batch[jj][kk] = mask_id\n inp = torch.tensor(batch).cuda() if cuda else torch.tensor(batch)\n out = model(inp)\n topk = top_k if (ii >= burnin) else 0\n idxs = generate_step(out, gen_idx=kk, top_k=topk, temperature=temperature, sample=(ii < burnin))\n idxs = idxs if hasattr(idxs, \"__getitem__\") else [idxs]\n for jj in range(batch_size):\n batch[jj][kk] = idxs[jj]\n \n if verbose and np.mod(ii+1, print_every) == 0:\n for_print = tokenizer.convert_ids_to_tokens(batch[0])\n for_print = for_print[:kk+1] + ['(*)'] + for_print[kk+1:]\n print(\"iter\", ii+1, \" \".join(for_print))\n \n return untokenize_batch(batch)",
"def create_generator(params, mode, epoch):\n # example\n # problem_list: ['NER', 'CWS', 'WeiboNER', 'WeiboSegment']\n # problem_chunk: [['NER'], ['CWS'], ['WeiboNER', 'WeiboSegment']]\n problem_list = []\n problem_chunk = []\n for problem_dict in params.run_problem_list:\n problem_list += list(problem_dict.keys())\n problem_chunk.append(list(problem_dict.keys()))\n\n # get dummy labels\n def _create_dummpy_label(problem_type):\n if problem_type == 'cls':\n return 0\n else:\n return [0]*params.max_seq_len\n dummy_label_dict = {problem+'_label_ids': _create_dummpy_label(\n params.problem_type[problem]) for problem in problem_list if params.problem_type[problem] != 'pretrain'}\n\n # init gen\n gen_dict = {problem: params.read_data_fn[problem](params, mode)\n for problem in problem_list}\n\n while gen_dict:\n # sample problem to train\n if len(problem_chunk) > 1:\n data_num_list = [params.data_num_dict[chunk[0]]\n for chunk in problem_chunk]\n if params.multitask_balance_type == 'data_balanced':\n sample_prob = np.array(data_num_list) / np.sum(data_num_list)\n current_problem_chunk_ind = np.random.choice(\n list(range(len(problem_chunk))), p=sample_prob)\n current_problem_chunk = problem_chunk[current_problem_chunk_ind]\n\n elif params.multitask_balance_type == 'problem_balanced':\n sample_prob = np.array(\n [1]*len(data_num_list)) / np.sum([1]*len(data_num_list))\n current_problem_chunk_ind = np.random.choice(\n list(range(len(problem_chunk))), p=sample_prob)\n current_problem_chunk = problem_chunk[current_problem_chunk_ind]\n else:\n current_problem_chunk = problem_chunk[0]\n\n # create loss multiplier\n loss_multiplier = {}\n for problem in problem_list:\n if problem in current_problem_chunk:\n loss_multiplier[problem+'_loss_multiplier'] = 1\n else:\n loss_multiplier[problem+'_loss_multiplier'] = 0\n\n base_dict = {}\n base_input = None\n for problem in current_problem_chunk:\n try:\n instance = next(gen_dict[problem])\n except StopIteration:\n if mode == 'train':\n gen_dict[problem] = params.read_data_fn[problem](\n params, mode)\n instance = next(gen_dict[problem])\n else:\n del gen_dict[problem]\n continue\n except KeyError:\n continue\n\n base_dict.update(instance)\n if base_input is None:\n base_input = instance['input_ids']\n elif not params.augument_mask_lm:\n assert base_input == instance[\n 'input_ids'], 'Inputs id of two chained problem not aligned. Please double check!'\n\n if not base_dict:\n continue\n\n # add dummpy labels\n for dummy_problem in dummy_label_dict:\n if dummy_problem not in base_dict:\n base_dict[dummy_problem] = dummy_label_dict[dummy_problem]\n # add loss multipliers\n base_dict.update(loss_multiplier)\n yield base_dict",
"def kmeans_004():\n crops = [200] # Should probably also add 250\n scales = [30, 50] # Scaling is probably the most important part here\n\n scores = []\n for s in scales:\n crop = 200\n n_centroids = 1600\n n_patches = 400000\n # rf_size = int(round(s * .2))\n rf_size = 10\n logger.info(\"Training with crop {}, scale {}, patch size {}, patches {}, centroids {}\".format(crop, s, rf_size, n_patches, n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n # spherical generator\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_004_scale_{}_rf_{}'.format(s, rf_size),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n logger.info(\"Images ndarray shape: {}\".format(images.shape))\n patches = patch_extractor.transform(images)\n logger.info(\"Patches ndarray shape: {}\".format(patches.shape))\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_004_scale_{}_rf_{}.npy'.format(s, rf_size), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n logger.info(\"Train X ndarray shape: {}\".format(train_x.shape))\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n scores.append((s, wrapper.cv_scores))\n del wrapper\n gc.collect()",
"def learn(start_epoch, max_epochs, plot_interval, test_interval, checkpoint_interval, delete_checkpoints):\n\tglobal epoch\n\tfor epoch in range(start_epoch, int(max_epochs) + 1):\n\t\tsys.stdout.flush()\n\t\t#while os.path.exists(\"stop.txt\"): # Allows pausing during training\n\t\t#\ttime.sleep(5)\n\t\tstart_time = time.time()\n\t\tcorrect_syn, MAE_syn, avg_dev_syn = model_train(syn_train_loader, False)\n\t\tcorrect_nat, MAE_nat, avg_dev_nat = model_train(nat_train_loader, True)\n\t\tlen_nat = len(nat_train_loader.dataset)\n\t\tlen_syn = len(syn_train_loader.dataset)\n\t\tprint(\"Train Epoch: \" + str(\n\t\t\tepoch) + \"\\tNAT: Dec: {:.3f}\\tKLD: {:.4f}\\tCor: {:.3f}\\tMAE: {:.2f}\\tDEV: {:.3f}\\tRegr: {:.3f}\\t\\tSYN: Dec: {:.3f}\\tKLD: {:.4f}\\tCor: {:.3f}\\tMAE: {:.2f}\\tDEV: {:.3f}\\tRegr: {:.3f}\\ttime: {:.2f}s\"\n\t\t\t .format(decoder_nat_loss / len_nat, KLD_nat_loss / len_nat, correct_nat, MAE_nat, avg_dev_nat, regressor_nat / len_nat,\n\t\t\t\t\t decoder_syn_loss / len_syn, KLD_syn_loss / len_syn, correct_syn, MAE_syn, avg_dev_syn, regressor_syn / len_syn,\n\t\t\t\t\t time.time() - start_time))\n\t\treset_loss_sums()\n\t\tif epoch % test_interval == 0:\n\t\t\tcorrect_syn, MAE_syn = model_test(epoch, False)\n\t\t\tcorrect_nat, MAE_nat = model_test(epoch, True)\n\t\t\tlen_nat = len(nat_test_loader.dataset)\n\t\t\tlen_syn = len(syn_test_loader.dataset)\n\t\t\tprint(\"=> Test Epoch: \" + str(\n\t\t\t\tepoch) + \"\\tDec_nat: {:.3f}\\tKLD_nat: {:.4f}\\tCor_nat: {:.3f}\\tMAE_nat: {:.2f}\\tRegr_nat: {:.3f}\\tDec_syn: {:.3f}\\tKLD_syn: {:.4f}\\tCor_syn: {:.3f}\\tMAE_syn: {:.2f}\\tRegr_syn: {:.3f}\\ttime: {:.2f}s\"\n\t\t\t\t .format(decoder_nat_loss / len_nat, KLD_nat_loss / len_nat, correct_nat, MAE_nat, regressor_nat / len_nat,\n\t\t\t\t\t\t decoder_syn_loss / len_syn, KLD_syn_loss / len_syn, correct_syn, MAE_syn, regressor_syn / len_syn,\n\t\t\t\t\t\t time.time() - start_time))\n\t\t\treset_loss_sums()\n\t\t\trepresent()\n\t\tif epoch % eval_interval == 0:\n\t\t\tevaluate()\n\t\tif epoch % checkpoint_interval == 0:\n\t\t\tsave_log(decoder_nat_log, \"Decoder_nat\")\n\t\t\tsave_log(decoder_syn_log, \"Decoder_syn\")\n\t\t\tsave_log(KLD_nat_log, \"KLD_nat\")\n\t\t\tsave_log(KLD_syn_log, \"KLD_syn\")\n\t\t\tsave_log(regressor_nat_log, \"Regressor_nat\")\n\t\t\tsave_log(regressor_syn_log, \"Regressor_syn\")\n\t\t\tsave_log(correct_nat_log, \"Correct_nat\")\n\t\t\tsave_log(correct_syn_log, \"Correct_syn\")\n\t\t\ttorch.save({\n\t\t\t\t'epoch': epoch + 1,\n\t\t\t\t'state_dict': model.state_dict(),\n\t\t\t\t'optimizer': optimizer.state_dict(),\n\t\t\t}, directory + '/%d.pth' % epoch)\n\t\t\tif delete_checkpoints and epoch > checkpoint_interval: # the first one has to exist already\n\t\t\t\tos.remove(directory + \"/\" + str(epoch - checkpoint_interval) + \".pth\")\n\t\tif epoch % plot_interval == 0:\n\t\t\tplotlyplot.directory = directory\n\t\t\tplotlyplot.createPlots(100, 50, directory)\n\tshowcase()\n\tplaySound()",
"def main():\n \n # Load the notes used to train the model\n notes = pickle.load(open('data/notes', 'rb'))\n \n # Load the notes from all video games combined\n all_notes = pickle.load(open('data/all_notes', 'rb'))\n \n # Get number of unique notes, rests, and chords in the midi files\n n_vocab = len(set(all_notes))\n\n # Generate Network Inputs (list of lists containing note sequences)\n # Generate Normalized Network Input\n network_input, normalized_input = prepare_sequences(notes, all_notes, n_vocab)\n \n # Generate the Keras model with final dense layer having n_vocab number of nodes\n model = create_network(normalized_input, n_vocab)\n \n # Generate the note outputs from the model, and random sequence of notes for network input\n prediction_output = generate_notes(model, network_input, all_notes, n_vocab)\n \n # Create the Midi file from the generated note output\n create_midi(prediction_output)",
"def train(self, max_epochs: int=100) \\\n -> Generator[Tuple[float, float, int], bool, None]:\n assert self.tf_init_done, \"Must call .init_tf() first!\"\n\n tr = tqdm.trange(max_epochs, desc='epoch', leave=True)\n mean_loss = None\n\n for epoch_num in tr:\n # only extend replay by a bit each time\n succ_rates = self._extend_replays(max(25 // len(self.problems), 1))\n succ_rate = np.mean(succ_rates)\n replay_sizes = self._get_replay_sizes()\n replay_size = sum(replay_sizes)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n self._log_op_value('succ-rate', succ_rate)\n self._log_op_value('replay-size', replay_size)\n # do a few batches of SGD (should keep us close to convergence)\n mean_loss = self._optimise(300)\n tr.set_postfix(\n succ_rate=succ_rate, net_loss=mean_loss, states=replay_size)\n keep_going = yield succ_rate, mean_loss, replay_size\n if not keep_going:\n print('.train() terminating early')\n break",
"def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n 
epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)",
"def generate_notes(model, network_input, n_vocab):\n # pick a random sequence from the input as a starting point for the prediction\n start = np.random.randint(0, len(network_input)-1)\n \n # Get pitch names and store in a dictionary\n pitchnames = sorted(set(item for item in notes))\n int_to_note = dict((number, note) for number, note in enumerate(pitchnames))\n\n pattern = network_input[start]\n prediction_output = []\n\n # generate 500 notes\n for note_index in range(500):\n prediction_input = np.reshape(pattern, (1, len(pattern), 1))\n prediction_input = prediction_input / float(n_vocab)\n\n prediction = model.predict(prediction_input, verbose=0)\n\n index = np.argmax(prediction)\n result = int_to_note[index]\n prediction_output.append(result)\n \n pattern = np.append(pattern,index)\n pattern = pattern[1:len(pattern)]\n\n return prediction_output",
"def train(dataset, epochs, discriminator, generator, generatorOptimizer, discriminatorOptimizer, seed, checkpoint, checkpoint_prefix):\n for epoch in range(epochs):\n start = time.time()\n\n for image_batch in tqdm(dataset):\n train_step(image_batch, generator, discriminator, generatorOptimizer, discriminatorOptimizer)\n #2nd for()\n\n #Produce images for the GIF\n display.clear_output(wait=True)\n generate_and_save_images(generator, epoch + 1, seed)\n\n #save model every 5 epochs\n if (epoch + 1) % 5 == 0:\n print(\"Saving checkpoint.\")\n checkpoint.save(file_prefix = checkpoint_prefix)\n #if() \n\n print(\"time for epoch {} is {} sec\".format(epoch + 1, time.time()-start))\n #1st for\n \n #Generate after final epoch\n display.clear_output(wait=True)\n generator.save(\"CatGeneratorColor.model\")\n discriminator.save(\"CatDiscriminatorColor.model\")",
"def train(length, n_ep, show=False):\r\n al_dict = {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5, 'g':6, 'h':7, 'i':8, \r\n 'j':9, 'k':10, 'l':11, 'm':12, 'n':13, 'o':14, 'p':15, 'q':16, \r\n 'r':17, 's':18, 't':19, 'u':20, 'v':21, 'w':22, 'x':23, 'y':24, \r\n 'z':25}\r\n \r\n #Load proper data\r\n raw_rand = open(f'Length {length} Data/rand_data_len{length}.txt', 'r').readlines()\r\n raw_human = open(f'Length {length} Data/human_data_len{length}.txt','r').readlines()\r\n \r\n #Turn data into numbers\r\n raw_rand = [string[:length] for string in raw_rand]\r\n raw_human = [string[:length] for string in raw_human]\r\n \r\n raw_rand = [list(string) for string in raw_rand]\r\n raw_human = [list(string) for string in raw_human]\r\n \r\n for lists in raw_rand:\r\n for i in range(length):\r\n lists[i] = al_dict[lists[i]]\r\n for lists in raw_human:\r\n for i in range(length):\r\n lists[i] = al_dict[lists[i]]\r\n \r\n #Set aside 1/3 for test data\r\n test_rand = raw_rand[:len(raw_rand)//3]\r\n test_human = raw_human[:len(raw_human)//3]\r\n train_rand = raw_rand[len(raw_rand)//3:]\r\n train_human = raw_human[len(raw_human)//3:]\r\n \r\n #Intersperse both samples randomly with labels (0 = rand, 1 = human)\r\n train = []\r\n trn_labels = []\r\n test = []\r\n tst_labels = []\r\n while len(test_rand) and len(test_human) != 0:\r\n num = random.random()\r\n if num > .5 and len(test_rand) > 0:\r\n test.append(test_rand.pop())\r\n tst_labels.append(0)\r\n else:\r\n test.append(test_human.pop())\r\n tst_labels.append(1)\r\n \r\n while len(train_rand) and len(train_human) != 0:\r\n num = random.random()\r\n if num > .5 and len(train_rand) > 0:\r\n train.append(train_rand.pop())\r\n trn_labels.append(0)\r\n else:\r\n train.append(train_human.pop())\r\n trn_labels.append(1)\r\n \r\n # Build network, 2 hidden layers, 26 dimensional vectors for alphabet\r\n model = keras.Sequential()\r\n model.add(keras.layers.Embedding(26, 16)) #26=numletters\r\n model.add(keras.layers.GlobalAveragePooling1D())\r\n model.add(keras.layers.Dense(16, activation=tf.nn.relu))\r\n model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))\r\n \r\n # Loss function, Using Probabilities\r\n model.compile(optimizer=tf.train.AdamOptimizer(),\r\n loss='binary_crossentropy',\r\n metrics=['accuracy'])\r\n \r\n # Perform n_ep epochs of training on train_data\r\n partial_train = np.array(train[len(train)//10:])\r\n partial_labels = trn_labels[len(train)//10:]\r\n val_train = np.array(train[:len(train)//10])\r\n val_labels = trn_labels[:len(train)//10]\r\n print((len(partial_train), len(partial_labels)),\r\n (len(val_train), len(val_labels)),\r\n (len(test), len(tst_labels)))\r\n \r\n history = model.fit(partial_train,\r\n partial_labels,\r\n epochs=n_ep,\r\n batch_size=512,\r\n validation_data=(val_train, val_labels),\r\n verbose=1)\r\n \r\n #EVALUATE THE FINAL MODEL\r\n\r\n results = model.evaluate(np.array(test), tst_labels)\r\n print(results)\r\n \r\n # GRAPH ACCURACY AND LOSS OVER TIME\r\n if show:\r\n acc = history.history['acc']\r\n val_acc = history.history['val_acc']\r\n loss = history.history['loss']\r\n val_loss = history.history['val_loss']\r\n \r\n history_dict = history.history\r\n \r\n epochs = range(1, len(acc) + 1)\r\n \r\n # \"bo\" is for \"blue dot\"\r\n plt.plot(epochs, loss, 'bo', label='Training loss')\r\n # b is for \"solid blue line\"\r\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\r\n plt.title('Training and validation loss')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Loss')\r\n plt.legend()\r\n \r\n 
plt.show()\r\n \r\n plt.clf() # clear figure\r\n acc_values = history_dict['acc']\r\n val_acc_values = history_dict['val_acc']\r\n \r\n plt.plot(epochs, acc, 'bo', label='Training acc')\r\n plt.plot(epochs, val_acc, 'b', label='Validation acc')\r\n plt.title('Training and validation accuracy')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Accuracy')\r\n plt.legend()\r\n \r\n plt.show()\r\n \r\n return model",
"def _train_internal(self, opts):\n\n batches_num = self._data.num_points / opts['batch_size']\n train_size = self._data.num_points\n num_plot = 320\n sample_prev = np.zeros([num_plot] + list(self._data.data_shape))\n l2s = []\n\n counter = 0\n decay = 1.\n logging.error('Training VAE')\n for _epoch in xrange(opts[\"gan_epoch_num\"]):\n\n if opts['decay_schedule'] == \"manual\":\n if _epoch == 30:\n decay = decay / 2.\n if _epoch == 50:\n decay = decay / 5.\n if _epoch == 100:\n decay = decay / 10.\n\n if _epoch > 0 and _epoch % opts['save_every_epoch'] == 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot'),\n global_step=counter)\n\n for _idx in xrange(batches_num):\n # logging.error('Step %d of %d' % (_idx, batches_num ) )\n data_ids = np.random.choice(train_size, opts['batch_size'],\n replace=False, p=self._data_weights)\n batch_images = self._data.data[data_ids].astype(np.float)\n batch_noise = utils.generate_noise(opts, opts['batch_size'])\n _, loss, loss_kl, loss_reconstruct = self._session.run(\n [self._optim, self._loss, self._loss_kl,\n self._loss_reconstruct],\n feed_dict={self._real_points_ph: batch_images,\n self._noise_ph: batch_noise,\n self._lr_decay_ph: decay,\n self._is_training_ph: True})\n counter += 1\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n debug_str = 'Epoch: %d/%d, batch:%d/%d' % (\n _epoch+1, opts['gan_epoch_num'], _idx+1, batches_num)\n debug_str += ' [L=%.2g, Recon=%.2g, KLQ=%.2g]' % (\n loss, loss_reconstruct, loss_kl)\n logging.error(debug_str)\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n metrics = Metrics()\n points_to_plot = self._run_batch(\n opts, self._generated, self._noise_ph,\n self._noise_for_plots[0:num_plot],\n self._is_training_ph, False)\n l2s.append(np.sum((points_to_plot - sample_prev)**2))\n metrics.l2s = l2s[:]\n metrics.make_plots(\n opts,\n counter,\n None,\n points_to_plot,\n prefix='sample_e%04d_mb%05d_' % (_epoch, _idx))\n reconstructed = self._session.run(\n self._reconstruct_x,\n feed_dict={self._real_points_ph: batch_images,\n self._is_training_ph: False})\n metrics.l2s = None\n metrics.make_plots(\n opts,\n counter,\n None,\n reconstructed,\n prefix='reconstr_e%04d_mb%05d_' % (_epoch, _idx))\n if opts['early_stop'] > 0 and counter > opts['early_stop']:\n break\n if _epoch > 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot-final'),\n global_step=counter)",
"def _create_examples(self, lines, set_type):\n # Parallelizing a bit batch computation because it is quite slow...\n #lines = lines[:500]\n step = 18 # 17 sentences per input sequence\n #encoded_dict = self.tokenizer.encode('[CLS] ' + ' [SEP] [CLS] '.join(lines) + ' [SEP]')\n #tokens = np.array(encoded_dict.tokens)\n #ids = np.array(encoded_dict.ids)\n \n n = len(lines)\n \n def f(i, sequence):\n guid = \"%s-%s\" % (set_type, i)\n text_a = self.pad_to_max_length([2] + self.mask_tokens(sequence) + [3])\n text_b = [0 if item==0 else 1 for item in text_a]\n label = self.pad_to_max_length([2] + sequence + [3])\n label = [label[i] if item==4 else -100 for i, item in enumerate(text_a)] # for loss computation, only taking into account MASK tokens with id==4\n example = InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label)\n return example\n \n def g(i, line):\n sequence = self.tokenizer.encode(' '.join(line)).ids\n return f(i, sequence)\n \n # Splitting data for memory issues...\n indexes = list(range(0, n, step))\n m = len(indexes)\n n_splits = self.n_splits\n splits = [indexes[i*m//n_splits: m*(i+1)//n_splits] for i in range(n_splits)]\n for index_split, split in enumerate(splits):\n print(f\"Computing split {index_split+1} / {n_splits}... Split size: {len(split)}\")\n examples = Parallel(n_jobs=-1)(delayed(g)(index+split[0], lines[i:i + step]) for index, i in tqdm(enumerate(split)))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl'), examples)\n # Merging\n #examples = [self.load_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl')) for index_split in range(n_splits)]\n #examples = [item for l in examples for item in l]\n #self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples.pkl'), examples)\n \n examples_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl') for index_split in range(n_splits)]\n \n return examples_paths",
"def run_epoch(session,\n model,\n dataset,\n is_train=False,\n plot_attention_weights=False):\n assert dataset is not None\n n_words = len([word for sample in dataset for word in sample if word > 0])\n epoch_size = int(math.ceil(len(dataset) / model.batch_size))\n # producer = lm_data_producer(dataset, model.batch_size, model.num_steps)\n\n fetches = {\"step_cost\": model.batch_loss, \"niters\": model.nwords}\n if is_train:\n fetches[\"eval_op\"] = model.train_op\n if plot_attention_weights:\n fetches[\"weights\"] = model.attention_weights\n\n costs = 0.0\n iters = 0\n start_time = time.time()\n # for step, (x, y) in enumerate(producer):\n for step in range(epoch_size):\n step_time = time.time()\n vals = session.run(fetches, {})\n step_cost = vals[\"step_cost\"]\n costs += step_cost\n # iters += np.sum(x > 0)\n iters += vals[\"niters\"]\n\n # print information regarding the current training process\n if is_train:\n if step % (epoch_size // 20) == 10:\n print(\"{:.3f} - aprox. loss {:.8f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, costs / (step + 1),\n iters / (time.time() - start_time)))\n # print information regarding the current training process\n else:\n if step % (epoch_size // 10) == 5:\n print(\"{:.3f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, iters / (time.time() - start_time)))\n\n return np.exp(costs / n_words)",
"def _train_epoch(self, train_batches, data, max_metric_value, metric_save, patience, step_pbar):\n evaluate = True\n exit_tag = False\n num_steps = self.args.num_steps\n check_point, batch_size = self.args.check_point, self.args.batch_size\n save_dir, save_prefix = self.args.save_dir, self.args.algo\n\n for bitx, batch in enumerate(train_batches):\n if evaluate and self.global_step % self.eval_freq == 0:\n if data.dev_set is not None:\n dev_batches = data.gen_mini_batches('dev', 31928, shuffle=False)\n dev_loss, dev_perplexity, dev_perplexity_at_rank = self.evaluate(dev_batches, data)\n #print('dev loss=%s' % dev_loss, 'dev ppl=%s' % dev_perplexity, 'dev ppl at rank=', dev_perplexity_at_rank)\n\n test_batches = data.gen_mini_batches('test', 41405, shuffle=False)\n test_loss, test_perplexity, test_perplexity_at_rank = self.evaluate(test_batches, data)\n #print('test loss=%s' % test_loss, 'dev ppl=%s' % test_perplexity, 'dev ppl at rank=' , test_perplexity_at_rank)\n\n self.writer.add_scalar(\"dev/loss\", dev_loss, self.global_step)\n self.writer.add_scalar(\"dev/perplexity\", dev_perplexity, self.global_step)\n self.writer.add_scalar(\"test/loss\", test_loss, self.global_step)\n self.writer.add_scalar(\"test/perplexity\", test_perplexity, self.global_step)\n\n for trunc_level in self.trunc_levels:\n ndcg_version1, ndcg_version2 = self.relevance_estimator.evaluate(self, data, self.relevance_queries, trunc_level)\n self.writer.add_scalar(\"NDCG_version1/{}\".format(trunc_level), ndcg_version1, self.global_step)\n self.writer.add_scalar(\"NDCG_version2/{}\".format(trunc_level), ndcg_version2, self.global_step)\n\n if dev_loss < metric_save:\n metric_save = dev_loss\n patience = 0\n else:\n patience += 1\n # Trick: do not decay d_lr help convergence\n if patience >= self.patience:\n #self.adjust_learning_rate(self.discrim_optimizer, self.args.lr_decay)\n self.adjust_learning_rate(self.policy_optimizer, self.args.lr_decay)\n self.g_lr *= self.args.lr_decay\n #self.d_lr *= self.args.lr_decay\n self.writer.add_scalar('train/g_lr', self.g_lr, self.global_step)\n #self.writer.add_scalar('train/d_lr', self.d_lr, self.global_step)\n metric_save = dev_loss\n patience = 0\n self.patience += 1\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n self.global_step += 1\n step_pbar.update(1)\n QIDS = Variable(torch.from_numpy(np.array(batch['qids'], dtype=np.int64)))\n UIDS = Variable(torch.from_numpy(np.array(batch['uids'], dtype=np.int64)))\n VIDS = Variable(torch.from_numpy(np.array(batch['vids'], dtype=np.int64)))\n PRE_CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, :-1]))\n CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, 1:]))\n\n # generate trajectories\n for __ in range(self.args.d_step):\n actor_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n critic_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n CLICK_ = torch.zeros(QIDS.shape[0], 1, dtype=CLICKS.dtype)\n logits = torch.zeros(QIDS.shape[0], 0, 2)\n values = torch.zeros(QIDS.shape[0], 0)\n CLICKS_ = Variable(torch.zeros((QIDS.shape[0], 0), dtype=CLICKS.dtype))\n if self.use_cuda:\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS = QIDS.cuda(), UIDS.cuda(), VIDS.cuda(), PRE_CLICKS.cuda(), CLICKS.cuda()\n actor_rnn_state, critic_rnn_state, CLICK_ = actor_rnn_state.cuda(), critic_rnn_state.cuda(), CLICK_.cuda()\n logits, values, CLICKS_ = logits.cuda(), values.cuda(), CLICKS_.cuda()\n self.policy.eval()\n 
for i in range(self.max_d_num + 1):\n logit, value, actor_rnn_state, critic_rnn_state = self.policy(QIDS[:, i:i+1], \n UIDS[:, i:i+1], \n VIDS[:, i:i+1], \n CLICK_, \n actor_rnn_state, \n critic_rnn_state)\n if i > 0:\n CLICK_ = torch.distributions.Categorical(logit).sample()\n logits = torch.cat([logits, logit], dim=1)\n values = torch.cat([values, value], dim=1)\n CLICKS_ = torch.cat([CLICKS_, CLICK_], dim=1)\n\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n\n '''update discriminator'''\n for _ in range(self.args.k):\n self.discrim.train()\n self.discrim_optimizer.zero_grad()\n g_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS_)\n g_o_target = torch.ones((QIDS.shape[0], g_o.shape[1]))\n e_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS)\n e_o_target = torch.zeros((QIDS.shape[0], e_o.shape[1]))\n if self.use_cuda:\n g_o_target, e_o_target = g_o_target.cuda(), e_o_target.cuda()\n \n discrim_loss = self.discrim_criterion(g_o, g_o_target) + self.discrim_criterion(e_o, e_o_target)\n discrim_loss.backward()\n self.discrim_optimizer.step()\n self.writer.add_scalar('train/d_loss', discrim_loss.data, self.global_step)\n\n '''estimate advantage'''\n with torch.no_grad():\n self.discrim.eval()\n rewards = -torch.log(self.discrim(QIDS, UIDS, VIDS, CLICKS_)[0])\n # print(rewards.shape, values.shape)\n #print(tensor_type)\n #exit(0)\n deltas = torch.zeros(rewards.shape)\n advantages = torch.zeros(rewards.shape)\n prev_value = torch.zeros(rewards.shape[0])\n prev_advantage = torch.zeros(rewards.shape[0])\n if self.use_cuda:\n deltas, advantages = deltas.cuda(), advantages.cuda()\n prev_value, prev_advantage = prev_value.cuda(), prev_advantage.cuda()\n '''print(deltas)\n print(advantages)\n print(prev_value)\n print(prev_advantage)\n exit(0)'''\n\n for i in reversed(range(rewards.size(1))):\n deltas[:, i] = rewards[:, i] + self.gamma * prev_value - values[:, i]\n advantages[:, i] = deltas[:, i] + self.gamma * self.tau * prev_advantage\n prev_value = values[:, i]\n prev_advantage = advantages[:, i]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + MINF)\n # advantages = (returns - returns.mean())/returns.std()\n\n fixed_log_probs = torch.distributions.Categorical(logits).log_prob(CLICKS_[:, 1:])\n\n '''PPO update'''\n self.policy.train()\n optim_batchsize = 512\n optim_iter_num = int(math.ceil(QIDS.shape[0] / optim_batchsize))\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n for _ in range(self.args.g_step):\n perm = np.arange(QIDS.shape[0])\n np.random.shuffle(perm)\n\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS, CLICKS_, advantages, returns, fixed_log_probs = \\\n QIDS[perm].clone(), UIDS[perm].clone(), VIDS[perm].clone(), PRE_CLICKS[perm].clone(), \\\n CLICKS[perm].clone(), CLICKS_[perm].clone(), advantages[perm].clone(), returns[perm].clone(), fixed_log_probs[perm].clone()\n\n #print(QIDS)\n #exit(0)\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batchsize, min((i + 1) * optim_batchsize, QIDS.shape[0]))\n qids_b, uids_b, vids_b, pclicks_b, clicks_b, clicks__b, advantage_b, returns_b, fixed_log_probs_b = \\\n QIDS[ind], UIDS[ind], VIDS[ind], 
CLICKS_[ind, :-1], CLICKS[ind], CLICKS_[ind, 2:], \\\n advantages[ind], returns[ind], fixed_log_probs[ind]\n\n logits, values_pred, _, _ = self.policy(qids_b, uids_b, vids_b, pclicks_b)\n dist = torch.distributions.Categorical(logits)\n\n\n '''update critic'''\n value_loss = (values_pred - returns_b).pow(2).mean()\n '''optimizer policy'''\n log_probs_b = dist.log_prob(clicks__b)\n ratio = torch.exp(log_probs_b - fixed_log_probs_b)\n surr1 = ratio * advantage_b\n surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantage_b\n policy_surr = -torch.min(surr1, surr2).mean()\n pe = dist.entropy().mean()\n loss = value_loss + self.alpha * policy_surr - self.beta * pe\n\n self.policy_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 40)\n self.policy_optimizer.step()\n g_loss, _ = self.compute_loss(logits, clicks_b)\n\n self.writer.add_scalar('train/g_loss', g_loss.data, self.global_step)\n self.writer.add_scalar('train/g_valueloss', value_loss.data, self.global_step)\n self.writer.add_scalar('train/g_policysurr', policy_surr.data, self.global_step)\n self.writer.add_scalar('train/g_entropy', pe.data, self.global_step)\n\n if check_point > 0 and self.global_step % check_point == 0:\n self.save_model(save_dir, save_prefix)\n if self.global_step >= num_steps:\n exit_tag = True\n\n return max_metric_value, exit_tag, metric_save, patience",
"def get_batch_gen(self, config):\n\n ################\n # Def generators\n ################\n\n def random_balanced_gen():\n print('trying to generate batch series with ', self.num_train, 'shapes')\n\n # Initiate concatenation lists\n tp_list = [] # points\n tev_list = [] # eigen vectors\n tevt_list = [] # transposed eigen vectors\n tv_list = [] # eigen values\n tevf_list = [] # full eigen vectors for ground truth maps\n ti_list = [] # cloud indices\n\n batch_n = 0\n i_batch = 0\n\n gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator\n # if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices\n # print(gen_indices.shape, config.batch_num)\n # if config.split == 'test':\n # print('test setting here not fully supported')\n # n_shapes = self.num_test # has to be defined\n # gen_indices = []\n # for i in range(n_shapes - 1):\n # for j in range(i + 1, n_shapes):\n # gen_indices += [i, j] # put all the pairs in order\n # gen_indices = np.array(gen_indices)\n\n\n # Generator loop\n for p_i in gen_indices:\n\n # Get points and other input data\n new_points = self.input_points[p_i]\n new_evecs = self.input_evecs[p_i][:, :self.neig]\n new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]\n new_evals = self.input_evals[p_i][:self.neig]\n\n new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]\n\n n = new_points.shape[0]\n\n if i_batch == config.batch_num:\n\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n tp_list = []\n tev_list = []\n tevt_list = []\n tv_list = []\n tevf_list = []\n ti_list = []\n\n batch_n = 0\n i_batch = 0\n\n # Add data to current batch\n tp_list += [new_points]\n tev_list += [new_evecs]\n tevt_list += [new_evecs_trans]\n tv_list += [new_evals]\n tevf_list += [new_evecs_full]\n ti_list += [p_i]\n\n # Update batch size\n batch_n += n\n i_batch += 1\n\n # yield the rest if necessary (it will not be a full batch and could lead to mistakes because of\n # shape matching needing pairs !!!!)\n yield (np.concatenate(tp_list, axis=0),\n np.concatenate(tev_list, axis=0),\n np.concatenate(tevt_list, axis=1),\n np.concatenate(tv_list, axis=1),\n np.concatenate(tevf_list, axis=0),\n np.array(ti_list, dtype=np.int32),\n np.array([tp.shape[0] for tp in tp_list]))\n\n ##################\n # Return generator\n ##################\n\n # Generator types and shapes\n gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)\n gen_shapes = ([None, 3], [None, self.neig],\n [self.neig, None], [self.neig, None], [None, self.neig], [None], [None])\n\n return random_balanced_gen, gen_types, gen_shapes",
"def train_generator(path, max_length):\n questions, answers, segment_ids = parse_data(path)\n randns = np.random.random((len(questions)))\n\n for que, ans, segment_id, randn in zip(questions, answers, segment_ids, randns):\n if randn < 0.34:\n input_id = que + ans\n if len(segment_id) - sum(segment_id) >= max_length:\n # 第一个句子长度大于max_length\n continue\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_seq(segment_id, input_mask)\n elif randn >= 0.34 and randn <= 0.67:\n input_id = que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n attention_mask = create_attention_mask_for_bi(input_mask)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n elif randn > 0.67 and randn <= 0.83:\n input_id = que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_lm(input_mask)\n else:\n input_id = que + ans\n input_id, input_mask, masked_ids, masked_positions, masked_weights = create_input_mask(input_id, max_length)\n segment_id += [1] * (max_length - len(segment_id))\n segment_id = segment_id[:max_length]\n attention_mask = create_attention_mask_for_lm(input_mask, reverse=True)\n\n features = {'input_ids': input_id,\n 'input_mask': attention_mask,\n 'segment_ids': segment_id,\n 'masked_lm_positions': masked_positions,\n 'masked_lm_ids': masked_ids,\n 'masked_lm_weights': masked_weights}\n assert len(features['input_ids']) == len(features['input_mask']) == len(features['segment_ids']) == len(\n features['masked_lm_positions']) == len(features['masked_lm_ids']) == len(\n features['masked_lm_weights']) == max_length\n yield features",
"def generate_new_batches(Gs, features, y, idx, graph_window, shift, batch_size, device, test_sample):\n\n # print(\"...create batches...\")\n N = len(idx)\n n_nodes = Gs[0].number_of_nodes()\n n_state = 50 \n \n adj_lst = list()\n features_lst = list()\n y_lst = list()\n node_lst = list()\n\n batch_data = []\n for i in range(0, N, batch_size):\n if i+batch_size >= N:\n batch_data += [[idx[x] for x in range(i, N)]]\n else:\n batch_data += [[idx[x] for x in range(i, i+batch_size)]]\n\n for batch in batch_data:\n adj_tmp = list()\n features_tmp = list()\n y_tmp = list()\n num_tmp = list()\n line_idx = 0\n for val in batch:\n for k in range(val-graph_window+1,val+1):\n adj_tmp.append(nx.adjacency_matrix(Gs[k-1]).toarray()) \n for feat in features[k]:\n features_tmp.append(feat)\n num_tmp += list(range(line_idx, line_idx+50))\n line_idx += len(features[k])\n y_tmp.append(y[val+shift])\n\n adj_tmp = sparse_mx_to_torch_sparse_tensor(sp.block_diag(adj_tmp))\n adj_lst.append(adj_tmp.to(device))\n features_tmp = torch.FloatTensor(features_tmp)\n features_lst.append(features_tmp.to(device))\n y_tmp = torch.FloatTensor(y_tmp).reshape(-1)\n y_lst.append(y_tmp.to(device))\n node_lst.append(num_tmp)\n\n\n return adj_lst, features_lst, y_lst, node_lst",
"def kmeans_006():\n n_centroids_vals = [1000, 2000, 2500, 3000]\n scores = []\n\n for n_centroids in n_centroids_vals:\n s = 15\n crop = 150\n n_patches = 400000\n rf_size = 5\n logger.info(\"Training with n_centroids {}\".format(n_centroids))\n\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n result_path='data/data_train_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n result_path='data/data_test_crop_{}_scale_{}.npy'.format(crop, s),\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_006_centroids_{}'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n images = train_x_crop_scale.transform()\n\n patches = patch_extractor.transform(images)\n\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_006_centroids_{}.npy'.format(n_centroids), memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, n_folds=2, parallel_estimator=True)\n\n score = (n_centroids, wrapper.cv_scores)\n logger.info(\"Scores: {}\".format(score))\n scores.append(score)\n\n del wrapper\n gc.collect()",
"def kmeans_007():\n n_centroids = 5000\n s = 50\n crop = 200\n # Originally, 1600 centroids for 400,000 patches, or 250 patches per centroid\n # 800000 / 5000 = will give us 160 patches per centroid\n n_patches = 800000\n rf_size = 20\n # 31 x 31 = 961 patches per image, which is 10x more patches than the original settings\n # If we set stride 2, then it's 16 x 16 patches = 256, only twice as many patches\n stride = 2\n train_x_crop_scale = CropScaleImageTransformer(training=True,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n images = train_x_crop_scale.transform()\n patch_extractor = models.KMeansFeatures.PatchSampler(n_patches=n_patches,\n patch_size=rf_size,\n n_jobs=-1)\n patches = patch_extractor.transform(images)\n\n kmeans_generator = KMeansFeatureGenerator(n_centroids=n_centroids,\n rf_size=rf_size,\n result_path='data/mdl_kmeans_007'.format(n_centroids),\n n_iterations=20,\n n_jobs=-1,)\n kmeans_generator.fit(patches)\n\n del patches\n gc.collect()\n\n train_x = kmeans_generator.transform(images, save_to_file='data/data_kmeans_features_007.npy', stride_size=stride, memmap=True)\n train_y = classes.train_solutions.data\n # Unload some objects\n del images\n gc.collect()\n\n wrapper = ModelWrapper(models.Ridge.RidgeRFEstimator, {'alpha': 500, 'n_estimators': 250}, n_jobs=-1)\n wrapper.cross_validation(train_x, train_y, parallel_estimator=True)\n\n \"\"\"\n wrapper.fit(train_x, train_y)\n\n test_x_crop_scale = CropScaleImageTransformer(training=False,\n crop_size=crop,\n scaled_size=s,\n n_jobs=-1,\n memmap=True)\n\n test_images = test_x_crop_scale.transform()\n test_x = kmeans_generator.transform(test_images, save_to_file='data/data_test_kmeans_features_007.npy'.format(n_centroids), memmap=True)\n res = wrapper.predict(test_x)\n sub = classes.Submission(res)\n sub.to_file('sub_kmeans_006.csv')\n \"\"\"",
"def main():\n bee_model = md.BeeForagingModel(GRID_WIDTH, GRID_HEIGHT, 10, 30, 7)\n\n iteration_size = 1000\n\n for i in range(45):\n print(f'ITERATION {i*iteration_size}')\n\n print({k: len(v) for k, v in bee_model.grid.grids.items()})\n start_time = time.time()\n bee_model.run_model(iteration_size)\n print(time.time() - start_time)",
"def generate_notes(model, network_input, pitch_names, n_vocab):\n \n # Pick a random sequence from the input as a starting point for the prediction\n random_start = np.random.randint(0, len(network_input)-1)\n\n # Create a dictionary to map note integers to pitches\n int_to_note = dict((number, note) for number, note in enumerate(pitch_names))\n\n # Choose a random sequence of notes to use as the network seed\n pattern = network_input[random_start]\n \n # Create an empty list to record the prediction output for each successive note\n prediction_output = []\n\n # Generate 500 notes\n for num_notes in range(500):\n \n # Reshape the input into a format compatible with LSTM layers\n prediction_input = np.reshape(pattern, (1, len(pattern), 1))\n \n # Normalize the network input by dividing by n_vocab (number of unique notes, rests, and chords)\n prediction_input = prediction_input / float(n_vocab)\n\n # Predict the next note, given an input sequence of notes\n prediction = model.predict(prediction_input, verbose=0)\n\n # Add each generated note to prediction_output\n index = np.argmax(prediction)\n result = int_to_note[index]\n prediction_output.append(result)\n\n # Update the pattern list to include to the generated note, used in next for-loop loop\n pattern.append(index)\n pattern = pattern[1:len(pattern)]\n\n return prediction_output",
"def train_epoch(self, sess, n_g_train=1, n_d_train=1, keep_prob=1.0,\r\n summary_writer=None):\r\n assert int(n_g_train) > 0 and int(n_d_train) > 0\r\n display_name_list = ['d_loss', 'g_loss']\r\n cur_summary = None\r\n\r\n if self.epoch_id == 100:\r\n self._lr = self._lr / 10\r\n if self.epoch_id == 300:\r\n self._lr = self._lr / 10\r\n\r\n cur_epoch = self._train_data.epochs_completed\r\n\r\n step = 0\r\n d_loss_sum = 0\r\n g_loss_sum = 0\r\n self.epoch_id += 1\r\n while cur_epoch == self._train_data.epochs_completed:\r\n self.global_step += 1\r\n step += 1\r\n\r\n batch_data = self._train_data.next_batch_dict()\r\n im = batch_data['im']\r\n \r\n # train discriminator\r\n for i in range(int(n_d_train)):\r\n random_vec = distributions.random_vector(\r\n (len(im), self._t_model.in_len), dist_type='uniform')\r\n _, d_loss = sess.run(\r\n [self._train_d_op, self._d_loss_op], \r\n feed_dict={self._t_model.real: im,\r\n self._t_model.lr: self._lr,\r\n self._t_model.keep_prob: keep_prob,\r\n self._t_model.random_vec: random_vec})\r\n # train generator\r\n for i in range(int(n_g_train)):\r\n random_vec = distributions.random_vector(\r\n (len(im), self._t_model.in_len), dist_type='uniform')\r\n _, g_loss = sess.run(\r\n [self._train_g_op, self._g_loss_op], \r\n feed_dict={\r\n self._t_model.lr: self._lr,\r\n self._t_model.keep_prob: keep_prob,\r\n self._t_model.random_vec: random_vec})\r\n\r\n d_loss_sum += d_loss\r\n g_loss_sum += g_loss\r\n\r\n if step % 100 == 0:\r\n cur_summary = sess.run(\r\n self._train_summary_op, \r\n feed_dict={self._t_model.real: im,\r\n self._t_model.keep_prob: keep_prob,\r\n self._t_model.random_vec: random_vec})\r\n\r\n display(self.global_step,\r\n step,\r\n [d_loss_sum / n_d_train, g_loss_sum / n_g_train],\r\n display_name_list,\r\n 'train',\r\n summary_val=cur_summary,\r\n summary_writer=summary_writer)\r\n\r\n print('==== epoch: {}, lr:{} ===='.format(cur_epoch, self._lr))\r\n cur_summary = sess.run(\r\n self._train_summary_op, \r\n feed_dict={self._t_model.real: im,\r\n self._t_model.keep_prob: keep_prob,\r\n self._t_model.random_vec: random_vec})\r\n display(self.global_step,\r\n step,\r\n [d_loss_sum / n_d_train, g_loss_sum / n_g_train],\r\n display_name_list,\r\n 'train',\r\n summary_val=cur_summary,\r\n summary_writer=summary_writer)",
"def train(epochs=100, batch=30, info=5000):\n data = get()\n info = batch * 200\n batch_backup = batch\n\n images = np.concatenate((data[\"train\"], data[\"test\"]))\n\n images = images.reshape(len(images), 28, 28, 1)\n dataSize = len(images)\n\n print(\"data size:\", dataSize)\n print(\"Batch size:\", batch)\n\n dis = discriminator()\n\n ident = Model(img, validity)\n ident.trainable = False\n ident.compile(loss='binary_crossentropy', optimizer=op1,\n metrics=['accuracy'])\n\n gener = generator()\n gener.compile(loss='binary_crossentropy', optimizer=op2,\n metrics=['accuracy'])\n\n adversarial = gan(gan=gener, ident=ident)\n\n for epoch in range(epochs):\n batch = batch_backup\n start = 0\n end = batch\n while start <= dataSize:\n\n if end >= dataSize:\n real = images[start:]\n batch = dataSize % batch\n else:\n real = images[start:end]\n\n noise = np.random.uniform(-1, 1, (batch, 100))\n fake = gener.predict(noise)\n\n x = np.concatenate((real, fake))\n\n y = np.ones((2 * batch, 1))\n y[batch:, :] = 0\n\n disLoss = dis.train_on_batch(x, y)\n\n y = np.ones((batch, 1))\n\n #noise = np.random.uniform(-1, 1, (batch, 100))\n\n advLoss = adversarial.train_on_batch(noise, y)\n\n n = np.random.uniform(-1, 1, (1, 100))\n p = gener.predict(n)\n\n prograssBar(start, dataSize)\n\n if end % (info * 2) == 0:\n print(\"\")\n gener.save(\"Dog2GAN.h5\")\n print(\"saved: {}\".format(epoch))\n\n if end % info == 0:\n stats = \"D: {} ; G: {}\".format(disLoss, advLoss)\n print(stats)\n\n if end >= dataSize:\n break\n\n start += batch\n end += batch"
] | [
"0.6539602",
"0.59907496",
"0.5987289",
"0.5776078",
"0.575335",
"0.56405765",
"0.5622853",
"0.5605876",
"0.5576589",
"0.5575136",
"0.5563018",
"0.5555339",
"0.55250406",
"0.5518238",
"0.54963267",
"0.54839075",
"0.54748034",
"0.54684985",
"0.54609823",
"0.5452404",
"0.5443034",
"0.541",
"0.540928",
"0.5404713",
"0.54041153",
"0.53752464",
"0.53674126",
"0.5360435",
"0.5353434",
"0.53502125"
] | 0.7040524 | 0 |
This is only used for debugging. Prints .osu text directly. | def print_osu_text(a):
for i, ai in enumerate(a):
if not is_slider[i]:
print("{},{},{},1,0,0:0:0".format(
int(ai[0]), int(ai[1]), int(timestamps[i])))
else:
print("{},{},{},2,0,L|{}:{},1,{},0:0:0".format(int(ai[0]), int(ai[1]), int(timestamps[i]), int(round(
ai[0] + ai[2] * slider_lengths[i])), int(round(ai[1] + ai[3] * slider_lengths[i])), int(slider_length_base[i] * slider_ticks[i]))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emu_print(text):\n print \"%s %s\" % (EMU_PRINT_PREFIX, text)",
"def print_text(TINY_FONT, x, y, text, color = white):\n text_image = TINY_FONT.render(text, True, color)\n gameDisplay.blit(text_image, (x,y))",
"def _print(self, text):\n\t\tif self.verbose:\n\t\t\tprint text",
"def Print(self, text):\n pass",
"def print_game_over():\n print()\n print(\" _____ __ __ ______ ______ ________ _____ \")\n print(r\" / ____| /\\ | \\/ | ____| / __ \\ \\ / / ____| __ \\ \")\n print(r\" | | __ / \\ | \\ / | |__ | | | \\ \\ / /| |__ | |__) |\")\n print(r\" | | |_ | / /\\ \\ | |\\/| | __| | | | |\\ \\/ / | __| | _ / \")\n print(r\" | |__| |/ ____ \\| | | | |____ | |__| | \\ / | |____| | \\ \\ \")\n print(r\" \\_____/_/ \\_\\_| |_|______| \\____/ \\/ |______|_| \\_\\\\\")\n print()",
"def make_a_sound(): # document string\n print('quack')",
"def ui_output_text(morzeText: str):\n print(morzeText)",
"def WriteText(self, text):\n print(text)",
"def rock():\n typer.echo(\"🤖🤘\")",
"def game_over_text():\n over_text = over_font.render(\"GAME OVER\", True, (255, 255, 255))\n screen.blit(over_text, (200, 250))",
"def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)",
"def Print(self, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (5, self.textLine))\r\n self.textLine += 15",
"def debug_print(text):\r\n if settings.debug:\r\n print (text)",
"def text_output(self):\n print(self.board)\n print()",
"def showText(pos):\n\treturn OnscreenText( \\\n\t\ttext=\" \", \\\n\t\tstyle=1, fg=(0,0,0,1), pos=(-1.3, pos), \\\n\t\talign=TextNode.ALeft, scale = .06, mayChange = True)",
"def sprint(string=\"string\"):\n global screen\n screen += string",
"def DBG(text):\n print(f\"LEP: {text}\")",
"def show(self):\n f = open('/tmp/dotty', 'w')\n f.write(self.dot())\n f.close()\n os.system('cat /tmp/dotty | dot -Tgif > /tmp/dotty.gif')\n os.system('eog /tmp/dotty.gif')",
"def print_game_logo():\n\n HANGMAN_ASCII_ART = r\"\"\"\n _ _\n | | | |\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\n __/ |\n |___/\n\"\"\"\n \n clear_player_screen()\n print_centered(HANGMAN_ASCII_ART)\n\n return None",
"def echo(self, txt):\n if self.gui == None:\n print txt\n else:\n self.gui.echo(txt)",
"def showInstructions():\n print(\"\"\"\n RPG Game\n ========\n Commands:\n go [direction]\n get [item]\n\n\t\"\"\")",
"def play(self):\n print(\"Bientôt ! :)\")",
"def bye():\r\n return \"<p>Bye World! <p>\"",
"def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")",
"def text(self):\n surface_score = pygame.font.SysFont('Helvetic', 100).render(str(self.score), False, BLACK)\n screen.blit(surface_score, (50, 50))",
"def show_text(text, colour):\n message = font_style.render(text, True, colour)\n dis.blit(message, [game_size_x/2, game_size_y/2])",
"def display_text(self, text):\n self.write_to_serial(':DISP:TEXT \\'' + text + '\\'')",
"def paintText(self, text):\n return '@paint '+text * 2",
"def print(self, my_screen, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n my_screen.blit(text_bitmap, [self.x_pos, self.y_pos])\n self.y_pos += self.line_height",
"def printPokemon():\n print(\" _ \")\n print(\" _ __ ___ | | _____ _ __ ___ ___ _ __ \")\n print(\" | '_ \\ / _ \\| |/ / _ \\ '_ ` _ \\ / _ \\| '_ \\ \")\n print(\" | |_) | (_) | < __/ | | | | | (_) | | | |\")\n print(\" | .__/ \\___/|_|\\_\\___|_| |_| |_|\\___/|_| |_|\")\n print(\" |_| \")"
] | [
"0.65063953",
"0.636514",
"0.62646013",
"0.6242274",
"0.61999524",
"0.6140702",
"0.6124424",
"0.60740983",
"0.6073409",
"0.6061641",
"0.6054812",
"0.60428387",
"0.60134625",
"0.60110116",
"0.5988794",
"0.5965525",
"0.5959333",
"0.59516466",
"0.5946331",
"0.59317094",
"0.59148836",
"0.59145576",
"0.58962476",
"0.5883607",
"0.5882458",
"0.5878248",
"0.58735365",
"0.5861329",
"0.5853587",
"0.5846142"
] | 0.6594397 | 0 |
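A minimal, runnable sketch of the .osu hit-object formatting shown in the print_osu_text record above; the inputs (positions, timestamps, is_slider and the slider_* lists) are hypothetical stand-ins for the module-level globals the original snippet assumes.

# Hypothetical stand-ins for the globals print_osu_text relies on.
positions = [[64.0, 96.0, 1.0, 0.0], [200.0, 150.0, 0.0, 1.0]]  # x, y, slider dir x, slider dir y
timestamps = [1000, 1500]        # hit times in milliseconds
is_slider = [False, True]        # the second object is a slider
slider_lengths = [0.0, 120.0]    # pixel length along the direction vector
slider_length_base = [0.0, 60.0]
slider_ticks = [0, 2]

for i, ai in enumerate(positions):
    if not is_slider[i]:
        # Circle line: x,y,time,type=1,hitSound,extras
        print("{},{},{},1,0,0:0:0".format(int(ai[0]), int(ai[1]), int(timestamps[i])))
    else:
        # Linear slider line: x,y,time,type=2,hitSound,L|endX:endY,repeats,pixelLength,extras
        end_x = int(round(ai[0] + ai[2] * slider_lengths[i]))
        end_y = int(round(ai[1] + ai[3] * slider_lengths[i]))
        length = int(slider_length_base[i] * slider_ticks[i])
        print("{},{},{},2,0,L|{}:{},1,{},0:0:0".format(
            int(ai[0]), int(ai[1]), int(timestamps[i]), end_x, end_y, length))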
get_containment_slots(game_object) Retrieve the containment slots of an object. | def get_containment_slots(cls, game_object: GameObject) -> Tuple[CommonObjectContainmentSlot]:
# noinspection PyTypeChecker
game_object: GameObject = CommonObjectUtils.get_root_parent(game_object)
slot_component = cls.get_slot_component(game_object)
if slot_component is None:
return tuple()
containment_slot_list: List[CommonObjectContainmentSlot] = list()
for (slot_hash, slot_types) in tuple(slot_component.get_containment_slot_infos()):
containment_slot_list.append(CommonObjectContainmentSlot(slot_hash, slot_types))
return tuple(containment_slot_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def slots(self):\n return self.__slots.values()",
"def GetAllSlots(cls):\n slots = []\n for parent in cls.__mro__:\n slots.extend(getattr(parent, \"__slots__\", []))\n return slots",
"def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:\n if not CommonComponentUtils.has_component(game_object, CommonComponentType.SLOT):\n return None\n # noinspection PyTypeChecker\n slot_component: SlotComponent = CommonComponentUtils.get_component(game_object, CommonComponentType.SLOT)\n return slot_component",
"def get_all_spawnable_cells(self):\n spawnable_positions = []\n\n for i in range(self.grid.width):\n for j in range(self.grid.height):\n n_list = self.grid.get_cell_list_contents([(i, j)])\n\n if len(n_list) <= 0:\n spawnable_positions.append((i, j))\n elif len(n_list) > 0:\n n = n_list[0]\n if not any(map(lambda t: isinstance(n, t), self.not_spawnable_objects)):\n spawnable_positions.append((i, j))\n\n return spawnable_positions",
"def get_eight_way_partition_slots(slot_manager: SlotManager) -> List[Slot]:\n partition_order = [Dir.horizontal, Dir.horizontal, Dir.vertical]\n return slot_manager.getLeafSlotsAfterPartition(partition_order)",
"def slots(self):\n highSlots = self._getAttribute(Attribute.highSlots)\n medSlots = self._getAttribute(Attribute.medSlots)\n lowSlots = self._getAttribute(Attribute.lowSlots)\n\n if None in [highSlots, medSlots, lowSlots]:\n # This is a T3 ship.\n highSlots = medSlots = lowSlots = 0\n\n # Get rigs and subs.\n rigSlots = self._getAttribute(Attribute.rigSlots, 0)\n subSlots = self._getAttribute(Attribute.subSlots, 0)\n\n # Get missile and turret slots.\n missileSlots = self._getAttribute(Attribute.missileSlots, 0)\n turretSlots = self._getAttribute(Attribute.turretSlots, 0)\n\n return {\n \"highSlots\": int(highSlots),\n \"medSlots\": int(medSlots),\n \"lowSlots\": int(lowSlots),\n \"rigSlots\": int(rigSlots),\n \"subSlots\": int(subSlots),\n \"turretSlots\": int(turretSlots),\n \"missileSlots\": int(missileSlots)\n }",
"def container_for_slot(self, slot):\n\n for l in self.metalist:\n if not len(l):\n continue\n if slot < len(l):\n return l, slot\n slot -= len(l)",
"def get_slots(self) -> int:",
"def free_slots(self, day_bounds: Slot):\n free_slots: List[Slot] = []\n time_ptr = day_bounds.start\n for meeting in self.meetings:\n if meeting.start > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, meeting.start.time_str))\n time_ptr = meeting.end\n if day_bounds.end > time_ptr:\n free_slots.append(Slot(time_ptr.time_str, day_bounds.end.time_str))\n return free_slots",
"def timeslot(self) -> List[TimeslotTimeslot]:\n return self._timeslot",
"def get_slots(intent_request):\n return intent_request[\"currentIntent\"][\"slots\"]",
"def _get_appointment_slots(self, timezone, employee=None):\n self.ensure_one()\n appt_tz = pytz.timezone(self.appointment_tz)\n requested_tz = pytz.timezone(timezone)\n first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))\n last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))\n\n # Compute available slots (ordered)\n slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)\n if not employee or employee in self.employee_ids:\n self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)\n\n # Compute calendar rendering and inject available slots\n today = requested_tz.fromutc(datetime.utcnow())\n start = today\n month_dates_calendar = cal.Calendar(0).monthdatescalendar\n months = []\n while (start.year, start.month) <= (last_day.year, last_day.month):\n dates = month_dates_calendar(start.year, start.month)\n for week_index, week in enumerate(dates):\n for day_index, day in enumerate(week):\n mute_cls = weekend_cls = today_cls = None\n today_slots = []\n if day.weekday() in (cal.SUNDAY, cal.SATURDAY):\n weekend_cls = 'o_weekend'\n if day == today.date() and day.month == today.month:\n today_cls = 'o_today'\n if day.month != start.month:\n mute_cls = 'text-muted o_mute_day'\n else:\n # slots are ordered, so check all unprocessed slots from until > day\n while slots and (slots[0][timezone][0].date() <= day):\n if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):\n today_slots.append({\n 'employee_id': slots[0]['employee_id'].id,\n 'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),\n 'hours': slots[0][timezone][0].strftime('%H:%M')\n })\n slots.pop(0)\n dates[week_index][day_index] = {\n 'day': day,\n 'slots': today_slots,\n 'mute_cls': mute_cls,\n 'weekend_cls': weekend_cls,\n 'today_cls': today_cls\n }\n\n months.append({\n 'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),\n 'weeks': dates\n })\n start = start + relativedelta(months=1)\n return months",
"def neighbors(self, node_object):\n\n (node_column, node_row) = node_object\n row_flags = numpy.logical_and(\n self.row_indices_in_region >= node_row - 1,\n self.row_indices_in_region <= node_row + 1)\n column_flags = numpy.logical_and(\n self.column_indices_in_region >= node_column - 1,\n self.column_indices_in_region <= node_column + 1)\n\n neighbour_indices = numpy.where(\n numpy.logical_and(row_flags, column_flags))[0]\n neighbour_indices = neighbour_indices.tolist()\n\n node_index = numpy.where(numpy.logical_and(\n self.row_indices_in_region == node_row,\n self.column_indices_in_region == node_column))[0][0]\n neighbour_indices.remove(node_index)\n\n return [(self.column_indices_in_region[i],\n self.row_indices_in_region[i]) for i in neighbour_indices]",
"def get_all_slots(iso_datetime):\n d_time = datetime.fromisoformat(iso_datetime)\n d_date = date(d_time.year, d_time.month, d_time.day)\n schedule = AppointmentService.APPOINTMENT_SCHEDULE.get(d_date.weekday(), {})\n slots = []\n\n if schedule:\n begin_time = datetime.combine(d_date, schedule['begin'])\n end_time = datetime.combine(d_date, schedule['end'])\n\n while begin_time < end_time:\n slots.append(begin_time)\n begin_time += AppointmentService.APPOINTMENT_DURATION\n\n return slots",
"def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slot_name_str = str(slot_name)\n with_slot_in_front_of_name = f'slot_{slot_name}'\n\n def _has_slot_name(_connected_object: ScriptObject) -> bool:\n if not _connected_object.parent_slot:\n return False\n for _connected_object_slot_type in _connected_object.parent_slot.slot_types:\n if cls.get_slot_name(_connected_object_slot_type) in (slot_name_str, with_slot_in_front_of_name):\n return True\n return False\n\n if include_object_callback is not None:\n include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))\n else:\n include_object_callback = _has_slot_name\n\n for connected_object in CommonObjectSlotUtils.get_all_connected_objects_gen(\n script_object,\n include_object_callback=include_object_callback\n ):\n yield connected_object",
"def _get_initial_slots(self, rows, cols) -> list:\n slots = []\n for x in range(rows):\n row = []\n for y in range(cols):\n slot = Slot(x=x, y=y, mine=False, available=True, flag=False)\n row.append(slot)\n slots.append(row)\n return slots",
"def get_empty_slots(self):\n slots = np.reshape(range(0, self.size * self.size), (self.size, self.size))\n\n return slots[~self.tiles_taken]",
"def pd_king_neighbors(obj: PdObject) -> List[PdObject]:\n return [neighbor for tag, neighbor in pd_king_neighbors_and_self(obj) if tag]",
"def _GetSlots(mcs, attrs):\n raise NotImplementedError",
"def _getBrailleRegionsForFrame(self, obj):\n\n self._debugGenerator(\"_getBrailleRegionsForFrame\", obj)\n\n regions = []\n\n text = \"\"\n text = self._script.appendString(\n text, self._script.getDisplayedLabel(obj))\n text = self._script.appendString(\n text, self._script.getDisplayedText(obj))\n text = self._script.appendString(text,\n self._script.getTextForValue(obj))\n text = self._script.appendString(text, self._getTextForRole(obj))\n\n # If this application has more than one unfocused alert or\n # dialog window, then add '(<m> dialogs)' to the braille context,\n # to let the user know.\n #\n alertAndDialogCount = \\\n self._script.getUnfocusedAlertAndDialogCount(obj)\n if alertAndDialogCount > 0:\n # Translators: this tells the user how many unfocused\n # alert and dialog windows plus the total number of\n # windows that this application has.\n #\n line = ngettext(\"(%d dialog)\",\n \"(%d dialogs)\",\n alertAndDialogCount) % alertAndDialogCount\n text = self._script.appendString(text, line)\n\n regions = []\n componentRegion = braille.Component(obj, text)\n regions.append(componentRegion)\n\n return [regions, componentRegion]",
"def get_obstacles(self):\n return self.obstacles",
"def cluster_slots() -> Dict[str, Any]:\n # TODO: refactor tests to not use cli singleton auth.\n certs.cli_cert = certs.default_load(conf.make_master_url())\n authentication.cli_auth = authentication.Authentication(conf.make_master_url())\n r = api.get(conf.make_master_url(), \"api/v1/agents\")\n assert r.status_code == requests.codes.ok, r.text\n jvals = r.json() # type: Dict[str, Any]\n return {agent[\"id\"]: agent[\"slots\"].values() for agent in jvals[\"agents\"]}",
"def test10_containment_triples(self):\n ct = list(LDPRS().containment_triples())\n self.assertEqual(ct, [])",
"def get_four_way_partition_slots(slot_manager: SlotManager) -> List[Slot]:\n partition_order = [Dir.horizontal, Dir.horizontal]\n return slot_manager.getLeafSlotsAfterPartition(partition_order)",
"def get_parking_slot():\n return parking_slots",
"def pg_get_replication_slots(self) -> Optional[int]:\n if self.pg_num_version >= 140000:\n query = queries.get(\"get_replication_slots_post_140000\")\n else:\n return None\n ret = pg.fetchone(self.pg_conn, query)\n return int(ret[\"replication_slots\"])",
"def findObjects( self, x=None, y=None, w=None, h=None ):\n\n if x is None:\n x = self.xAbs\n if y is None:\n y = self.yAbs\n if w is None:\n w = self.w\n if h is None:\n h = self.h\n\n childs = []\n\n subframe, subframeG = self.getSubframe( x, y, w, h )\n\n for childType in self.__class__.childTypes:\n #trackableObjects = eval(childType + \".detect( subframeG )\")\n tmpObj = TrackedObject.factory( childType )\n trackableObjects = tmpObj.__class__.detect( subframeG )\n tmpObj.__class__.listOf.pop()\n\n\n if len(trackableObjects) > 0:\n TrackedObject.factory(childType).__class__.listOf = []\n\n for ( x, y, w, h ) in trackableObjects:\n trackableObject = TrackedObject.factory( childType )\n trackableObject.parent = self\n trackableObject.setBoundingBox( int(x), int(y), int(w), int(h) )\n #trackableObject.findObjects()\n childs.append(trackableObject)\n\n if len(childs) > 0:\n self.childs = childs\n self.lastDetection = TrackedObject.frameCount\n\n for child in self.childs:\n child.findObjects()",
"def pd_king_neighbors_and_self(obj: PdObject) -> List[Tuple[bool, Any]]:\n\n if isinstance(obj, Block):\n raise TypeError('Cannot compute neighbors of block ' + repr(obj))\n if isinstance(obj, (Char, int, float, complex)):\n return [\n (True, num.pd_add_const(obj, -1)),\n (False, obj),\n (True, num.pd_add_const(obj, 1)),\n ]\n elif len(obj) == 0:\n return [(False, obj)]\n elif isinstance(obj, str):\n # type juggling is actually kind of annoying, just doing it explicitly\n return [\n (any(deltas), ''.join(chr(ord(ch) + delta) for ch, delta in zip(obj, deltas)))\n for deltas in itertools.product([-1, 0, 1], repeat=len(obj))\n ]\n else:\n xs = pd_to_list(obj)\n\n return [\n (tag or tag2, [neighbor] + neighbors)\n for tag, neighbor in pd_king_neighbors_and_self(xs[0])\n for tag2, neighbors in pd_king_neighbors_and_self(xs[1:])\n ]",
"def _get_mask_coords(self, obj, shrink_factor):\n # split bounding box into x,y coordinates\n xmin, ymin, xmax, ymax = np.split(obj.bounding_box, len(obj.bounding_box))\n # compute bounding box center coordinate\n bbox_center = np.array([xmin + (xmax - xmin) / 2, ymin + (ymax - ymin) / 2])\n # compute target/mask center coordinate\n mask_center = bbox_center / self.stride\n # compute the shrinked dimensions (? check this)\n shrink_dim = np.array([(xmax - xmin) * np.sqrt(shrink_factor),\n (ymax - ymin) * np.sqrt(shrink_factor)]) / self.stride\n # compute x,y target/mask coordinates\n mask_coords = np.array([np.max([0, np.min([self.target_width, (mask_center[0] - shrink_dim[0] / 2)])]),\n np.max([0, np.min([self.target_height, (mask_center[1] - shrink_dim[1] / 2)])]),\n np.max([0, np.min([self.target_width, (mask_center[0] + shrink_dim[0] / 2)])]),\n np.max([0, np.min([self.target_height, (mask_center[1] + shrink_dim[1] / 2)])])],\n dtype=np.int32)\n return mask_coords",
"def create_zones(self, object_list: list) -> list:\n zone_list = [\n [\n object\n for object in object_list\n if self._object_in_ring(object=object, band=ring)\n ]\n for ring in self.zone_distances\n ]\n # print(\n # f\"len(zone_list): {len(zone_list)}, len(zone_list[0]): {len(zone_list[0])}\"\n # )\n\n return zone_list"
] | [
"0.526133",
"0.5145089",
"0.5117318",
"0.5091099",
"0.50412333",
"0.5012706",
"0.48904616",
"0.4870754",
"0.48465842",
"0.47900295",
"0.47505447",
"0.47355798",
"0.47103837",
"0.4635481",
"0.462364",
"0.46191993",
"0.4604094",
"0.45993218",
"0.4587846",
"0.4573867",
"0.45437527",
"0.453431",
"0.4530009",
"0.45258963",
"0.4524369",
"0.4521938",
"0.45182535",
"0.4502001",
"0.44980988",
"0.44772226"
] | 0.8407297 | 0 |
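The get_containment_slots record above boils a slot component's containment data down to (slot hash, slot types) pairs. Since the game types (GameObject, SlotComponent, CommonObjectContainmentSlot) are not available outside the game, the sketch below mirrors only that wrapping step with plain Python; every name in it is an illustrative assumption.

from collections import namedtuple
from typing import List, Tuple

# Stand-in for CommonObjectContainmentSlot: a (slot hash, slot types) pair.
ContainmentSlot = namedtuple("ContainmentSlot", ["slot_hash", "slot_types"])

def wrap_containment_slot_infos(slot_infos: List[Tuple[int, tuple]]) -> Tuple[ContainmentSlot, ...]:
    # Mirrors the loop in get_containment_slots: wrap each pair, return an immutable tuple.
    return tuple(ContainmentSlot(slot_hash, slot_types) for slot_hash, slot_types in slot_infos)

print(wrap_containment_slot_infos([
    (0x1A2B, ("slot_deco_small",)),
    (0x3C4D, ("slot_deco_small", "slot_deco_medium")),
]))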
get_slot_component(game_object) Retrieve the SlotComponent of an Object. | def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:
if not CommonComponentUtils.has_component(game_object, CommonComponentType.SLOT):
return None
# noinspection PyTypeChecker
slot_component: SlotComponent = CommonComponentUtils.get_component(game_object, CommonComponentType.SLOT)
return slot_component | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot",
"def get_first_connected_object_by_slot_name(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Union[ScriptObject, None]:\n for child in cls.get_connected_objects_by_slot_name_gen(\n script_object,\n slot_name,\n include_object_callback=include_object_callback\n ):\n return child\n return None",
"def get_containment_slots(cls, game_object: GameObject) -> Tuple[CommonObjectContainmentSlot]:\n # noinspection PyTypeChecker\n game_object: GameObject = CommonObjectUtils.get_root_parent(game_object)\n slot_component = cls.get_slot_component(game_object)\n if slot_component is None:\n return tuple()\n containment_slot_list: List[CommonObjectContainmentSlot] = list()\n for (slot_hash, slot_types) in tuple(slot_component.get_containment_slot_infos()):\n containment_slot_list.append(CommonObjectContainmentSlot(slot_hash, slot_types))\n return tuple(containment_slot_list)",
"def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None",
"def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self",
"def get_card(self, slot):\n return self._starting_card[slot]",
"def fl_get_object_component(ptr_flobject, flobjclass, compontype, seqnum):\n _fl_get_object_component = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_component\",\n cty.POINTER(xfdata.FL_OBJECT), [cty.POINTER(xfdata.FL_OBJECT),\n cty.c_int, cty.c_int, cty.c_int], \\\n \"\"\"FL_OBJECT * fl_get_object_component(FL_OBJECT * composite,\n int objclass, int type, int numb)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_flobjclass = library.convert_to_intc(flobjclass)\n i_compontype = library.convert_to_intc(compontype)\n i_seqnum = library.convert_to_intc(seqnum)\n library.keep_elem_refs(ptr_flobject, flobjclass, i_flobjclass, \\\n compontype, i_compontype, seqnum, i_seqnum)\n retval = _fl_get_object_component(ptr_flobject, i_flobjclass, \\\n i_compontype, i_seqnum)\n return retval",
"def _get_linkable_component(script):\n component = None\n for c in script.components:\n if isinstance(c, SessionComponent):\n return c\n if isinstance(c, SpecialMixerComponent) and not c.is_return_mixer:\n component = c\n\n if component is None:\n if hasattr(script, '_session_ring'):\n return script._session_ring\n return component",
"def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]",
"def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')",
"def get_slot(self, c):\n if 'slot_number' in c.keys():\n slot_number = c['slot_number']\n return slot_number\n else:\n raise ValueError(self.no_selection_msg())\n \n # returnValue(voltage * units.V)",
"def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slot_name_str = str(slot_name)\n with_slot_in_front_of_name = f'slot_{slot_name}'\n\n def _has_slot_name(_connected_object: ScriptObject) -> bool:\n if not _connected_object.parent_slot:\n return False\n for _connected_object_slot_type in _connected_object.parent_slot.slot_types:\n if cls.get_slot_name(_connected_object_slot_type) in (slot_name_str, with_slot_in_front_of_name):\n return True\n return False\n\n if include_object_callback is not None:\n include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))\n else:\n include_object_callback = _has_slot_name\n\n for connected_object in CommonObjectSlotUtils.get_all_connected_objects_gen(\n script_object,\n include_object_callback=include_object_callback\n ):\n yield connected_object",
"def get_component(self, sCompName):\n return self._dComponents.get(sCompName, None)",
"def get_slot_name(cls, slot: SlotType) -> str:\n if slot is None:\n return 'No Slot Name'\n return slot.__name__",
"def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:\n deck_def = self.get_deck_definition()\n\n for slot_def in deck_def[\"locations\"][\"orderedSlots\"]:\n if slot_def[\"id\"] == slot.id:\n return slot_def\n\n raise errors.SlotDoesNotExistError(\n f\"Slot ID {slot.id} does not exist in deck {deck_def['otId']}\"\n )",
"def _get_slot_variable(self, layer_name, slot_name):\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )",
"def get_component(self, name: str) -> Any:\n for c in self._components:\n if c.name == name:\n return c\n raise ValueError(f\"No component found with name {name}\")",
"def get_by_slot(self, parent_object, slot):\n placeholder = self.parent(parent_object).get(slot=slot)\n placeholder.parent = parent_object # fill the reverse cache\n return placeholder",
"def comp(self, componentname):\n retv = self.components.lookup(componentname)\n if (retv == None):\n raise Exception(\"Component not found: '{0}'.\".format(componentname))\n return retv",
"def get_component(self, name):\n for cmpt in self.components:\n if cmpt['name'] == name:\n return cmpt",
"def get_component(self, name: str) -> Any:\n return self._manager.get_component(name)",
"def get_slot(self, board: int, adc: str) -> Union[int, None]:\n slot = None\n for s, info in self.slot_info.items():\n if board == info[0] and adc == info[1]:\n slot = s\n break\n\n return slot",
"def get_slot(self, *args, **kwargs):\n return self._optimizer.get_slot(*args, **kwargs)",
"def getSlotAttrib(cls, obj):\n attr = None\n if obj.__class__ in restslotattributedict.keys():\n attr = restslotattributedict[obj.__class__]\n return attr",
"def slot_get(instance: object, name: str) -> Any:\n\n owner = type(instance)\n attribute = getattr(owner, name)\n try:\n descriptor_get = attribute.__get__\n except AttributeError:\n return attribute\n else:\n return descriptor_get(instance, owner)",
"def get_equipment_slot(self, source_entity):\n open_slots = (source_entity.equipment.get_open_slots_of_type\n (self.parent.equipment_type.value))\n if len(open_slots) > 0:\n return open_slots[0]\n else:\n return (source_entity.equipment.get_slots_of_type\n (self.parent.equipment_type.value))[0]",
"def get_component(self, platform: RuntimeProcessorType, component_id: str) -> Optional[Component]:\n component: Optional[Component] = None\n\n catalogs = self._component_cache.get(platform.name, {})\n for catalog_name, catalog_properties in catalogs.items():\n component = catalog_properties.get(\"components\", {}).get(component_id)\n if component:\n break\n\n if not component:\n self.log.error(f\"Component with ID '{component_id}' could not be found in any catalog.\")\n\n return component",
"def component(self, index):\n return self.components[index]",
"def get(name):\r\n return componentManager.components[name]",
"def pci_slot(self):\n return self._pci_slot"
] | [
"0.5974101",
"0.5687909",
"0.5576533",
"0.5557353",
"0.5550324",
"0.55383635",
"0.54384905",
"0.5350814",
"0.5348756",
"0.53248906",
"0.53145486",
"0.52669525",
"0.52061915",
"0.5178124",
"0.51176554",
"0.51156604",
"0.5053627",
"0.5039317",
"0.50180686",
"0.5006887",
"0.4996687",
"0.498943",
"0.49757674",
"0.49650094",
"0.4962581",
"0.49028274",
"0.48888794",
"0.48558378",
"0.4836688",
"0.48219028"
] | 0.8331069 | 0 |
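The get_slot_component record above is a guard-then-fetch lookup: return None unless the object actually has the component, otherwise fetch it. A dictionary-based analogue of that pattern, with all names assumed for illustration:

from typing import Optional

def get_component_or_none(components: dict, component_type: str) -> Optional[object]:
    # Mirrors get_slot_component: bail out with None when the component is absent.
    if component_type not in components:
        return None
    return components[component_type]

obj_components = {"SLOT": "slot-component-instance"}
print(get_component_or_none(obj_components, "SLOT"))      # slot-component-instance
print(get_component_or_none(obj_components, "LIGHTING"))  # None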
get_slot_name(slot) Retrieve the name of a slot. | def get_slot_name(cls, slot: SlotType) -> str:
if slot is None:
return 'No Slot Name'
return slot.__name__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_slot_variable(self, layer_name, slot_name):\n return self._tls._slot_variables.get(layer_name, {}).get(\n slot_name, None\n )",
"def slot(self):\n if self.__slot in ApexAP1000.SLOTS:\n return self.__slot\n else:\n raise ValueError('Bad slot number !')",
"def getSignalName(sig):\n try:\n return sig._name\n except AttributeError:\n pass\n return sig.name",
"def getSignalName(sig):\n try:\n return sig._name\n except AttributeError:\n pass\n return sig.name",
"def get_slot(self, idx):\n assert (idx >= 0) and (idx < self.size()), \"Index is out of range\"\n return self.slots[idx]",
"def get_slot(item_id):\n if item_id in all_items:\n return all_items[item_id]['slot']\n return None",
"def get_slot_definition(self, slot: DeckSlotName) -> SlotDefV3:\n deck_def = self.get_deck_definition()\n\n for slot_def in deck_def[\"locations\"][\"orderedSlots\"]:\n if slot_def[\"id\"] == slot.id:\n return slot_def\n\n raise errors.SlotDoesNotExistError(\n f\"Slot ID {slot.id} does not exist in deck {deck_def['otId']}\"\n )",
"def get_name(self, context: bpy.types.Context) -> str:\n if not self.name:\n connector = self.__create_connector(\n self.name_connector, context=context)\n self.name = connector.get_name()\n return self.name",
"def get_frame_slot_value(self, slot):\n # FrameObject is a dictionary of slot names and values.\n [slotName, value] = self.pgdb.sendPgdbFnCall('get-frame-slot-value', self.frameid, Symbol(slot))\n if not slotName:\n raise PythonCycError(\"Slot \"+slot+\" does not exist for frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n # Modify slot name to allow Python's syntax (e.g., '_' instead of '-').\n self.__dict__[convertLispIdtoPythonId(slotName)] = value\n return self",
"def get_name_by_socket(self, socket):\n with self.register_lock:\n return self.socket_name[socket]",
"def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:\n sn = underscore(slot_name)\n self.emit('slot', sn)\n if slot.domain:\n self.emit('domain', sn, underscore(slot.domain))\n if slot.range:\n self.emit('range', sn, underscore(slot.range))\n for p in slot.mixins:\n self.emit('mixin', sn, underscore(p))\n if slot.is_a:\n is_a = underscore(slot.is_a)\n\n #uri = self.owlgen._prop_uri(slot.name)\n uri = f'http://w3id.org/biolink/vocab/{sn}'\n self.emit('has_uri', sn, uri)\n if slot.multivalued:\n self.emit('multivalued', sn)\n if slot.required:\n self.emit('required', sn)",
"def getName(self):\n return signal_base_get_name(self.obj)",
"def get_card(self, slot):\n return self._starting_card[slot]",
"def get_name(self):\n return self._qname",
"def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:\n if not CommonComponentUtils.has_component(game_object, CommonComponentType.SLOT):\n return None\n # noinspection PyTypeChecker\n slot_component: SlotComponent = CommonComponentUtils.get_component(game_object, CommonComponentType.SLOT)\n return slot_component",
"def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()",
"def name(self):\n return signal_base_get_name(self.obj)",
"def get_alexa_slot_value(data, slot=None) -> Union[str, None]:\n if \"request\" in data and \"intent\" in data[\"request\"] and \"slots\" in data[\"request\"][\"intent\"]:\n if slot is None:\n return data[\"request\"][\"intent\"][\"slots\"]\n else:\n if slot in data[\"request\"][\"intent\"][\"slots\"] and \"value\" in data[\"request\"][\"intent\"][\"slots\"][slot]:\n return data[\"request\"][\"intent\"][\"slots\"][slot][\"value\"]\n else:\n return None\n else:\n return None",
"def get_name() -> str:",
"def _get_clear_tm_voq_slot_id_egress_port_name(self):\n return self.__clear_tm_voq_slot_id_egress_port_name",
"def get_name(self, asset):\n return self.get_name_and_meta(asset)[0]",
"def signame(sig):\r\n\r\n if _signames is None:\r\n _init_signames()\r\n return _signames.get(sig) or \"signal %d\" % sig",
"def get_name() -> str:\n pass",
"def _get_frame_name(self, frame):\n if isinstance(frame, str):\n name = frame\n frame_obj = None\n else:\n name = frame.name\n frame_obj = frame\n return name, frame_obj",
"def get_name(self) -> str:\n return self._name",
"def get_name(self) -> str:\n return self._name",
"def get_name(node):\n if isinstance(node, ast.Name):\n return node.id",
"def _get_histname(self, plot, var, frame):\n return '_'.join([plot, var, frame])",
"def getName(self):\n return _libsbml.Port_getName(self)",
"def get_snap_name(self, sid):\n return \"cs-{0}\".format(sid)"
] | [
"0.6404714",
"0.60975593",
"0.5857362",
"0.5857362",
"0.5825722",
"0.5819096",
"0.5776919",
"0.57526577",
"0.5740833",
"0.5738997",
"0.56404126",
"0.5614616",
"0.5590616",
"0.5581622",
"0.5572394",
"0.5567416",
"0.5482684",
"0.54648507",
"0.54605675",
"0.54585487",
"0.5458404",
"0.54530454",
"0.5452874",
"0.5446044",
"0.5432647",
"0.5432647",
"0.5423528",
"0.5414179",
"0.54125315",
"0.54083675"
] | 0.89791465 | 0 |
get_first_connected_object_by_slot_name(script_object, slot_name, include_object_callback=None) Get the first connected object by slot. | def get_first_connected_object_by_slot_name(
cls,
script_object: ScriptObject,
slot_name: CommonSlotType,
include_object_callback: Callable[[ScriptObject], bool] = None
) -> Union[ScriptObject, None]:
for child in cls.get_connected_objects_by_slot_name_gen(
script_object,
slot_name,
include_object_callback=include_object_callback
):
return child
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slot_name_str = str(slot_name)\n with_slot_in_front_of_name = f'slot_{slot_name}'\n\n def _has_slot_name(_connected_object: ScriptObject) -> bool:\n if not _connected_object.parent_slot:\n return False\n for _connected_object_slot_type in _connected_object.parent_slot.slot_types:\n if cls.get_slot_name(_connected_object_slot_type) in (slot_name_str, with_slot_in_front_of_name):\n return True\n return False\n\n if include_object_callback is not None:\n include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))\n else:\n include_object_callback = _has_slot_name\n\n for connected_object in CommonObjectSlotUtils.get_all_connected_objects_gen(\n script_object,\n include_object_callback=include_object_callback\n ):\n yield connected_object",
"def get_object(self, pool_name, object_name):\n return self.get_object_and_version(pool_name, object_name)[0]",
"def connectionFromName(self, name):\n for item in self.items():\n if isinstance(item, ConnectionItem):\n if item.name() == name:\n return item\n return None",
"def get_client_by_socket(self, socket):\n candidate_connection_objects = [connection for connection in self if connection.socket() is socket]\n assert len(candidate_connection_objects) != 0, \"?? socket %s not found in list of client objects\" % socket\n assert len(\n candidate_connection_objects) == 1, \"?? socket %s appears in list of client objects multiple times\" % socket\n return candidate_connection_objects[0]",
"def cursor_for_object_in_connection(data, _object):\n if _object not in data:\n return None\n\n offset = data.index(_object)\n return offset_to_cursor(offset)",
"def get_slot_component(cls, game_object: GameObject) -> Union[SlotComponent, None]:\n if not CommonComponentUtils.has_component(game_object, CommonComponentType.SLOT):\n return None\n # noinspection PyTypeChecker\n slot_component: SlotComponent = CommonComponentUtils.get_component(game_object, CommonComponentType.SLOT)\n return slot_component",
"def getSlotforObject(cls, obj):\n if obj.__class__ in restslotattributedict.keys():\n attr = cls.getSlotAttrib(obj)\n if attr is not None:\n peek = getattr(obj, \"peek_\" + attr)\n slot = str(peek()).split('/')[0]\n else:\n return False, \"0\"\n return True, slot",
"def is_connected(self, slot):\n return slot in self.slots",
"def getObject(name, index=-1):\n\n names = [x.name for x in bpy.data.objects if getNamePrefix(x.name) == name]\n names = sorted(names, key=getNameIndex)\n\n if len(names) == 0:\n print(\"Object '{}' not found!\".format(name))\n return None\n\n if index < 0:\n obj = bpy.data.objects[names[index]]\n else:\n for n in names:\n if getNameIndex(n) == index:\n obj = bpy.data.objects[n]\n\n return obj",
"def get_by_slot(self, parent_object, slot):\n placeholder = self.parent(parent_object).get(slot=slot)\n placeholder.parent = parent_object # fill the reverse cache\n return placeholder",
"def find_object_by_path(cls, object_path):\n # XXX: ideally this would be per-connection method.\n with cls._object_path_map_lock:\n return cls._object_path_to_object_map[object_path]",
"def get_object(self, name):\n try:\n return self.data['objects'][normalize_object_name(name)]\n except KeyError:\n return None",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def choose_serial_connection(potential_connections):\n for connection in potential_connections:\n if os.path.exists(connection):\n return connection\n return None",
"def get_object_by_name(self, object_list, object_name):\n obj = None\n for i in object_list:\n if i.get_name().lower() == object_name.lower():\n obj = i\n break\n return obj",
"def get_remote_device(self, slot, protocols, o):\n for path in (\n find_path(o, slot, [p.translate(translation_map) for p in protocols], trace_wire=True)\n or []\n ):\n if path.obj != o and not path.obj.is_wire:\n return path",
"def find_connection(self, id):\r\n\t\tfor player in self.established_connection_list:\r\n\t\t\tif (player.id == id):\r\n\t\t\t\treturn player",
"def getObjectFromPosition(position):\n for object in self.objects:\n if (int(self.position[0]) == int(object.position[0])) and (int(self.position[1]) == int(object.position[1])):\n return object\n\n return None",
"def _get_real_object(self, name):\n name = name if isinstance(name, str) else name.name\n for obj in self._objects:\n if name == obj.name:\n return obj\n else:\n raise ValueError(\"Cannot retrieve object. Unknown name {}. \".format(name))",
"def get_card(self, slot):\n return self._starting_card[slot]",
"def select_source(self, c, slot_number):\n source_found = yield self.find_source(c, slot_number)\n if source_found:\n c['slot_number'] = slot_number\n else:\n raise ValueError(self.slot_not_found_msg(slot_number))",
"def get_object_name(obj):\n\n namespace = dict(globals(), **locals()) \n return [name for name in namespace if namespace[name] is obj][0]",
"def _get_COM_object(self):\r\n prefix = \"LightTools API Server | \"\r\n\r\n if self._pid:\r\n if not psutil.pid_exists(self._pid):\r\n msg = \"Couldn't find a LightTools process with PID {}.\"\r\n raise ValueError(msg.format(self._pid))\r\n prefix += str(self._pid)\r\n\r\n start_time = time.time()\r\n connection_attempt_timed_out = (\r\n lambda current_time: current_time - start_time > self._timeout\r\n )\r\n\r\n rot = _comutils.RunningObjectTable()\r\n\r\n while not connection_attempt_timed_out(time.time()):\r\n comobjs = rot.get_objects()\r\n for name in comobjs:\r\n if name.startswith(prefix):\r\n return comobjs[name]\r\n else:\r\n msg = (\r\n \"Couldn't establish a connection to LightTools within {} \"\r\n \"seconds. Connection attempt aborted.\"\r\n )\r\n raise error.TimeOutError(msg.format(self._timeout))",
"def get_object_vertex(self, obj):\n return self.object_vertices[obj]",
"def find_object_by_name(remote, obj_name):\n cmd = mmapi.StoredCommands()\n cmd_key = cmd.AppendSceneCommand_FindObjectByName(obj_name)\n remote.runCommand(cmd)\n result_val = mmapi.any_result()\n bFound = cmd.GetSceneCommandResult_FindObjectByName(cmd_key, result_val)\n return (bFound, result_val.i)",
"def connect_obj_event(self, obj, signal_name, listener):\n return QtCore.QObject.connect(\n obj, QtCore.SIGNAL(signal_name), listener)",
"def get_client_by_handle(self, handle):\n candidate_client_objects = [client for client in self if client.handle == handle]\n assert len( candidate_client_objects) < 2, \"?? socket %s appears in list of client objects multiple times\" % handle\n if candidate_client_objects:\n return candidate_client_objects[0]\n return None",
"def get_object(self, name):\n return self._internal.objects[name]",
"def getObject(self, oid):\n if self.ws :\n [_instance, _type, _id] = oid.split(\".\")\n if (not (oid in self.ws.objectMap) or\n _instance == \"1\" and _type == \"7\"): # force refresh orders\n data = self.rpc.get_object(oid)\n self.ws.objectMap[oid] = data\n else:\n data = self.ws.objectMap[oid]\n if len(data) == 1 :\n return data[0]\n else:\n return data\n else :\n return self.rpc.get_object(oid)[0]"
] | [
"0.71495444",
"0.5211954",
"0.5190289",
"0.51773876",
"0.50943196",
"0.5074009",
"0.50448954",
"0.4951919",
"0.49485144",
"0.49116898",
"0.4874453",
"0.48243442",
"0.4816911",
"0.4816911",
"0.4781857",
"0.47295526",
"0.47264364",
"0.47204056",
"0.47114155",
"0.46943238",
"0.46839678",
"0.46211568",
"0.46068725",
"0.4594635",
"0.45628557",
"0.45622563",
"0.45322555",
"0.45117375",
"0.44617006",
"0.4454975"
] | 0.88282555 | 0 |
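get_first_connected_object_by_slot_name above returns the first item yielded by a name-filtered generator, or None when nothing matches. A self-contained sketch of that first-match-or-None pattern follows; the item list and predicate are made up for illustration.

from typing import Callable, Iterable, Iterator, Optional

def iter_matches(items: Iterable[str], predicate: Callable[[str], bool]) -> Iterator[str]:
    # Stand-in for the filtered generator (get_connected_objects_by_slot_name_gen).
    for item in items:
        if predicate(item):
            yield item

def first_match(items: Iterable[str], predicate: Callable[[str], bool]) -> Optional[str]:
    # Same shape as get_first_connected_object_by_slot_name: first yielded value, else None.
    for item in iter_matches(items, predicate):
        return item
    return None

children = ["slot_deco_large", "slot_deco_small", "slot_counter"]
print(first_match(children, lambda name: name.endswith("small")))    # slot_deco_small
print(first_match(children, lambda name: name.endswith("missing")))  # None

The same result could be had with next(iter_matches(children, predicate), None); the explicit loop just matches the original's style.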
get_connected_objects_by_slot_name_gen(script_object, slot_name, include_object_callback=None) Get all connected objects by slot. | def get_connected_objects_by_slot_name_gen(
cls,
script_object: ScriptObject,
slot_name: CommonSlotType,
include_object_callback: Callable[[ScriptObject], bool] = None
) -> Iterator[ScriptObject]:
if script_object is None:
return tuple()
slot_name_str = str(slot_name)
with_slot_in_front_of_name = f'slot_{slot_name}'
def _has_slot_name(_connected_object: ScriptObject) -> bool:
if not _connected_object.parent_slot:
return False
for _connected_object_slot_type in _connected_object.parent_slot.slot_types:
if cls.get_slot_name(_connected_object_slot_type) in (slot_name_str, with_slot_in_front_of_name):
return True
return False
if include_object_callback is not None:
include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))
else:
include_object_callback = _has_slot_name
for connected_object in CommonObjectSlotUtils.get_all_connected_objects_gen(
script_object,
include_object_callback=include_object_callback
):
yield connected_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_first_connected_object_by_slot_name(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Union[ScriptObject, None]:\n for child in cls.get_connected_objects_by_slot_name_gen(\n script_object,\n slot_name,\n include_object_callback=include_object_callback\n ):\n return child\n return None",
"def get_all_connected_objects_gen(\n cls,\n script_object: ScriptObject,\n include_self: bool = False,\n direct_connections_only: bool = False,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[BaseObject]:\n if direct_connections_only:\n if include_self:\n yield script_object\n for connected_object in script_object.children:\n if connected_object is None:\n continue\n if include_object_callback is not None and not include_object_callback(connected_object):\n continue\n yield connected_object\n else:\n for connected_object in script_object.children_recursive_gen(include_self=include_self):\n if connected_object is None:\n continue\n if include_object_callback is not None and not include_object_callback(connected_object):\n continue\n yield connected_object",
"def is_connected(self, slot):\n return slot in self.slots",
"def get_connection_objects(self, account_id,\n business_id=None, batch=False):\n path = 'act_{}/connectionobjects'.format(account_id)\n args = {}\n if business_id:\n args['business_id'] = business_id\n\n return self.make_request(path, 'GET', args, batch=batch)",
"def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]",
"def connect(self, slot):\r\n if inspect.ismethod(slot):\r\n instance = slot.__self__\r\n function = slot.__func__\r\n if instance not in self._methods:\r\n self._methods[instance] = set()\r\n if function not in self._methods[instance]:\r\n self._methods[instance].add(function)\r\n else:\r\n if slot not in self._functions:\r\n self._functions.add(slot)",
"def get_connections(self, name):\n cls, pending, connected = self._proxies[name]\n return list(connected)",
"def all_connections(self):\n for i in _xrange(self.num_patterns):\n for c in self._available_connections[i]:\n yield c\n for c in self._in_use_connections[i]:\n yield c",
"def iter_recursive_objects(self):\n from noc.inv.models.interface import Interface\n\n for i in Interface.objects.filter(managed_object=self.id):\n yield i",
"def get_containment_slots(cls, game_object: GameObject) -> Tuple[CommonObjectContainmentSlot]:\n # noinspection PyTypeChecker\n game_object: GameObject = CommonObjectUtils.get_root_parent(game_object)\n slot_component = cls.get_slot_component(game_object)\n if slot_component is None:\n return tuple()\n containment_slot_list: List[CommonObjectContainmentSlot] = list()\n for (slot_hash, slot_types) in tuple(slot_component.get_containment_slot_infos()):\n containment_slot_list.append(CommonObjectContainmentSlot(slot_hash, slot_types))\n return tuple(containment_slot_list)",
"def connect(self, slot):\n #if inspect.getargspec(slot).keywords is None:\n # raise exceptions.SlotMustAcceptKeywords(self, slot)\n\n if not self.is_connected(slot):\n self.slots.append(slot)",
"def connected_component(region):\n conn = [list(region[0])] # first cell of connected component\n cell_idx = 0\n while cell_idx < len(conn): \n cell = conn[cell_idx] \n neighbours = find_neighbours(cell, region) # find neighbours of cell in region\n for neighbour in neighbours:\n if not neighbour in conn:\n conn.append(neighbour) # add found neighbours to connected component\n cell_idx += 1\n return conn",
"def get_incoming_connections(self, comp):\n in_connections = []\n for comp_id, connections in self.connections.items():\n for connection in connections:\n source, name = connection\n if source == comp.data:\n in_connections.append(connection)\n return in_connections",
"def update_connected_users_slot(self):\n self.update_connected_users_list()",
"def connectedComponents(catalog):\n catalog[\"components\"] = scc.KosarajuSCC(catalog[\"connections\"])\n return scc.connectedComponents(catalog[\"components\"])",
"def get_connections_by_relation(self, qid, relation):\n if self._kg_symbols is None:\n return []\n return self._kg_symbols.get_connections_by_relation(qid, relation)",
"def find_nearby_nodes_bf_graph(self, objs, dep_limit = 2, output = {}, obj_limit = 100):\n\t\tself.layers+=1\n\t\tif self.layers > dep_limit or len(objs) == 0:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE\\n')\n\t\t\tif len(objs) == 0:\n\t\t\t\tself.root_logger.info('ALL CONNECTED OBJECTS FOUND')\n\t\t\telse:\n\t\t\t\tself.layers -= 1\n\t\t\treturn output\n\t\t\n\t\tif (self.layers == 1):\n\t\t\toutput = {}\n\t\t\ti = self.get_node_info(objs[0].split()[1], objs[0].split()[0])\n\t\t\toutput[0] = {'pointers_from': [], 'type': objs[0].split()[0], 'id': objs[0].split()[1], 'name': i[0], 'status': i[1], 'deleted': i[2], 'type_full': i[3]}\n\t\t\tself.existing_nodes[objs[0]] = 0\n\t\telse:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE. SEARCHING LAYER ' + str(self.layers) + '...\\n')\n\t\tworking_objects = []\n\t\tfor obj in objs:\n\t\t\tcurrent = self.existing_nodes[obj]\n\t\t\tsuccess_counter = len(output) - 1 - current # used to keep track of next available index\t\n\t\t\tparts = obj.split() # each obj in the list stored as \"objecttype objectid\" (delimited by a space)\n\t\t\tfor obj_type in self.pointers_to[obj.split()[0]]: # adding parent nodes that the current object points to\n\t\t\t\tif obj_type == 'order_' or obj_type == 'user_': #since ids stored under user_id and order_id fields\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\telse:\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\n\t\t\t\tself.queries[sql_query] = True\n\t\t\t\tself.cur.execute(sql_query)\n\n\t\t\t\tresult = self.cur.fetchall()\n\t\t\t\t\n\n\t\t\t\tif len(result) != 0 and result[0][0] != None and (obj_type + \" \" + result[0][0]) in self.existing_nodes:\n\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from']:\n\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\telif len(result) != 0 and result[0][0] != None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tinfo = self.get_node_info(result[0][0], obj_type, pointer = obj)\n\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + result[0][0])\n\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\toutput[current + success_counter]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': result[0][0], 'type': obj_type, 'name': info[0], 'status': info[1], 'deleted': info[2], 'type_full': info[3]}\n\n\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif obj_type == 'user_' or obj_type == 'order_':\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\t\t\n\t\t\t\t\t\tresults = 
json.loads(self.cur.fetchall()[0][0]).keys()\n\t\t\t\t\t\tself.queries[sql_query] = True\n\t\t\t\t\t\tfor r in results:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + r]]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tinformation = self.get_node_info(r, obj_type, pointer = obj)\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r)\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': r, 'type': obj_type, 'name': information[0], 'status': information[1], 'deleted': information[2], 'type_full': information[3]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\t\t\ttry: #adding child nodes that point to the object\n\t\t\t\tfor obj_type in self.pointed_to_by[obj.split()[0]]:\n\t\t\t\t\tif parts[0] == 'user_' or parts[0] == 'order_':\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"id'='\" + parts[1] + \"'\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"_id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\n\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\tresults = self.cur.fetchall()\n\t\t\t\t\tself.queries[sql_query] = True\n\n\t\t\t\t\tfor r in results:\n\t\t\t\t\t\tif r[0] != None:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r[0]) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj_type + \" \" + r[0]] not in output[current]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(self.existing_nodes[obj_type + \" \" + r[0]])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r[0])\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(current + success_counter)\n\t\t\t\t\t\t\t\tif (current + success_counter) not in output:\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [], 'id': r[0], 'type': obj_type, 'name': r[1], 'status': r[2], 'deleted': r[3], 'type_full': r[4]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\t\treturn self.find_nearby_nodes_bf_graph(working_objects, dep_limit, output, obj_limit)",
"def get_all_connections(self, qid):\n if self._kg_symbols is None:\n return {}\n return self._kg_symbols.get_all_connections(qid)",
"def autoselect_connectors(self, connectors_records):\n original_parts = self.parts\n all_part_ids = [c.id for c in original_parts]\n connectors_records = [\n c for c in connectors_records if c.id not in all_part_ids\n ]\n\n slotted_parts_records = [\n self.parts_dict[list(parts)[0]]\n for parts in self.compute_slots().values()\n ]\n self.parts = slotted_parts_records + connectors_records\n self.compute_fragments()\n self.initialize()\n graph = self.filtered_connections_graph\n components = sorted(\n nx.components.connected_components(graph.to_undirected()),\n key=lambda graph_: -len(graph_),\n )\n\n for component in components:\n\n newgraph = graph.copy() # deepcopy(graph)\n newgraph.remove_nodes_from(\n set(newgraph.nodes()).difference(component)\n )\n all_paths = dict(nx.all_pairs_shortest_path(graph))\n parts_ids = set([rec.id for rec in slotted_parts_records])\n parts_nodes = [\n n\n for n in newgraph.nodes()\n if self.fragments_dict[n].original_part.id in parts_ids\n ]\n parts_graph = nx.DiGraph()\n parts_graph.add_edges_from(\n [\n (node, other_node)\n for node in parts_nodes\n for other_node, path in all_paths[node].items()\n if (other_node != node)\n and (other_node in parts_nodes)\n and len(set(path[1:-1]).intersection(set(parts_nodes)))\n == 0\n ]\n )\n cycle = []\n if len(parts_graph) != len(original_parts):\n continue\n for cycle in nx.cycles.simple_cycles(parts_graph):\n\n if len(cycle) == len(parts_graph):\n break\n if len(cycle) == len(parts_graph):\n break\n else:\n err = AssemblyMixError(\n message=\"No construct found involving all parts\", mix=self\n )\n err.graph = graph\n raise err\n if len(cycle) == 0:\n raise ValueError(\"No solution found - a connector may be missing.\")\n\n selected_connectors = [\n self.fragments_dict[n].original_part\n for (node1, node2) in zip(cycle, cycle[1:] + [cycle[0]])\n for n in all_paths[node1][node2][1:-1]\n ]\n\n # initialize the mix with the selected connectors\n self.parts = original_parts + selected_connectors\n self.compute_fragments()\n self.initialize()\n return selected_connectors",
"def cluster_get_keys_in_slot(self, slot: int, num_keys: int) -> ResponseT:\n return self.execute_command(\"CLUSTER GETKEYSINSLOT\", slot, num_keys)",
"def iterate_connected_atoms(self, atom):\n successors_iter = self._execution_graph.successors_iter\n return _depth_first_iterate(\n self._execution_graph, {\n co.FLOW: successors_iter,\n co.TASK: successors_iter,\n co.RETRY: successors_iter,\n }, successors_iter(atom))",
"def get_object_childs(self, obj_name):\n index = 0\n children_list = []\n child = 0\n parent_handle = self.get_object_handle(obj_name)\n while child != -1:\n res, child = vrep.simxGetObjectChild(self.client_id, parent_handle, index, vrep.simx_opmode_blocking)\n if res == vrep.simx_return_ok:\n children_list.append(child)\n index = index + 1\n else:\n print('Remote fucntion get_object_childs call failed.')\n return []\n del children_list[len(children_list) - 1]\n return children_list",
"def connected_components(self):\n return [_connected_components.remote(self.rows)]",
"def connections( self, cls = None ):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.inputNode() == self or item.outputNode() == self ):\n output.append(item)\n \n return output",
"def test_objectresource_listobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield calendar01.createCalendarObjectWithName(\"2.ics\", Component.fromString(self.caldata2))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n names = yield calendar.listObjectResources()\n self.assertEqual(set(names), set((\"1.ics\", \"2.ics\",)))\n yield self.commitTransaction(1)",
"def connections(self, recurse = True):\n \n return NeuroObject.connections(self, recurse) + [self.root] + self.arborizations(False) + self.gapJunctions(False) + self.innervations(False) + self.synapses(False)",
"def list_available_clients(self):\n connected_clients = self.all_clients.keys()\n return connected_clients",
"def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])",
"def outputConnections(self, cls=None):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.outputNode() == self ):\n output.append(item)\n \n return output",
"def get_connected_nodes(self, node):\n assert node in self.nodes, \"No node \"+str(node)+\" in graph \"+str(self)\n result = [x.node2 for x in self.edges if x.node1 == node]\n result += [x.node1 for x in self.edges if x.node2 == node]\n return sorted(result)"
] | [
"0.6946404",
"0.6847893",
"0.5160706",
"0.4920565",
"0.47073737",
"0.47068468",
"0.46840045",
"0.46185264",
"0.4608268",
"0.46013737",
"0.4584478",
"0.45532605",
"0.4523386",
"0.4488073",
"0.44853002",
"0.44077507",
"0.44007653",
"0.43828243",
"0.43709967",
"0.43648946",
"0.4344692",
"0.4342646",
"0.43385914",
"0.4322916",
"0.4313243",
"0.42916906",
"0.42899722",
"0.4267205",
"0.42585024",
"0.42556474"
] | 0.8711022 | 0 |
get_all_connected_objects_generator(\ script_object,\ include_self=False,\ direct_connections_only=False,\ include_object_callback=None\ ) Retrieve all objects connected to the specified Object. | def get_all_connected_objects_gen(
cls,
script_object: ScriptObject,
include_self: bool = False,
direct_connections_only: bool = False,
include_object_callback: Callable[[ScriptObject], bool] = None
) -> Iterator[BaseObject]:
if direct_connections_only:
if include_self:
yield script_object
for connected_object in script_object.children:
if connected_object is None:
continue
if include_object_callback is not None and not include_object_callback(connected_object):
continue
yield connected_object
else:
for connected_object in script_object.children_recursive_gen(include_self=include_self):
if connected_object is None:
continue
if include_object_callback is not None and not include_object_callback(connected_object):
continue
yield connected_object | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_connected_objects_by_slot_name_gen(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Iterator[ScriptObject]:\n if script_object is None:\n return tuple()\n\n slot_name_str = str(slot_name)\n with_slot_in_front_of_name = f'slot_{slot_name}'\n\n def _has_slot_name(_connected_object: ScriptObject) -> bool:\n if not _connected_object.parent_slot:\n return False\n for _connected_object_slot_type in _connected_object.parent_slot.slot_types:\n if cls.get_slot_name(_connected_object_slot_type) in (slot_name_str, with_slot_in_front_of_name):\n return True\n return False\n\n if include_object_callback is not None:\n include_object_callback = CommonFunctionUtils.run_predicates_as_one((_has_slot_name, include_object_callback))\n else:\n include_object_callback = _has_slot_name\n\n for connected_object in CommonObjectSlotUtils.get_all_connected_objects_gen(\n script_object,\n include_object_callback=include_object_callback\n ):\n yield connected_object",
"def get_connection_objects(self, account_id,\n business_id=None, batch=False):\n path = 'act_{}/connectionobjects'.format(account_id)\n args = {}\n if business_id:\n args['business_id'] = business_id\n\n return self.make_request(path, 'GET', args, batch=batch)",
"def iter_recursive_objects(self):\n from noc.inv.models.interface import Interface\n\n for i in Interface.objects.filter(managed_object=self.id):\n yield i",
"def connections(self, recurse = True):\n \n return NeuroObject.connections(self, recurse) + [self.root] + self.arborizations(False) + self.gapJunctions(False) + self.innervations(False) + self.synapses(False)",
"def all_connections(self):\n for i in _xrange(self.num_patterns):\n for c in self._available_connections[i]:\n yield c\n for c in self._in_use_connections[i]:\n yield c",
"def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist",
"def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist",
"def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects",
"def get_all_connections(self, id, connection_name, **args):\n while True:\n page = self.get_connections(id, connection_name, **args)\n for post in page[\"data\"]:\n yield post\n next = page.get(\"paging\", {}).get(\"next\")\n if not next:\n return\n args = parse_qs(urlparse(next).query)\n del args[\"access_token\"]",
"def get_downstream_objects(obj):\n # gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n # seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr([obj], olist, seen)\n return olist",
"def list_connections(self):\n return self.network.list_connections()",
"def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist",
"def list_object_parents(self,\n object_ref: str,\n include_all_links_to_each_parent: bool = True,\n **kwargs) -> Iterator:\n if include_all_links_to_each_parent:\n def unpack_response(i):\n return '$' + i['ObjectIdentifier'], i['LinkName']\n\n return _paging_loop(cd_client.list_object_parents,\n 'ParentLinks',\n unpack_response,\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n IncludeAllLinksToEachParent=include_all_links_to_each_parent,\n MaxResults=self._page_limit,\n **kwargs\n )\n else:\n return _paging_loop(cd_client.list_object_parents,\n 'Parents',\n self._make_ref,\n DirectoryArn=self._dir_arn,\n ObjectReference={'Selector': object_ref},\n IncludeAllLinksToEachParent=include_all_links_to_each_parent,\n MaxResults=self._page_limit,\n **kwargs\n )",
"def get_all_connected_nodes(self, where_to=OUTGOING):\n\n list_of_all_nodes = []\n\n if not self._directed or where_to == Vertex.OUTGOING:\n for edge in self._outgoing:\n list_of_all_nodes.append(edge.return_other_side(self))\n elif where_to == Vertex.INCOMING:\n for edge in self._incoming:\n list_of_all_nodes.append(edge.return_other_side(self))\n\n return list_of_all_nodes",
"def find_nearby_nodes_bf_graph(self, objs, dep_limit = 2, output = {}, obj_limit = 100):\n\t\tself.layers+=1\n\t\tif self.layers > dep_limit or len(objs) == 0:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE\\n')\n\t\t\tif len(objs) == 0:\n\t\t\t\tself.root_logger.info('ALL CONNECTED OBJECTS FOUND')\n\t\t\telse:\n\t\t\t\tself.layers -= 1\n\t\t\treturn output\n\t\t\n\t\tif (self.layers == 1):\n\t\t\toutput = {}\n\t\t\ti = self.get_node_info(objs[0].split()[1], objs[0].split()[0])\n\t\t\toutput[0] = {'pointers_from': [], 'type': objs[0].split()[0], 'id': objs[0].split()[1], 'name': i[0], 'status': i[1], 'deleted': i[2], 'type_full': i[3]}\n\t\t\tself.existing_nodes[objs[0]] = 0\n\t\telse:\n\t\t\tself.root_logger.info('LAYER ' + str(self.layers - 1) + ' DONE. SEARCHING LAYER ' + str(self.layers) + '...\\n')\n\t\tworking_objects = []\n\t\tfor obj in objs:\n\t\t\tcurrent = self.existing_nodes[obj]\n\t\t\tsuccess_counter = len(output) - 1 - current # used to keep track of next available index\t\n\t\t\tparts = obj.split() # each obj in the list stored as \"objecttype objectid\" (delimited by a space)\n\t\t\tfor obj_type in self.pointers_to[obj.split()[0]]: # adding parent nodes that the current object points to\n\t\t\t\tif obj_type == 'order_' or obj_type == 'user_': #since ids stored under user_id and order_id fields\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\telse:\n\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_id' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\n\t\t\t\tself.queries[sql_query] = True\n\t\t\t\tself.cur.execute(sql_query)\n\n\t\t\t\tresult = self.cur.fetchall()\n\t\t\t\t\n\n\t\t\t\tif len(result) != 0 and result[0][0] != None and (obj_type + \" \" + result[0][0]) in self.existing_nodes:\n\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from']:\n\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + result[0][0]]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\telif len(result) != 0 and result[0][0] != None:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tinfo = self.get_node_info(result[0][0], obj_type, pointer = obj)\n\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + result[0][0])\n\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\toutput[current + success_counter]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': result[0][0], 'type': obj_type, 'name': info[0], 'status': info[1], 'deleted': info[2], 'type_full': info[3]}\n\n\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif obj_type == 'user_' or obj_type == 'order_':\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsql_query = \"SELECT obj->>'\" + obj_type + \"_ids' FROM \" + parts[0] + \" WHERE obj->>'id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\t\t\n\t\t\t\t\t\tresults = 
json.loads(self.cur.fetchall()[0][0]).keys()\n\t\t\t\t\t\tself.queries[sql_query] = True\n\t\t\t\t\t\tfor r in results:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj] not in output[self.existing_nodes[obj_type + \" \" + r]]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tinformation = self.get_node_info(r, obj_type, pointer = obj)\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r)\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\tif (current + success_counter) in output:\n\t\t\t\t\t\t\t\t\toutput[self.existing_nodes[obj_type + \" \" + r]]['pointers_from'].append(self.existing_nodes[obj])\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [self.existing_nodes[obj]], 'id': r, 'type': obj_type, 'name': information[0], 'status': information[1], 'deleted': information[2], 'type_full': information[3]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\t\t\ttry: #adding child nodes that point to the object\n\t\t\t\tfor obj_type in self.pointed_to_by[obj.split()[0]]:\n\t\t\t\t\tif parts[0] == 'user_' or parts[0] == 'order_':\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"id'='\" + parts[1] + \"'\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tsql_query = \"SELECT obj->>'id', obj->>'name', obj->>'status', obj->>'deleted', obj->>'type_full' FROM \" + obj_type + \" WHERE obj->>'\" + parts[0] + \"_id'='\" + parts[1] + \"'\"\n\t\t\t\t\t\n\t\t\t\t\tself.cur.execute(sql_query)\n\t\t\t\t\tresults = self.cur.fetchall()\n\t\t\t\t\tself.queries[sql_query] = True\n\n\t\t\t\t\tfor r in results:\n\t\t\t\t\t\tif r[0] != None:\n\t\t\t\t\t\t\tif (obj_type + \" \" + r[0]) in self.existing_nodes:\n\t\t\t\t\t\t\t\tif self.existing_nodes[obj_type + \" \" + r[0]] not in output[current]['pointers_from']:\n\t\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(self.existing_nodes[obj_type + \" \" + r[0]])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tworking_objects.append(obj_type + \" \" + r[0])\n\t\t\t\t\t\t\t\tsuccess_counter += 1\n\t\t\t\t\t\t\t\tself.existing_nodes[working_objects[-1]] = current + success_counter\n\t\t\t\t\t\t\t\toutput[current]['pointers_from'].append(current + success_counter)\n\t\t\t\t\t\t\t\tif (current + success_counter) not in output:\n\t\t\t\t\t\t\t\t\toutput[current + success_counter] = {'pointers_from': [], 'id': r[0], 'type': obj_type, 'name': r[1], 'status': r[2], 'deleted': r[3], 'type_full': r[4]}\n\t\t\t\t\t\t\t\tif len(self.existing_nodes) >= obj_limit:\n\t\t\t\t\t\t\t\t\tself.root_logger.info(\"OBJECT LIMIT REACHED\")\n\t\t\t\t\t\t\t\t\treturn output\n\n\t\t\texcept Exception as e:\n\t\t\t\tpass\n\n\t\treturn self.find_nearby_nodes_bf_graph(working_objects, dep_limit, output, obj_limit)",
"def get_all(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).all()\n self.lock.release()\n return result",
"def get_connections(self, name):\n cls, pending, connected = self._proxies[name]\n return list(connected)",
"def fetchObjects(self):\n try:\n for i in service.Service.get_workers():\n yield i\n except Exception as e:\n Events.Status.emit(f\"unable to fetch worker information: {e}\")",
"def FindObjects(*args, **kwargs):\n return _gdi_.PseudoDC_FindObjects(*args, **kwargs)",
"def iter_linked ( self ):\n for script in self.iter_scripts():\n yield ( script, list ( script.iter_user_scripts() ) )",
"def list_connections(self, show_passthrough=True):\n return self._exprmapper.list_connections(show_passthrough)",
"def connections( self, cls = None ):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.inputNode() == self or item.outputNode() == self ):\n output.append(item)\n \n return output",
"async def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in await self.query(CONNECTION_URL)]",
"def get_first_connected_object_by_slot_name(\n cls,\n script_object: ScriptObject,\n slot_name: CommonSlotType,\n include_object_callback: Callable[[ScriptObject], bool] = None\n ) -> Union[ScriptObject, None]:\n for child in cls.get_connected_objects_by_slot_name_gen(\n script_object,\n slot_name,\n include_object_callback=include_object_callback\n ):\n return child\n return None",
"def outputConnections(self, cls=None):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.outputNode() == self ):\n output.append(item)\n \n return output",
"def iterate_connected_atoms(self, atom):\n successors_iter = self._execution_graph.successors_iter\n return _depth_first_iterate(\n self._execution_graph, {\n co.FLOW: successors_iter,\n co.TASK: successors_iter,\n co.RETRY: successors_iter,\n }, successors_iter(atom))",
"def object_list(self):\n for cdist_object in core.CdistObject.list_objects(\n self.local.object_path, self.local.type_path,\n self.local.object_marker_name):\n if cdist_object.cdist_type.is_install:\n self.log.debug((\"Running in config mode, ignoring install \"\n \"object: {0}\").format(cdist_object))\n else:\n yield cdist_object",
"def getConnectedUsers(self):\n\n\t\treturn self.connectedUsers",
"def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()",
"def sitecurclntconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecurclntconnections\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.62425333",
"0.5680843",
"0.54635173",
"0.5462352",
"0.54386246",
"0.5204806",
"0.515782",
"0.51439136",
"0.51033455",
"0.5007588",
"0.49970877",
"0.49409634",
"0.49109083",
"0.49077046",
"0.48719302",
"0.48705676",
"0.48520735",
"0.48517132",
"0.4849996",
"0.47837555",
"0.4740942",
"0.472717",
"0.47186562",
"0.47058347",
"0.46938533",
"0.46838132",
"0.46775287",
"0.46766588",
"0.46748376",
"0.46726382"
] | 0.8612306 | 0 |
Return wx.Icon object based on file basename and bitmap size. | def Icon(self, size, name):
# ------------------------------------------------------------------------
bitmap = self.Bitmap(size, name)
if not bitmap:
return None
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bitmap)
return icon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def LoadIcon(filename):\n # wx.Image.AddHandler(wx.PNGHandler) # This should work but it doesn't so...\n wx.InitAllImageHandlers() # ...falling back to this instead\n\n filename = \"icons/\" + filename + \".png\"\n image = wx.Image()\n\n with open(filename, mode='rb') as file:\n image.LoadFile(file, type=wx.BITMAP_TYPE_PNG)\n\n return image.ConvertToBitmap()",
"def MakeIcon(self, img):\n if \"wxMSW\" in wx.PlatformInfo:\n img = img.Scale(16, 16)\n elif \"wxGTK\" in wx.PlatformInfo:\n img = img.Scale(22, 22)\n # wxMac can be any size upto 128x128, so leave the source img alone....\n icon = wx.IconFromBitmap(img.ConvertToBitmap())\n return icon",
"def load_icon(fn):\n\n fn = os.path.join(os.path.dirname(__file__), fn)\n bmp = c4d.bitmaps.BaseBitmap()\n if bmp.InitWith(fn)[0] == c4d.IMAGERESULT_OK:\n return bmp\n return None",
"def CreateBitmap(self, artid, artclient, size):\n \n filename = str(artid) + \".png\"\n fullpath = self._find_file(filename, size.width, size.height)\n\n if fullpath:\n return wx.BitmapFromImage(wx.Image(fullpath))\n else:\n return wx.NullBitmap",
"def icon(tag: str, size=24) -> tk.PhotoImage:\n\n res_name = '%s-%dx%d.png' % (tag, size, size)\n with resources.path(ICONS_RESOURCE, res_name) as path:\n path = str(path)\n if path not in icons_loaded:\n icons_loaded[path] = tk.PhotoImage(file=str(path))\n return icons_loaded[path]",
"def createIcon(self, name):\n path = 'data/images/' + name\n icon = QtGui.QIcon(path)\n return icon",
"def make_image(self, path):\n\t\treturn self.ui.get_icon(path)",
"def get_icon(icon_file): \n img_path = _path.join(\n BASEPATH, _path.join('hallbench', _path.join('resources', 'img')))\n icon_path = _path.join(img_path, icon_file)\n icon = _QIcon()\n icon.addPixmap(\n _QPixmap(icon_path),\n _QIcon.Normal,\n _QIcon.Off)\n return icon",
"def get_image(control):\n file = _icons.get(control.Id)\n if file:\n path = os.path.join(os.path.dirname(__file__), \"icons\", file)\n return pyxll.load_image(path)",
"def __make_icon():\n icon = pygame.image.load(str(PurePath(\"res/Images/bird_wing_down.png\")))\n return icon",
"def get_icon_url(name: str, size: IconSize):\n\n file_name = \"{}.jpg\".format(name)\n icon_url_fmt = \"http://media.blizzard.com/wow/icons/{}/{}\"\n return icon_url_fmt.format(str(size.value), file_name)",
"def do_icon(srcfn, magnitude):\n img = Image.open(\"%s.png\" % (srcfn, ))\n draw = ImageDraw.Draw(img)\n (width, _height) = FONT.getsize(magnitude)\n # 40 pixel wide, we want to center it\n x0 = int(20 - (width / 2.))\n draw.text((x0, 8), magnitude, font=FONT, fill=(0, 0, 0, 255))\n img.save((\"../../htdocs/icons/lsr/%s/%s.png\"\n ) % (srcfn, magnitude))\n del img\n del draw",
"def IconFromBitmap(*args, **kwargs):\n val = _gdi_.new_IconFromBitmap(*args, **kwargs)\n return val",
"def _get_icon(icon_name):\n theme = 'Adwaita'\n size = '256x256'\n path = f'/usr/share/icons/{theme}/{size}/mimetypes/{icon_name}.png'\n return path",
"def Image(icon_name, size=32):\n pixmap = QtGui.QPixmap(\":/icons/%s.png\" % icon_name)\n return QtGui.QImage(pixmap.scaled(QtCore.QSize(size, size), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation))",
"def from_file(cls, path):\n new_icon = cls(QIcon(path))\n return new_icon",
"def GetIconOfExactSize(*args, **kwargs):\n return _gdi_.IconBundle_GetIconOfExactSize(*args, **kwargs)",
"def BitmapFromIcon(*args, **kwargs):\n val = _gdi_.new_BitmapFromIcon(*args, **kwargs)\n return val",
"def icon(value, size = QSize(16, 16)):\r\n return value",
"def icon(self, *names, **kwargs) -> gui.QIcon:\n cache_key = f\"{names}{kwargs}\"\n if cache_key in self.icon_cache:\n return self.icon_cache[cache_key]\n opts = kwargs.pop(\"options\", [{}] * len(names))\n if len(opts) != len(names):\n raise TypeError(f'\"options\" must be a list of size {len(names)}')\n parsed_options = [self._parse_options(o, kwargs, n) for o, n in zip(opts, names)]\n engine = chariconengine.CharIconEngine(self, parsed_options)\n icon = gui.QIcon(engine)\n self.icon_cache[cache_key] = icon\n return icon",
"def IconFromLocation(*args, **kwargs):\n val = _gdi_.new_IconFromLocation(*args, **kwargs)\n return val",
"def get_icon(f):\n\n url = None\n t = FileHelper.get_media_type(f)\n if t == \"audio\":\n url = \"/img/music.png\"\n elif t == \"video\":\n url = \"/img/video.png\"\n elif t == \"image\":\n return FileHelper.get_url(f)\n for fn in os.listdir(os.path.dirname(f)):\n if \"album\" in fn.lower():\n uri = os.path.join(os.path.dirname(f), fn)\n url = FileHelper.get_url(uri)\n\n return url",
"def ImgToBmp( filePath, size ):\n img = wx.Image( filePath )\n img.Rescale( size[0], size[1] )\n bmp = wx.BitmapFromImage( img )\n return bmp",
"def get_image(self, size):\n smallicon = self._item.get(\"icon_url\")\n\n if not smallicon:\n return \"\"\n\n fullurl = self._cdn_url + smallicon\n dims = size\n\n if size == self.ITEM_IMAGE_SMALL: dims = \"96fx96f\"\n elif size == self.ITEM_IMAGE_LARGE: dims = \"512fx512f\"\n\n return fullurl + '/' + dims",
"def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)",
"def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)",
"def getIconImage(self, name: str) -> Any:\n # Return the image from the cache if possible.\n if name in self.iconimages:\n image = self.iconimages.get(name)\n return image\n try:\n iconsDir = g.os_path_join(g.app.loadDir, \"..\", \"Icons\")\n homeIconsDir = g.os_path_join(g.app.homeLeoDir, \"Icons\")\n for theDir in (homeIconsDir, iconsDir):\n fullname = g.finalize_join(theDir, name)\n if g.os_path_exists(fullname):\n if 0: # Not needed: use QTreeWidget.setIconsize.\n pixmap = QtGui.QPixmap()\n pixmap.load(fullname)\n image = QtGui.QIcon(pixmap)\n else:\n image = QtGui.QIcon(fullname)\n self.iconimages[name] = image\n return image\n # No image found.\n return None\n except Exception:\n g.es_print(\"exception loading:\", fullname)\n g.es_exception()\n return None",
"def Bitmap(self, size, name):\n # ------------------------------------------------------------------------\n try:\n return self.bitmaps[size][name]\n except:\n print \"Bitmap(\\\"%s\\\", \\\"%s\\\"): No such thing!\" % (size, name)\n return None",
"def add_icon_name_from_file(self, icon_name, filename, size=None):\n try:# TODO: Make svg actually recognized\n pixbuf = GdkPixbuf.Pixbuf.new_from_file(filename)\n self.add_icon_name_from_pixbuf(icon_name, pixbuf, size)\n except Exception as e:\n print \"exception in icons.py IconManager.add_icon_name_from_file\"\n print e\n # Happens if, e.g., librsvg is not installed.",
"def ExtractIconReps(icon_file_name):\n with open(icon_file_name, \"r\") as icon_file:\n icon_file_contents = icon_file.readlines()\n\n current_icon_size = REFERENCE_SIZE_DIP\n icon_sizes = []\n current_icon_representation = []\n icon_representations = {}\n for line in icon_file_contents:\n # Strip comments and empty lines.\n line = line.partition(CPP_COMMENT_DELIMITER)[0].strip()\n if not line:\n continue\n # Retrieve sizes specified by CANVAS_DIMENSIONS to ensure icons are added in\n # sorted order by size descending.\n if line.startswith(CANVAS_DIMENSIONS):\n sizes = re.findall(r\"\\d+\", line)\n if len(sizes) != 1:\n Error(\"Malformed {} line in {} - it should specify exactly one size.\"\n .format(CANVAS_DIMENSIONS, icon_file_name))\n icon_sizes.append(int(sizes[0]))\n\n # All icons except the first / default icon must start with\n # \"CANVAS_DIMENSIONS\", so rely on it here as a icon delimiter.\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n current_icon_representation = []\n current_icon_size = icon_sizes[-1]\n\n current_icon_representation.append(line)\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n\n if not icon_representations:\n Error(\"Didn't find any icons in {}.\".format(icon_file_name))\n\n if len(icon_representations) != len(icon_sizes):\n icon_sizes.insert(0, REFERENCE_SIZE_DIP)\n if sorted(icon_sizes, reverse=True) != icon_sizes:\n Error(\"The icons in {} should be sorted in descending order of size.\"\n .format(icon_file_name))\n return icon_representations"
] | [
"0.68177384",
"0.6459053",
"0.636313",
"0.63032913",
"0.61679065",
"0.6149648",
"0.61276317",
"0.6058071",
"0.6027682",
"0.6016447",
"0.5983509",
"0.5916355",
"0.5914208",
"0.5907837",
"0.5867787",
"0.5860089",
"0.58295524",
"0.5822217",
"0.582023",
"0.57835793",
"0.5761614",
"0.5711256",
"0.5673482",
"0.5667337",
"0.5649426",
"0.560498",
"0.5560314",
"0.55460477",
"0.55347663",
"0.5512064"
] | 0.7422429 | 0 |
Search anime in anime database matching filters and return given fields | def search_anime(user_id, filters, fields, sort_col, desc):
my_fields = []
for f in fields:
if hasattr(Anime, f):
my_fields.append(getattr(Anime, f))
my_filters = [
~Anime.malId.in_(db.session.query(UserToAnime.malId)
.filter(UserToAnime.userId == user_id)
.subquery())
]
for f in AA_FILTERS:
if filters.get(f):
my_filters.append(AA_FILTERS[f](filters[f]))
if not hasattr(Anime, sort_col):
sort_col = 'title'
sort_col = getattr(Anime, sort_col)
if desc:
sort_col = sort_col.desc()
results = db.session.query(*my_fields).filter(*my_filters).order_by(sort_col).limit(30)
return parse_search_results(fields, results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_anime_info(anime_id, fields):\n my_fields = []\n for f in fields:\n try:\n my_fields.append(getattr(Anime, f))\n except AttributeError:\n pass\n\n my_filters = [Anime.malId == anime_id]\n\n results = db.session.query(*my_fields).filter(*my_filters).limit(1)\n return parse_search_results(fields, results)",
"def search(self, query):",
"def search(self):\n datas = self.cleaned_data\n films = Film.objects\n if datas['title']:\n films = films.filter(Q(title_fr__icontains=datas['title']) | Q(title_en__icontains=datas['title']))\n if datas['character']:\n films = films.filter(Q(actors__firstname__icontains=datas['character']) | Q(actors__lastname__icontains=datas['character']))\n if datas['country']:\n films = films.filter(countries__icontains=datas['country'])\n if datas['start_date']:\n films = films.filter(release_date__gte=datas['start_date'])\n if datas['end_date']:\n films = films.filter(release_date__lte=datas['end_date'])\n if datas['play']:\n films = films.filter(play_references__play=datas['play'])\n if datas['adaptation']:\n films = films.filter(play_references__type__name=datas['adaptation'])\n if datas['contributor']:\n films = films.filter(contributor=datas['contributor'])\n return films",
"def search_mal(user_id, filters, fields, sort_col, desc):\n my_fields = []\n for f in fields:\n if hasattr(Anime, f):\n my_fields.append(getattr(Anime, f))\n elif hasattr(UserToAnime, f):\n my_fields.append(getattr(UserToAnime, f))\n\n my_filters = [\n Anime.malId.in_(db.session.query(UserToAnime.malId)\n .filter(UserToAnime.userId == user_id)\n .subquery()),\n MAL_FILTERS[\"join\"](\"dummy\")\n ]\n\n for f in MAL_FILTERS:\n if filters.get(f):\n my_filters.append(MAL_FILTERS[f](filters[f]))\n\n if not hasattr(Anime, sort_col):\n if not hasattr(UserToAnime, sort_col):\n sort_col = getattr(Anime, 'title')\n else:\n sort_col = getattr(UserToAnime, sort_col)\n else:\n sort_col = getattr(Anime, sort_col)\n\n if desc:\n sort_col = sort_col.desc()\n\n results = db.session.query(*my_fields).filter(*my_filters).order_by(sort_col).limit(30)\n return parse_search_results(fields, results)",
"def lookup_search_term():\n while True:\n search_query = input('Show entries containing (in name or notes): ')\n if validate_lookup_search_term_format(search_query):\n break\n print('** Please enter search term **')\n return (Entry.select().where(Entry.employee_name.contains(search_query)) |\n Entry.select().where(Entry.task_notes.contains(search_query)))",
"def searchAll(name, table, field, goal):\n connection, cursor = DBconnect(name)\n cursor.execute(\"SELECT * FROM \"+table+\" WHERE \"+field+\"=:Id\",{\"Id\": goal})\n result = cursor.fetchall()\n DBdisconnect(connection)\n return result",
"def searchByField(database):\n field=str(input(\"What is his field name :\"))\n usrs,find=getByField(database,field)\n for usr in usrs:\n print(usr)",
"def find_some(self,table,field_list,**query_dict):\n start_sql = 'SELECT '\n sql = ''\n query_sql = ''\n for field in field_list: start_sql += field + ',' \n start_sql = start_sql[0:-1] + ' FROM %s WHERE ' % (table)\n try:\n if query_dict:\n for index in query_dict:\n if not isinstance(query_dict[index],dict): query_sql += \" %s = '%s' and\" % (index,query_dict[index]) \n else: query_sql += \" %s %s '%s' and\" % (index,query_dict[index]['rule'],query_dict[index]['value'])\n sql = (start_sql + query_sql)[0:-3] \n info_list = self.db.query(sql)\n except Exception,e: self.treat_except(e) \n return info_list",
"def getList(request, model=Luogo.objects, field=\"nome\", format=\"txt\", fields=None):\n q = request.GET.get(\"q\")\n if q is not None:\n fieldApiString = {\"%s__icontains\" % field: q}\n else:\n fieldApiString = {}\n\n querySet = model.filter(**fieldApiString)\n\n if format == \"txt\":\n results = \"\\n\".join([str(record) for record in querySet])\n return HttpResponse(results, content_type=\"text/plain\")\n if format == \"json\" and fields:\n records = querySet.values(*fields)\n results = [[record[key] for key in fields] for record in records]\n return HttpResponse(json.dumps(results), content_type=\"application/json\")",
"def results(self):\n q = self.cleaned_data['q'].strip()\n patients = PatientInformation.objects.filter(Q(operator__username__contains=q) | \\\n Q(patient_id__contains=q) | Q(first_name__contains=q) | Q(last_name__contains=q) | \\\n Q(email__contains=q)).distinct()\n return patients",
"def do_search(self, line):\n\t\tif not(self.db is None):\n\t\t\tstart = time()\n\t\t\tresult = self.db.contact.find({'$or':[\n\t\t\t\t\t{'first_name': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'surname': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'company': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'address': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'telephone': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'email': {'$regex':line, '$options':'i'}},\n\t\t\t\t\t{'id_': {'$regex':line, '$options':'i'}}\n\t\t\t\t]})\n\t\t\tfor i in result:\n\t\t\t\tpprint.pprint(i)\n\t\t\tprint(\"Time elapsed: {}\".format(time()-start))\n\t\telse:\n\t\t\tprint(\"You must open the existing database or create new one.\")",
"def search_general(abe, q):\n def process(row):\n (name, code3) = row\n return { 'name': name + ' (' + code3 + ')',\n 'uri': 'chain/' + str(name) }\n ret = map(process, abe.store.selectall(\"\"\"\n SELECT chain_name, chain_code3\n FROM chain\n WHERE UPPER(chain_name) LIKE '%' || ? || '%'\n OR UPPER(chain_code3) LIKE '%' || ? || '%'\n \"\"\", (q.upper(), q.upper())))\n return ret",
"def search(self, name: str) -> \"Airways\":\n output = self.__class__(\n self.data.query(\"route == @name.upper() or navaid == @name.upper()\")\n )\n return output",
"def quickSearch():\n calDB = db.TinyDB('../calDB.json')\n pars = db.Query()\n recList = calDB.search(pars.key.matches(\"wf\"))\n print len(recList)\n for idx in range(len(recList)):\n key = recList[idx]['key']\n vals = recList[idx]['vals']\n print key\n for ch in vals:\n\n print ch, vals[ch]\n return",
"def advanced_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif request.method=='POST':\n\t\tsearchterm =request.POST.getlist('searchterm') # list of search term value associated with searchtype\n\t\tsearchtype =request.POST.getlist('searchtype') # list of search parameter\n\t\tsearchtermorg=request.POST.getlist('searchtermorg') # search term value for organism\n\t\tsearchtermfda=request.POST.getlist('searchtermfda') # search term value for FDA\n\t\tsearchtermlist=[]\n\t\tnameFIle=names.get_first_name() # generate random file name to store user search result\n\t\tfastaseq=[]\n\t\tfinalsearhdata=''\n\t\tunique_peptides = set()\n\t\ttryptic_peptide={}\n\t\tuserSuppliedPepSeqStatus=0\n\t\ttry:\n\t\t\tfastafile = request.FILES[\"fileupload\"].read()\n\t\t\tfinalsearhdata+='File'+':'+'Fasta Sequence'+' '\n\t\t\tcurrdate=str(datetime.datetime.now())\n\t\t\tcurrdate=currdate.replace('-','_')\n\t\t\tcurrdate=currdate.replace(' ','_')\n\t\t\tcurrdate=currdate.replace(':','_')\n\t\t\tcurrdate=currdate.replace('.','_')\n\t\t\tnameFIle=currdate+'_'+str(request.FILES[\"fileupload\"]).split('.')[0] # if user upload fasta file then file name will be replaced with user provided file name along with current data and time\n\t\t\tfastafilename=nameFIle+'.fasta'\n\t\t\t#storing user provided fasta file\n\t\t\tfastafilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'fastaFIle', fastafilename)\n\t\t\tfastafilewrite=open(fastafilepath,\"w\")\n\t\t\tfastafilewrite.write(fastafile)\n\t\t\tfastafilewrite.close()\n\n\t\t\t#reading fasta file\n\t\t\tseqCounter=0\n\t\t\tfor useq_record in SeqIO.parse(fastafilepath, 'fasta'):\n\t\t\t\tseqCounter+=1\n\t\t\t\tseqheader = useq_record.id\n\t\t\t\tsequniID = seqheader.split(' ')[0]\n\t\t\t\tsequniID=sequniID.replace('>','')\n\t\t\t\ttempseqs = str(useq_record.seq).strip()\n\t\t\t\tnew_peptides = parser.cleave(tempseqs, 'trypsin')\n\t\t\t\tnew_peptides=[pep for pep in new_peptides if len(pep.strip()) > 3 and len(pep.strip()) <50]\n\t\t\t\ttryptic_peptide[seqCounter]=list(new_peptides)\n\t\t\t\tnew_peptides=list(set(new_peptides))\n\t\t\t\tunique_peptides.update(new_peptides)\n\t\t\t\tfastaseq.append(str(sequniID)+'_'+tempseqs.upper())\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tfastafileindex=searchtype.index(\"FastaFile\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[fastafileindex]\n\t\t\tdel searchterm[fastafileindex]\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\torgindex=searchtype.index(\"Organism\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[orgindex]\n\t\t\tdel searchterm[orgindex]\n\t\texcept ValueError:\n\t\t\tpass\n\t\tif len(fastaseq)>0:\n\t\t\tunique_peptides=list(unique_peptides)\n\t\t\tunique_peptides=list(map(lambda x:x.lower(),unique_peptides))\n\t\tsearchtermorg=map(str, searchtermorg) # convert data into string\n\t\tsearchtermorg=map(lambda j: j.strip(), searchtermorg) # remove space\n\t\tsearchtermorg=filter(None, searchtermorg) # remove empty value\n\t\tunqsearchtermorg=list(set(searchtermorg))\n\t\tif len(unqsearchtermorg)>0:\n\t\t\tfinalsearhdata+='Organism'+':'+unqsearchtermorg[0].strip()+' '\n\t\t\t#build elasticsearch query for organism to search 
data\n\n\t\t\torgquery={\"should\":[\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":unqsearchtermorg[0].strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]=orgquery\n\t\t\tsearchtermlist.append(booldic)\n\n\t\ttry:\n\t\t\tfdaindex=searchtype.index(\"Assays for FDA approved Marker\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[fdaindex]\n\t\t\tdel searchterm[fdaindex]\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\tsearchtermfda=map(str, searchtermfda) # convert data into string\n\t\tsearchtermfda=map(lambda j: j.strip(), searchtermfda) # remove space\n\t\tsearchtermfda=filter(None, searchtermfda) # remove empty value\n\t\tunqsearchtermfda=list(set(searchtermfda))\n\t\tif len(unqsearchtermfda)>0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker'+':'+unqsearchtermfda[0].strip()+' '\n\t\t\t#build elasticsearch query for FDA to search data\n\n\t\t\tfdaquery={\"should\":[\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":unqsearchtermfda[0].strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]=fdaquery\n\t\t\tsearchtermlist.append(booldic)\n\t\tif 'Peptide Sequence' in searchtype:\n\t\t\tuserSuppliedPepSeqStatus=1\n\t\tfor i in range(0,len(searchtype)):\n\t\t\tsubsearchtype=searchtype[i]\n\t\t\tsubsearchterm=searchterm[i]\n\t\t\t#build elasticsearch query for all except organism and FDA to search data\n\t\t\tif '|' in subsearchterm:\n\t\t\t\tsubsearchterm=(subsearchterm.strip()).split('|')\n\t\t\telse:\n\t\t\t\tsubsearchterm=(subsearchterm.strip()).split('\\n')\n\t\t\tsubsearchterm=map(str, subsearchterm)\n\t\t\tsubsearchterm=map(lambda j: j.strip(), subsearchterm)\n\t\t\tsubsearchterm=filter(None, subsearchterm)\n\t\t\tif subsearchtype == 'Peptide Sequence':\n\t\t\t\tif userSuppliedPepSeqStatus==1:\n\t\t\t\t\tfinalsearhdata+=''.join(subsearchtype)+':'+';'.join(subsearchterm)+' '\n\t\t\t\t\tif len(unique_peptides)>0:\n\t\t\t\t\t\tsubsearchterm=[(item.strip()).lower() for item in subsearchterm]\n\t\t\t\t\t\tsubsearchterm=list(set(subsearchterm) & set(unique_peptides))\n\t\t\telse:\n\t\t\t\tfinalsearhdata+=''.join(subsearchtype)+':'+';'.join(subsearchterm)+' '\n\t\t\tif len(subsearchterm)>0:\n\t\t\t\tsubsearchterm=[(item.strip()).lower() for item in subsearchterm] #converting into lower case\n\t\t\t\tsubsearchterm=list(set(subsearchterm))\n\t\t\t\tshouldlist=[]\n\t\t\t\t\n\t\t\t\tfor x in subsearchterm:\n\t\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[str(subsearchtype)+\".ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\tshouldlist.append(tempquery)\n\t\t\t\tbooldic={}\n\t\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\t\tsearchtermlist.append(booldic)\n\n\t\tif userSuppliedPepSeqStatus==0 and len(unique_peptides)>0:\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
unique_peptides:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tunqfastaseq=list(set(fastaseq))\n\t\tif len(searchtermlist)>0 or len(unqfastaseq)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery=\"\"\n\t\t\t#if len(searchtermlist)>0:\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t# if len(searchtermlist)==0:\n\t\t\t# \tquery={\n\t\t\t# \t\t\"query\": {\n\t\t\t# \t\t\t\"match_all\": {}\n\t\t\t# \t\t}\n\t\t\t# \t}\n\t\t\t#storing user search result into json format\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,size=1000,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=60)\n\t\t\t#res=helpers.scan(client=es,size=1000,scroll='2m',index=\"my-index\", doc_type=\"my-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tusersequnq=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tseqhit=0\n\t\t\t\t\t# checking any peptide present in user provided fasta sequence\n\t\t\t\t\t# classified into 3 catagories\n\t\t\t\t\tif len(unqfastaseq)>0:\n\t\t\t\t\t\tpepseq=str(jdic['Peptide Sequence']).strip()\n\t\t\t\t\t\t#if \n\t\t\t\t\t\t#matchCount = tryptic_peptide.count(pepseq.upper())\n\t\t\t\t\t\tindices = [k for k in tryptic_peptide if pepseq.upper() in tryptic_peptide[k]]\n\t\t\t\t\t\tif len(indices)>0:\n\t\t\t\t\t\t\ttempuserseqheadermatch='NA'\n\t\t\t\t\t\t\ttempmatchlist=[]\n\t\t\t\t\t\t\tfor i in indices:\n\t\t\t\t\t\t\t\ttempmatchlist.append('_'.join(fastaseq[i-1].split('_')[:-1]))\n\t\t\t\t\t\t\tif len(tempmatchlist)>0:\n\t\t\t\t\t\t\t\ttempuserseqheadermatch='<br/>'.join(tempmatchlist)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tif len(indices) > 1:\n\t\t\t\t\t\t\t\tseqhit=len(indices)\n\t\t\t\t\t\t\t\tjdic[\"Peptide in user's database\"] =tempuserseqheadermatch\n\t\t\t\t\t\t\t\tjdic[\"Peptide unique in user's database\"] =\"Present but not unique\"\n\t\t\t\t\t\t\tif len(indices) == 
1:\n\t\t\t\t\t\t\t\tseqhit=len(indices)\n\t\t\t\t\t\t\t\tjdic[\"Peptide in user's database\"] =tempuserseqheadermatch\n\t\t\t\t\t\t\t\tjdic[\"Peptide unique in user's database\"] =\"Present and unique\"\n\t\t\t\t\t\t\t\tusersequnq.append(\"Present and unique\")\n\t\t\t\t\t\t\tjfinaldata.append(jdic)\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tjfinaldata.append(jdic)\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t\t\t#checking any result generated by database\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\t#storing only 10000 rows in json format\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\t\t\t# if result found then do other job\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO data\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json format\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tif len(unqfastaseq)>0:\n\t\t\t\t\ttempcalunq=str(round(((float(usersequnq.count('Present and unique'))/float(len(jfinaldata)))*100),2))+'%'\n\t\t\t\t\tunqisostat.append([\"User data\",tempcalunq,\"NA\"])\n\t\t\t\t\tcontextindex={\n\t\t\t\t\t\t\"filename\":urlname,\"fastacolname\":json.dumps(fastacolname),\n\t\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase),'fastafilename':json.dumps(nameFIle)\n\t\t\t\t\t\t}\n\t\t\t\t\treturn render(request,'resultformuserseq.html',contextindex)\n\t\t\t\telse:\n\t\t\t\t\tcontextindex={\n\t\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t\t}\n\t\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn 
render(request,'resultform.html',{'foundHits':foundHits})\n\t\telse:\n\t\t\treturn render(request,'resultform.html',{'foundHits':0})",
"def get_data(query, search_type):\n\n def filter_movies_only(entries):\n return [e for e in entries if e['media_type'] == 'movie']\n\n query = query.encode('utf-8')\n tmdb = get_tmdb(lang)\n search = tmdb.Search()\n if search_type == 'movie':\n movies = search.movie(query=query)['results']\n else:\n persons = search.person(query=query)['results']\n # We only select the first found actor/director.\n if persons:\n person_id = persons[0]['id']\n else:\n return []\n person = tmdb.People(person_id)\n person.combined_credits()\n if search_type == 'actor':\n movies = filter_movies_only(person.cast)\n else:\n movies = filter_movies_only(person.crew)\n movies = [m for m in movies if m['job'] == 'Director']\n return movies",
"def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results",
"def findJobData(table,**filters):\n\n\n s = alchemy_connect()\n\n en = s.get_bind(mapper=None)\n\n metadata = MetaData(en)\n\n lwtf_table = Table('SHOW_lwtf',metadata, autoload=True)\n tp_table = Table('SHOW_tp_poirot',metadata, autoload=True)\n\n class SHOW_TABLE(object):\n pass\n\n mapper(SHOW_TABLE,tp_table)\n\n if len(filters):\n\n filters_as_args = list()\n for key,value in filters.items():\n filters_as_args.append( '%s=\"%s\"'%(key,value) ) \n\n return eval( \"s.query(SHOW_TABLE).filter_by(%s).all()\"%','.join(filters_as_args) )\n\n else:\n\n return s.query(SHOW_TABLE).all()",
"def abstract_search(self, model, params):\n domain = []\n\n for key, value in params.items():\n self.check_field_existence(model, key)\n\n # we change the operator according to the field type or name\n if key == 'name':\n domain.append((key, 'ilike', value))\n elif type(value) is list:\n domain.append((key, 'in', value))\n elif key == 'active' and value == False:\n domain.append((key, '!=', True))\n else:\n domain.append((key, '=', value))\n\n return self.env[model].sudo().search(domain)",
"def api_search(title: str) -> Dict[str,List[AnimeThemeAnime]]:\n if not title:\n return None # an empty anime title\n \n r = session.get(URL.format(title))\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 429:\n raise AnimeThemesTimeout('Got 429 error from animethemes.moe, please wait 30s to get the rest of entries.')\n else:\n r.raise_for_status()",
"def search(query_string):",
"def filter_data(start_time, end_time, table_name=\"content\"):\n datas = get_text_from_mysql(table_name=table_name,\n start_time=start_time,\n end_time=end_time)\n return datas",
"def search_for_meme(self, search):\n cursor = self.conn.cursor()\n cursor.execute(f\"select * from memes where lower(meme_name) like ?\", (f'%{search}%', ))\n results = cursor.fetchall()\n cursor.close()\n return results",
"def misc_search(self, kwargs):\n attr = kwargs[\"attributes\"]\n filter_ = kwargs[\"filter\"]\n\n try:\n if attr and attr != \"ALL\":\n results = self.engine.query(filter_, attr.split(\",\"))\n else:\n results = self.engine.query(filter_)\n self.display(results, True)\n except PyAsn1UnicodeDecodeError as e:\n error(f\"Decoding error with the filter: {e}\")\n except Exception as e:\n if e.__str__() == \"\":\n error(\"An exception occurred with the provided filter\")\n else:\n error(e)",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)",
"def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries",
"def search_activity(conn, request):\n\n c = conn.cursor()\n search_query = \"SELECT * FROM Activity T1 WHERE T1.Name LIKE ?\"\n c.execute(search_query, (request,))\n result = c.fetchall()\n return result",
"def fetch_anime(title: str, alid: int, alsite: AnimeListSite) -> Optional[AnimeThemeAnime]:\n for func in (remove_bracket,simplify_title):\n title = func(title)\n \n data = api_search(title)\n if data is None: return None # internal error?\n \n anime = verify_anime(data['anime'],alid,alsite)\n if anime is not None: return anime\n \n return None",
"def search_people_2(search_type, search_value):\n try:\n # Search Types:\n # First Name, Last Name, Address 1, Address 2, City, State, Zip Code, Phone, Email, Identification\n query_string_name =\\\n \"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\\\n \"p.dateofbirth, p.dateofdeath \"\\\n \"FROM person p \"\\\n \"WHERE {0} LIKE ? COLLATE NOCASE;\"\n query_string_address =\\\n \"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\\\n \"p.dateofbirth, p.dateofdeath \"\\\n \"FROM person p \"\\\n \"LEFT JOIN address a on a.personid = p.personid \"\\\n \"WHERE {0} LIKE ? COLLATE NOCASE \"\n\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n final_query = \"\"\n if search_type in {\"First Name\", \"Last Name\"}:\n if search_type == \"First Name\":\n final_query = query_string_name.format(\"p.firstname\")\n if search_type == \"Last Name\":\n final_query = query_string_name.format(\"p.lastname\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n\n if search_type in {\"Address 1\", \"Address 2\", \"City\", \"State\", \"Zip Code\"}:\n if search_type == \"Address 1\":\n final_query = query_string_address.format(\"a.addressline1\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"Address 2\":\n final_query = query_string_address.format(\"a.addressline2\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"City\":\n final_query = query_string_address.format(\"a.city\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"State\":\n final_query = query_string_address.format(\"a.state\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n if search_type == \"Zip Code\":\n final_query = query_string_address.format(\"a.zipcode\")\n c.execute(final_query, (\"%\"+search_value+\"%\",))\n\n if search_type == \"Phone\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN phone ph on ph.contactid = c.contactid \"\n \"WHERE ph.areacode || ph.exchange || ph.trunk LIKE ?;\", (\"%\"+search_value+\"%\",))\n\n if search_type == \"Email\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname,\"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN contact c on c.personid = p.personid \"\n \"JOIN email e on e.contactid = c.contactid \"\n \"WHERE e.emailaddress LIKE ? COLLATE NOCASE;\", (\"%\"+search_value+\"%\",))\n\n if search_type == \"Identification\":\n c.execute(\"SELECT DISTINCT p.personid, p.firstname, p.lastname, p.middleinitial, p.nickname, \"\n \"p.dateofbirth, p.dateofdeath \"\n \"FROM person p \"\n \"JOIN identification i on i.personid = p.personid \"\n \"WHERE i.identificationnumber LIKE ? COLLATE NOCASE;\", (\"%\"+search_value+\"%\",))\n\n p = []\n if search_type == \"All\":\n p = read_people()\n else:\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except Exception as exc:\n aexc = exc\n return []",
"def searchFields(self):\n\n keyword = self.lineEdit.text().strip()\n self.options = []\n for field in self.all_fields:\n if keyword.lower() in field.lower(): # to make search case insensitive\n self.options.append(field)\n # Error dialog for invalid entry\n if len(self.options) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\"No field found containing keyword!\")\n msg.setInformativeText(\"Enter valid attribute\")\n msg.setWindowTitle(\"Error\")\n msg.show()\n msg.exec_()\n else:\n self.populateList()"
] | [
"0.74254316",
"0.5717658",
"0.5675069",
"0.5618613",
"0.55780196",
"0.55210227",
"0.5499253",
"0.5397141",
"0.5395199",
"0.53436446",
"0.53295803",
"0.53275436",
"0.531523",
"0.52899784",
"0.52556527",
"0.52407557",
"0.5222486",
"0.5196042",
"0.5195845",
"0.51951796",
"0.51755667",
"0.5175423",
"0.5141074",
"0.5128635",
"0.5122035",
"0.51039535",
"0.50817025",
"0.50775445",
"0.5065236",
"0.50557524"
] | 0.66820604 | 1 |
Search anime in User's MAL matching filters and return given fields | def search_mal(user_id, filters, fields, sort_col, desc):
my_fields = []
for f in fields:
if hasattr(Anime, f):
my_fields.append(getattr(Anime, f))
elif hasattr(UserToAnime, f):
my_fields.append(getattr(UserToAnime, f))
my_filters = [
Anime.malId.in_(db.session.query(UserToAnime.malId)
.filter(UserToAnime.userId == user_id)
.subquery()),
MAL_FILTERS["join"]("dummy")
]
for f in MAL_FILTERS:
if filters.get(f):
my_filters.append(MAL_FILTERS[f](filters[f]))
if not hasattr(Anime, sort_col):
if not hasattr(UserToAnime, sort_col):
sort_col = getattr(Anime, 'title')
else:
sort_col = getattr(UserToAnime, sort_col)
else:
sort_col = getattr(Anime, sort_col)
if desc:
sort_col = sort_col.desc()
results = db.session.query(*my_fields).filter(*my_filters).order_by(sort_col).limit(30)
return parse_search_results(fields, results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search_anime(user_id, filters, fields, sort_col, desc):\n my_fields = []\n for f in fields:\n if hasattr(Anime, f):\n my_fields.append(getattr(Anime, f))\n\n my_filters = [\n ~Anime.malId.in_(db.session.query(UserToAnime.malId)\n .filter(UserToAnime.userId == user_id)\n .subquery())\n ]\n for f in AA_FILTERS:\n if filters.get(f):\n my_filters.append(AA_FILTERS[f](filters[f]))\n\n if not hasattr(Anime, sort_col):\n sort_col = 'title'\n\n sort_col = getattr(Anime, sort_col)\n\n if desc:\n sort_col = sort_col.desc()\n\n results = db.session.query(*my_fields).filter(*my_filters).order_by(sort_col).limit(30)\n return parse_search_results(fields, results)",
"def get_anime_info(anime_id, fields):\n my_fields = []\n for f in fields:\n try:\n my_fields.append(getattr(Anime, f))\n except AttributeError:\n pass\n\n my_filters = [Anime.malId == anime_id]\n\n results = db.session.query(*my_fields).filter(*my_filters).limit(1)\n return parse_search_results(fields, results)",
"def get_malb(user_id, fields, sort_col='title', desc=False):\n my_fields = []\n for f in fields:\n try:\n my_fields.append(getattr(UserToAnime, f))\n except AttributeError:\n try:\n my_fields.append(getattr(Anime, f))\n except AttributeError:\n pass\n\n my_filters = [\n UserToAnime.userId == user_id,\n UserToAnime.myStatus != 10,\n UserToAnime.malId == Anime.malId,\n ]\n\n try:\n sort_col = getattr(Anime, sort_col)\n except AttributeError:\n sort_col = getattr(Anime, 'title')\n\n if desc:\n sort_col = sort_col.desc()\n\n results = db.session.query(*my_fields).filter(*my_filters).order_by(sort_col).order_by(sort_col).all()\n return parse_search_results(fields, results)",
"def get_queryset(self, **kwargs):\n username = self.request.user.username\n query = Meal.objects.filter(member__username=username)\n return query",
"def search_user(user, conditions=[],fields=[], filters={}):\n return db((db.auth_user.first_name.like(user+'%')),*conditions).select(*fields,**filters)",
"def _all_user_annotations_query(request, user):\n userid = util.user.userid_from_username(user.username, request)\n return {\n 'filtered': {\n 'filter': {'term': {'user': userid.lower()}},\n 'query': {'match_all': {}}\n }\n }",
"def filtered_user_search(request_method):\n all_users = User.objects.filter(is_active=True, is_staff=False).select_related('profile')\n if 'name' in request_method:\n all_users = all_users.filter(username__icontains=request_method['name'])\n if 'location' in request_method:\n country = None\n for cntr in COUNTRIES:\n if request_method['location'].casefold() in cntr[1].casefold():\n country = cntr[0]\n all_users = all_users.filter(Q(profile__city__icontains=request_method['location']) | Q(profile__country=country))\n if 'orgform' in request_method:\n if convert_str_to_int(request_method['orgform']) != NOT_DEFINED:\n all_users = all_users.filter(profile__org_form=request_method['orgform'])\n if 'status' in request_method:\n if convert_str_to_int(request_method['status']) != NOT_DEFINED:\n all_users = all_users.filter(profile__status=request_method['status'])\n if 'sex' in request_method:\n if convert_str_to_int(request_method['sex']) != NOT_DEFINED:\n all_users = all_users.filter(profile__sex=request_method['sex'])\n if 'cooperation' in request_method:\n goods = Good.objects.filter(cooperation__icontains=request_method['cooperation'])\n all_users = all_users.filter(goods__in=goods).distinct()\n if 'tag_id[]' in request_method:\n tags_ids = request_method.getlist('tag_id[]')\n goods = Good.objects.filter(tags__in=tags_ids).distinct()\n all_users = all_users.filter(goods__in=goods).distinct()\n return all_users",
"def get_queryset(self, **kwargs):\n username = self.request.path.split('/')[2]\n query = Meal.objects.filter(member__username=username)\n return query",
"def query_users(request):\r\n if(any(param not in [\"name\",'encryption',\"uid\",\"gid\",\"comment\",\"home\",\"shell\"] for param in request.GET)):\r\n badRequest(\"Parameters incorrect\")\r\n user = User()\r\n return HttpResponse(json.dumps(user.query(request.GET)))",
"def results(self):\n q = self.cleaned_data['q'].strip()\n patients = PatientInformation.objects.filter(Q(operator__username__contains=q) | \\\n Q(patient_id__contains=q) | Q(first_name__contains=q) | Q(last_name__contains=q) | \\\n Q(email__contains=q)).distinct()\n return patients",
"def search(request):\n # Get data form request\n name = request.DATA.get('first_name', \"''\")\n if name == \"\":\n\n first_names = request.DATA.get('first_name', \"''\")\n last_names = request.DATA.get('last_name', \"''\")\n display_names = request.DATA.get('display_name', \"''\")\n else:\n first_names = name\n last_names = name\n display_names = name\n genders = request.DATA.get('gender', \"''\")\n\n handicaps = request.DATA.get('handicap', \"''\")\n business_area = request.DATA.get('business_area', \"''\")\n city = request.DATA.get('city', \"''\")\n district = request.DATA.get('district', \"''\")\n age = request.POST.get('age', 0)\n dob_year = int(datetime.now().year) - int(age)\n min_dob = datetime.strptime(str(dob_year) + '-01-1', '%Y-%m-%d')\n max_dob = datetime.strptime(str(dob_year) + '-12-30', '%Y-%m-%d')\n\n # Search by single properties\n results = SearchQuerySet().filter(first_name=first_names).filter(last_name=last_names\n ).filter(gender=genders).filter(\n display_name=display_names).filter(handicap_us=handicaps\n ).filter(handicap_36=handicaps\n ).filter(business_area=business_area).filter(city=city\n ).filter(\n district=district)\n\n if age is not 0:\n results.filter(dob__gte=min_dob, dob__lte=max_dob)\n # Get List user\n queryset = User.objects.all()\n # Create result list\n results_list = []\n # Get User to list by id\n max_loop = results.count()\n for x in range(0, max_loop):\n user = get_object_or_404(queryset, pk=results[x].object.id)\n results_list.append(user)\n # Convert to serializer\n serializer = UserSerializer(results_list, many=True)\n if serializer.is_valid:\n return Response({'status': '200', 'code': 'OK_SEARCH',\n 'detail': serializer.data}, status=200)\n else:\n return Response({'status': '400', 'code': 'E_INVALID_PARAMETER_VALUES',\n 'detail': serializer.errors}, status=400)",
"def search(user, param):\r\n if len(param) <= 2:\r\n return bad_request(error_messages['too_short'])\r\n return search_user(param.lower(), user)",
"def api_plain_user_search(request):\n if request.GET.get('query'):\n users = search_for_plain_users(request.GET.get('query'))\n return JsonResponse(users, safe=False) \n return render_json(error=u'Mangler søkestreng')",
"def __searchUser(self, args = []):\n\n try:\n if len(args) == 0:\n self.__cm.send(p.T_QUERY, '')\n else:\n self.__cm.send(p.T_QUERY, args)\n\n reply = self.__cm.receive()\n\n if (reply is not None and reply.type == p.T_RESULT):\n [ self.__parseUserRecord(r) for r in reply.payload.split() ] \n self.__agent.printList(self.__userList)\n else:\n raise Exception, \"An error occured while fetching user data! The user list is outdated.\"\n \n except Exception, e:\n self.__handleError('List', e)",
"def identify_hacker(self, request):\n users = User.objects.filter(name__startswith=request.POST['query'])\n return HttpResponse(serializers.serialize('json', list(users), fields=('name', 'email')))",
"def searchUsers(self,conds,_from,to,order_by,desc,admin_obj):\n self.__searchUsersCheckInput(conds,_from,to,order_by,desc,admin_obj)\n search_helper=user_main.getAttributeManager().runAttrSearchers(conds,admin_obj)\n return search_helper.getUserIDs(_from,to,order_by,desc)",
"def user_list(request):\r\n query = request.GET.get('q', '')\r\n # if query has 2 or more characters\r\n if len(query) >= 2:\r\n names = query.split(' ')\r\n # if query has a first and last name\r\n if len(names) == 2:\r\n first, last = names\r\n # if first and last name have 2 or more letters\r\n if len(first) >= 2 and len(last) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=first, \r\n last_name__icontains=last) | Q(first_name__icontains=last, \r\n last_name__icontains=first)).exclude(pk=request.user.pk)\r\n # if first name has 2 or more letters\r\n elif len(first) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=first) | Q(\r\n last_name__icontains=first)).exclude(pk=request.user.pk)\r\n # if last name has 2 or more letters\r\n elif len(last) >= 2:\r\n results = User.objects.filter(Q(\r\n first_name__icontains=last) | Q(\r\n last_name__icontains=last)).exclude(pk=request.user.pk)\r\n # if first and last name have less than 2 letters\r\n else:\r\n results = []\r\n # if query only has one name\r\n else:\r\n results = User.objects.filter(Q(\r\n username__icontains=query)).exclude(pk=request.user.pk)\r\n # if query has less than 2 letters\r\n else:\r\n results = []\r\n d = {\r\n 'results': results,\r\n }\r\n t = loader.get_template('usermessages/results.html')\r\n context = Context(d)\r\n data = {\r\n 'results': t.render(context),\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')",
"def get_all_users():",
"def findSuggestions():\n users = None\n if current_user.genderPreferences == \"any\":\n users = User.query.filter(or_(User.genderPreferences==current_user.gender, User.genderPreferences=='any'), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"male\":\n users = User.query.filter(or_(User.gender==\"male\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n elif current_user.genderPreferences == \"female\":\n users = User.query.filter(or_(User.gender==\"female\", User.gender==\"other\"), or_(User.genderPreferences==current_user.gender, User.genderPreferences==\"any\"), User.state==current_user.state, User.city==current_user.city, User.id!=current_user.id).all()\n show_users = []\n print(users)\n for user in users:\n if (not user in current_user.likes) and (not user in current_user.dislikes):\n show_users.append(user)\n print(show_users)\n return show_users",
"def getInterestedUsers():",
"def get_all_users(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query) | Q(user_profile__skill_section__skill_items__skill__icontains=query)).distinct()\n return user_list",
"def test_search_user(self):\n self.maya.save_profile()\n user = Profile.search_users(self.maya.username)\n self.assertTrue(user.username==\"Maya\")",
"def locate_users_by_manager(self, user, fields=all_fields):\n return self.ldap_connection.search_s(\"ou=Users,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, \"manager=uid=\" + user +\n \",ou=Users,dc=redhat,dc=com\", fields)",
"def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']",
"def _parse_user_query(self, query):\n def _parse_basic_query(attr, value):\n if isinstance(value, str) and '*' in value:\n return MatchGlob(attr, value)\n else:\n return Eq(attr, value)\n\n if isinstance(query, dict):\n subqueries = []\n for attr, value in query.iteritems():\n if isinstance(value, (list, set, tuple)):\n # If value is a list or similar, we build an OR\n or_queries = []\n for or_query in value:\n or_queries.append( _parse_basic_query(attr, or_query) )\n subqueries.append( Or(*or_queries) )\n else:\n subqueries.append(_parse_basic_query(attr, value))\n query = And(*subqueries)\n return query",
"def list_all_users_mfa(parsed_args, config, app):\n users_reg_details = list_credential_user_registration_details(parsed_args, config, app)\n users_attribute_details = list_all_users(parsed_args, config, app)\n enforced_users = get_users_from_enforced_groups(parsed_args, config, app)\n # Now we merge the each user's attribute details with their\n # mfa and SSPR registration information\n for upn, user_details in users_attribute_details.items():\n user_reg_details = users_reg_details.get(upn, {})\n user_details['isCapable'] = user_reg_details.get('isCapable', \"No isCapable field for user\")\n user_details['isSSPREnabled'] = user_reg_details.get('isEnabled', \"No isEnabled field for user\")\n user_details['isSSPRRegistered'] = user_reg_details.get('isRegistered', \"No isRegistered field for user\")\n user_details['isMfaRegistered'] = user_reg_details.get('isMfaRegistered', \"No isMfaRegistered field for user\")\n user_details['authMethods'] = user_reg_details.get('authMethods', \"No authMethods field for user\")\n user_details[\"isMfaEnforced\"] = \"False\"\n if user_details[\"userPrincipalName\"] in enforced_users.keys():\n user_details[\"isMfaEnforced\"] = \"True\"\n if parsed_args.log:\n file_logger.to_file(\"list_all_users_mfa\", users_attribute_details)\n return users_attribute_details",
"def searchByField(database):\n field=str(input(\"What is his field name :\"))\n usrs,find=getByField(database,field)\n for usr in usrs:\n print(usr)",
"def user_search_partial():\n username = request.args.get('search') or ''\n\n ret = []\n for user in User.query.filter(User.name.ilike(username + \"%\")):\n ret.append({\n \"id\": user.id,\n \"name\": user.name\n })\n return json.dumps(ret)",
"def get(self):\n queries = {\"wildcard_properties\": []}\n\n fullname_query = request.args.get(\"fullName\", None)\n email_query = request.args.get(\"email\", None)\n\n if fullname_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{fullname_query}')\"\n queries[\"wildcard_properties\"].append(\"fullName\")\n if email_query:\n queries[\"fullName\"] = f\"TextP.startingWith('{email_query}')\"\n queries[\"wildcard_properties\"].append(\"email\")\n\n users = User.filter(limit=10, **queries)\n response = UserListSchema(many=True).dumps(users).data\n\n return jsonify_response(json.loads(response), 200)",
"def search_email_by_all(M):\n print \"basic search mode\\n\"\n rv, data = M.uid('search', None, 'All')\n if check_response(rv):\n return data\n else:\n return None"
] | [
"0.6912799",
"0.6804895",
"0.6398511",
"0.5926814",
"0.591433",
"0.5748039",
"0.5706041",
"0.5679347",
"0.5626423",
"0.5546155",
"0.5520321",
"0.5474376",
"0.5450997",
"0.5450491",
"0.54390633",
"0.5424477",
"0.540696",
"0.538905",
"0.5386829",
"0.5367615",
"0.5357427",
"0.5326905",
"0.53057325",
"0.52904654",
"0.52688533",
"0.52607596",
"0.5253109",
"0.5248657",
"0.52344143",
"0.52254647"
] | 0.7085026 | 0 |
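A minimal, self-contained sketch of the pattern in the search_mal record above: requested field names are resolved against two models with hasattr/getattr, and results are restricted to the user's list through an IN-subquery. The stand-in models, their columns, and the explicit join condition (standing in for MAL_FILTERS["join"]) are assumptions, not the project's real schema.

from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Anime(Base):
    __tablename__ = "anime"            # stand-in model; columns are assumptions
    malId = Column(Integer, primary_key=True)
    title = Column(String)

class UserToAnime(Base):
    __tablename__ = "user_to_anime"    # stand-in model; columns are assumptions
    userId = Column(Integer, primary_key=True)
    malId = Column(Integer, primary_key=True)
    myScore = Column(Integer)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Anime(malId=1, title="A"),
                     UserToAnime(userId=7, malId=1, myScore=9)])
    session.commit()

    # Resolve requested field names against either model, as search_mal does.
    fields = ["title", "myScore"]
    cols = [getattr(Anime, f) if hasattr(Anime, f) else getattr(UserToAnime, f)
            for f in fields if hasattr(Anime, f) or hasattr(UserToAnime, f)]

    # The record builds this restriction from db.session.query(...).subquery();
    # a select() expresses the same IN-subquery on SQLAlchemy 1.4+.
    in_users_list = Anime.malId.in_(
        select(UserToAnime.malId).where(UserToAnime.userId == 7))

    rows = (session.query(*cols)
            .filter(in_users_list, UserToAnime.malId == Anime.malId)
            .order_by(Anime.title)
            .limit(30)
            .all())
    print(rows)  # expected: [('A', 9)]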
Get field info for the anime with the given ID | def get_anime_info(anime_id, fields):
my_fields = []
for f in fields:
try:
my_fields.append(getattr(Anime, f))
except AttributeError:
pass
my_filters = [Anime.malId == anime_id]
results = db.session.query(*my_fields).filter(*my_filters).limit(1)
return parse_search_results(fields, results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_information(id,field=None):\n data = read_csv()\n if field:\n return data[field][id]\n else:\n return data.iloc[id]",
"def info(self, id):",
"def getField(self, fieldID, time):\n\n key = (fieldID, time)\n field = 0\n if fieldID not in self.fields.index:\n raise APIError.APIError('Unknown field ID')\n if time not in self.fields.index:\n field = interP.interpolateFields(self.fields,\n time, fieldID,\n method='linear')\n else:\n field = self.fields[key]\n\n # Check if object is registered to Pyro\n # if hasattr(self, '_pyroDaemon') and not hasattr(field, '_PyroURI'):\n # uri = self._pyroDaemon.register(field)\n # field._PyroURI = uri\n\n return(field)",
"def get_field(self, link_id, field):\n key = self.link_key(link_id)\n \n result = self.connection.hget(key, field)\n \n self.link_messenger.viewed_field(link_id, field)\n \n return result",
"def get_fields_for_cr(cr_id):\n # Construct request\n url = \"{}/reports/{}/patient_fields\"\n url = url.format(FABRIC_API_URL, cr_id)\n\n sys.stdout.flush()\n result = requests.get(url, auth=auth)\n return result.json()",
"def find(self, _id):\n _id = ObjectId(_id)\n model = self.mongo.db.userfield.find_one({\"fields._id\":_id})\n for field in model['fields']:\n if field['_id']==_id:\n return field",
"def get_info(cls, icao_id: str) -> Tuple[str, str]:\n if len(cls.mapping) == 0:\n cls._generate_mapping()\n return cls.mapping.get(icao_id.lower(), ('Unknown', 'Unknown'))",
"def find_field():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.find_field(_id)\n return __result(data, code, message)",
"def _get_info_from_fields(self, fields):\n info = []\n for field in fields:\n if field is icemac.ab.calendar.interfaces.IEvent['persons']:\n value = self.persons\n else:\n schema_field = (\n icemac.addressbook.entities.get_bound_schema_field(\n self.context, None, field,\n default_attrib_fallback=False))\n try:\n value = schema_field.get(schema_field.context)\n except AttributeError:\n # Field defined on IEvent but not on IRecurringEvent, thus\n # it does not exist on the RecurredEvent.\n value = None\n if value is not None:\n value = six.text_type(value)\n if value:\n if field is icemac.ab.calendar.interfaces.IEvent['text']:\n info.extend(value.split('\\n'))\n else:\n info.append(value)\n return info",
"def process_field_id(command, command_args):\n field_results = demisto.executeCommand(command, args=command_args)\n field_data = demisto.get(field_results[0], 'Contents')\n message_type = find_entry_type(demisto.get(field_results[0], 'Type'))\n if not field_data:\n human_readable_from_get_command = demisto.get(field_results[0], \"HumanReadable\")\n if human_readable_from_get_command:\n show_service_request_result(message_type, human_readable_from_get_command)\n show_service_request_result(\"error\", ERROR_MESSAGES + json.dumps(field_results))\n if isinstance(field_data, dict):\n all_fields = demisto.get(field_data, \"records\")\n if all_fields:\n return demisto.get(all_fields[0], \"Id\")\n elif isinstance(field_data, list):\n final_field = demisto.get(field_data[0], \"Id\")\n if final_field:\n return final_field\n else:\n show_service_request_result(message_type, field_data)",
"def get_public_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select twitchid, title, delta, maxtime, styling from mustard.timers where id=%s\", (id,))\n\t\tinfo = cur.fetchone()\n\t\tif not info: return None\n\t\t# Survive psycopg2 2.8.0 bug by turning the RealDict into a real dict\n\t\t# Otherwise, mutating the dictionary causes future iteration to crash.\n\t\tinfo = {**info}\n\t\ttwitchid = info.pop(\"twitchid\")\n\t\tcur.execute(\"select sched_timezone, schedule from mustard.users where twitchid=%s\", (twitchid,))\n\t\tsched = cur.fetchone()\n\t\tinfo[\"next_event\"] = find_next_event(sched[\"sched_timezone\"], sched[\"schedule\"], info[\"delta\"])\n\t\treturn info",
"def get_field_attr(name):\n # de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.\n # is dat omdat er twee entiteiten in 1 scherm staan?\n fields = []\n opts = my.rectypes[name]._meta\n for x in opts.get_fields(): # fields:\n fldname = x.name\n fldtype = x.get_internal_type()\n if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):\n # if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))\n continue\n try:\n length = x.max_length\n except AttributeError:\n length = -1\n fields.append((fldname, fldtype[:-5], length))\n return fields",
"def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def get_timer_details(id):\n\twith postgres, postgres.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cur:\n\t\tcur.execute(\"select * from mustard.timers where id=%s\", (id,))\n\t\treturn cur.fetchone()",
"def get_field_by_id(self, field_id):\n for field in self.fields:\n if field.id == field_id:\n return field\n raise ValueError(\"A field with an ID of '{}' could not be found.\".format(field_id))",
"def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])",
"def get_show_info(self, id, **kwargs):\n kwargs['id'] = id\n return self.get('info/show.json', **kwargs)",
"def getInfo(self, ID, name, nowForget=False):\n def getCallDict():\n if hasattr(self, 'callDict'):\n result = self.callDict\n if nowForget:\n del self.callDict\n else:\n result = None\n return result\n \n if hasattr(self, 'pastInfo'):\n if ID is None and name == 'callDict':\n return getCallDict()\n if ID in self.pastInfo:\n x = self.pastInfo[ID]\n if nowForget:\n del self.pastInfo[ID]\n return x.get(name, None)\n return None\n if name == 'callDict':\n return getCallDict()\n return None",
"def get_game_field(game_id: str):\n request = GetGameFieldRequest(UUID(game_id))\n response = minesweeper_service.get_game_field(request)\n\n if response is None:\n flask.abort(404)\n\n return flask.jsonify(response)",
"def get_mediafile_details(mediafile_id, fields):\n query = MediaFiles.query \\\n .join(Locations, MediaFiles.location_id == Locations.id) \\\n .join(Users, MediaFiles.user_id == Users.id) \\\n .filter(MediaFiles.id == mediafile_id) \\\n .add_columns(*fields)\n logging.debug('Query executed: %s' % query)\n data = to_dict(query.first(), fields)\n values = {'accessed': get_time_str(), 'visits': data['visits'] + 1 if 'visits' in data else 0}\n if values['visits']:\n update_mediafile_values(mediafile_id, values)\n return data",
"def get_fields(self, path):\n with self.inspector(path) as opened_file:\n return opened_file.describe_fields()",
"def get_field(self, field):\n return self.extra_fields[field]",
"def get_record(self, id: uplink.Path):\n pass",
"def getMetadataFields(self, study_id):\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_metadata_fields', [study_id, results])\n metadata_fields = []\n for row in results:\n metadata_fields.append((row[0], row[1]))\n return metadata_fields\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def get_anime_data(mal_id):\n BASE_URL = 'https://myanimelist.net/anime/'\n url = BASE_URL + str(mal_id)\n soup = create_soup(url)\n time.sleep(0.5+2*random.random())\n anime_data = {\n 'mal_id': mal_id,\n 'url': url,\n 'image_url': get_image_url(soup),\n 'trailer_url': get_trailer_url(soup),\n 'title_main': get_title_main(soup),\n 'title_english': get_title_english(soup),\n 'media_type': get_media_type(soup),\n 'source_material': get_source_material(soup),\n 'num_episodes': get_num_episodes(soup),\n 'airing_status': get_airing_status(soup),\n 'aired_dates': get_aired_dates(soup),\n 'premiered': get_premiered(soup),\n 'duration': get_duration(soup),\n 'content_rating': get_content_rating(soup),\n 'genres': get_genres(soup),\n 'score': get_score(soup),\n 'scored_by_num_users': get_scored_by_num_users(soup),\n 'rank_score': get_rank_score(soup),\n 'rank_popularity': get_rank_popularity(soup),\n 'members': get_members(soup),\n 'favorites': get_favorites(soup),\n 'studios': get_studios(soup),\n 'producers': get_producers(soup),\n 'licensors': get_licensors(soup)\n }\n return anime_data",
"def fieldhelp2(self, fieldid):\n txt = []\n dd_desc = M.Globals[\"^DD\"][self.fileid][fieldid][21]\n for k,v in dd_desc.keys_with_decendants():\n txt.append(dd_desc[k][0].value)\n return '\\n'.join(txt)",
"def get_device_info(an_id):\n _check_init()\n return _pypm.GetDeviceInfo(an_id)",
"def __getitem__(self, field_name):\n\n if field_name in self._module._fields.keys():\n try:\n return self._fields[field_name]\n except KeyError:\n if self['id'] == '':\n # If this is a new entry, the 'id' field is yet undefined.\n return ''\n else:\n # Retrieve the field from the SugarCRM connection.\n \n q_str = \"%s.id='%s'\" % (self._module._table, self['id'])\n res = self._module._connection.get_entry_list(\n self._module._name, q_str,\n '', 0, [field_name], 1, 0)\n\n nvl = res['entry_list'][0]['name_value_list']\n for attribute in nvl:\n if attribute == field_name:\n value = nvl[attribute]['value']\n if value:\n self._fields[attribute] = \\\n HTMLParser().unescape(\n nvl[attribute]['value'])\n else:\n self._fields[attribute] = ''\n\n return self._fields[attribute]\n\n else:\n raise AttributeError",
"def get_patient_fields(connection, patient_id):\n patient_id = str(patient_id)\n\n patient_field_results = pymedphys.mosaiq.execute(\n connection,\n \"\"\"\n SELECT\n TxField.FLD_ID,\n TxField.Field_Label,\n TxField.Field_Name,\n TxField.Version,\n TxField.Meterset,\n TxField.Type_Enum,\n Site.Site_Name\n FROM Ident, TxField, Site\n WHERE\n TxField.Pat_ID1 = Ident.Pat_ID1 AND\n TxField.SIT_Set_ID = Site.SIT_Set_ID AND\n Ident.IDA = %(patient_id)s\n \"\"\",\n {\"patient_id\": patient_id},\n )\n\n table = pd.DataFrame(\n data=patient_field_results,\n columns=[\n \"field_id\",\n \"field_label\",\n \"field_name\",\n \"field_version\",\n \"monitor_units\",\n \"field_type\",\n \"site\",\n ],\n )\n\n table.drop_duplicates(inplace=True)\n\n table[\"field_type\"] = [FIELD_TYPES[item] for item in table[\"field_type\"]]\n\n return table",
"def field(self) -> IMockPin:\n return self[\"field\"]"
] | [
"0.6446642",
"0.63725543",
"0.61957574",
"0.59160596",
"0.5703118",
"0.5693682",
"0.56645566",
"0.55645645",
"0.5498024",
"0.5481042",
"0.54654",
"0.54090756",
"0.54001194",
"0.5395187",
"0.5359088",
"0.5338945",
"0.53293055",
"0.53100646",
"0.529101",
"0.52805066",
"0.52737594",
"0.5264005",
"0.52462447",
"0.5242706",
"0.52350533",
"0.52011555",
"0.5199488",
"0.51838624",
"0.5183616",
"0.51277447"
] | 0.76015735 | 0 |
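Both records above hand (fields, results) to a parse_search_results helper. One of the negatives shows a variant that wraps each row in a SearchAnimeResult object; the dict-based helper below is an assumed, simplified stand-in meant only to illustrate the field-name-to-value mapping.

def parse_search_results(fields, results):
    """Pair the requested field names with the values of each returned row.

    Simplified stand-in; the project's own helper builds result objects instead.
    """
    return [dict(zip(fields, row)) for row in results]

# Plain tuples stand in for SQLAlchemy Row objects here:
fields = ["malId", "title", "score"]
rows = [(1, "Cowboy Bebop", 8.8)]
print(parse_search_results(fields, rows))
# [{'malId': 1, 'title': 'Cowboy Bebop', 'score': 8.8}]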
Bulk add anime to database | def add_anime(utoa_list):
for utoa in utoa_list:
db.session.add(utoa)
db.session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_aa_data(anime_list):\n for anime, atog in anime_list:\n db.session.add(anime)\n for genre in atog:\n db.session.add(genre)\n\n db.session.commit()",
"def update_anime(utoa_list):\n for utoa in utoa_list:\n db.session.merge(utoa)\n\n db.session.commit()",
"def all_time_player_save():\n try:\n all_players = players.get_players()\n ap = db.all_time_players\n ap.insert_many(all_players)\n except OperationFailure as e:\n print(e)",
"def add_to_db_single(self, element):\r\n def quot(string):\r\n \"\"\" Replace \" with ' in text strings that goes into the\r\n db, right now it is only done on the name, but it should\r\n be done on all fields that might contain such characters\r\n \"\"\"\r\n return string.replace('\"', \"'\")\r\n\r\n # Make entry i measurements table\r\n query = ('INSERT INTO {table} SET '\r\n 'time=FROM_UNIXTIME({time}), '\r\n 'type=2, '\r\n 'timestep={timestep}, '\r\n 'comment=\"{comment}\", '\r\n 'pass_energy={pass_energy}, '\r\n 'excitation_energy={excitation_energy}, '\r\n 'number_of_scans={number_of_scans}, '\r\n 'project=\"{project}\", '\r\n 'file_name=\"{file_name}\", '\r\n 'name=\"{name}\";').format(\r\n table=self.tables['measurements'],\r\n time=element[0]['date'],\r\n timestep=element[0]['dwell_time'],\r\n comment=element[0]['unique_name'],\r\n pass_energy=element[0]['pass_energy'],\r\n excitation_energy=element[0]['excitation_energy'],\r\n number_of_scans=element[0]['num_scans'],\r\n project=element[0]['project'],\r\n file_name=element[0]['unique_name'].replace('\\\\', '\\\\\\\\'),\r\n name=quot(element[0]['name']))\r\n\r\n # Christian, comment this in to see a list of metadata\r\n #print element[0]\r\n self.cursor.execute(query) # COMMENT\r\n\r\n # Get the id of it\r\n query = ('select id from {table} where type=2 '\r\n 'order by id desc limit 1;').\\\r\n format(table=self.tables['measurements'])\r\n self.cursor.execute(query)\r\n id_ = self.cursor.fetchall()[0][0]\r\n\r\n # Add the data to xy_values table in chunks of 100 data points\r\n counter = 0\r\n query_reset = 'INSERT INTO {table} (measurement, x, y) VALUES'.format(\r\n table=self.tables['xy'])\r\n query = query_reset\r\n # element[1] is tuple of data: (Array(x0, x1, x2), Array(y0, y1, y2)).\r\n # The zip statement (where * pulls out both value) turns it into:\r\n # [(x0, y0), (x1, y1), (x2, y2)]\r\n for x_value, y_value in zip(*element[1]):\r\n counter += 1\r\n query += '({0},{1},{2})'.format(id_, x_value, y_value)\r\n if counter < 100:\r\n query += ','\r\n else:\r\n query += ';'\r\n self.cursor.execute(query)\r\n counter = 0\r\n query = query_reset\r\n # Remember to write the last less than 100 points\r\n if query != query_reset:\r\n # Remove the last , and add a ;\r\n query = query[0: -1] + ';'\r\n self.cursor.execute(query)",
"def insertionAno(cur, conn, date_list):\n for date in date_list:\n idAno = date[0]\n ano=date[1][0]\n fechaEstreno=date[1][1]\n # print(anoInsert.format(idAno,ano,fechaEstreno))\n # REGISTER ANO IN ACTOR TABLE\n cur.execute(anoInsert.format(idAno,ano,fechaEstreno))\n conn.commit()",
"async def insert_many(self, models):\n\n pass",
"def _bulk_add_rows(self, converted) :\n\n insert_sql = 'INSERT INTO \"%s\" VALUES (%s)' % (self.name, ','.join(['?'] * len(self.cols)))\n cur = self.con.cursor()\n cur.executemany(insert_sql, converted)",
"def bulk_insert(cls, path=\"data.json\"):\n from json import load\n from codecs import open\n \n lists = load(open(path, \"r\", \"utf8\"))\n for lst in lists:\n ing = cls(content = lst)\n ing.put()",
"def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)",
"def bulk_insert(cls, device_id, imeis):\n insertion_object = []\n for imei in imeis:\n insertion_object.append({'imei': imei, 'normalized_imei': imei[0:14], 'device_id': device_id})\n res = db.engine.execute(ImeiDevice.__table__.insert(), insertion_object)\n res.close()",
"def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ?);\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.executemany(query, infos)",
"def _insert_bulk(self, iterable):\n self.cursor.executemany(self.INSERT, iterable)\n self.conn.commit()",
"def write_to_database(info,timer):\n\n inserts = create_sql_write(info,timer)\n\n connection = engine.connect()\n for insert in inserts:\n connection.execute(insert)\n connection.close()",
"def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})",
"def run(self):\n self.db.table('points').insert({\n 'name': 'biblioteca',\n 'rfid': '123456'\n })",
"def add(self, file_infos):\n self._check_writable_()\n \n for file_info in file_infos:\n #columns = mesh_id, value, date_data, lon, lat, date_added_to_db, sv_name, info\n #add file to db with status adding\n file_info['date_added_to_db'] = datetime.now()\n list_write = [file_info[el] if el in file_info else None for el in self._columns.keys()]\n #check for proper inputs\n self.check_column_values(list_write)\n \n #add to db\n self._cursor.execute('INSERT INTO FILEINFO VALUES (%s)'%(','.join(['?' for el in self._columns.keys()])), tuple(self.convert_column_dates2str(list_write)))\n self._conn.commit()",
"def insert_many(cursor, data):\n query = \"\"\"INSERT INTO hasil(label, arti, query_id)\n VALUES(?, ?, (\n SELECT id FROM pencarian\n WHERE query=?))\"\"\"\n\n cursor.executemany(query, data)",
"def bulkInsert(self, url, values):\n pass",
"def insert_data_bulk(self, table_name, data):\n if len(data) == 0:\n return\n\n fields = \", \".join(data[0].keys())\n value_placeholders = \", \".join([\"%s\" for f in data[0].keys()])\n query = \"INSERT INTO %s(%s) VALUES (%s)\" % (table_name, fields, value_placeholders)\n\n data = [tuple(self.pack(data_point.values())) for data_point in data]\n\n chunk_size = 50000\n data_chunks = [data[i:i + chunk_size] for i in range(0, len(data), chunk_size)]\n for chunk in data_chunks:\n self.cursor.executemany(query, chunk)\n self.db_connection.commit()",
"def add_to_database(image_info):\n\n for video_id in image_info:\n info = image_info[video_id]\n print(info)\n add_nsfw_image_data(video_id, info) \n image_analysis_id = ImageAnalysis.query.filter(ImageAnalysis.video_id == video_id).first().image_analysis_id\n\n add_tag_image_data(video_id, info, image_analysis_id)\n # add_color_image_data(video_id, info, image_analysis_id)",
"def add_log(conn, task, start_time):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO timelogs (task, start_time) VALUES (?, ?);', (task, start_time))",
"def insert_data(data, collec, many):\n db = client.get_database('tweetstorm')\n collection = db.get_collection(collec)\n if many:\n collection.insert_many(data)\n logger.info(f\"{ymdhms()} inserted {len(data)} tweets to {collec} collection\")\n else:\n collection.insert_one(data)\n logger.info(f\"{ymdhms()} inserted data {data} to {collec} collection\")",
"def add_to_database():\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Adress, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\",(Naam, Achternaam, Adress, FietsNr, PIN))\n\n db_conn.commit()",
"def add_observation(observation):\n tstamp = datetime.datetime.now(tz=pytz.UTC)\n\n sql_raw = 'SELECT * FROM piws.insert_observation(%s::TIMESTAMPTZ, '\n sql_raw += ' %s::JSONB) '\n params = [tstamp,\n json.dumps(observation, ensure_ascii=False)]\n db.insert(sql_raw, params)",
"def add_to_db(attendance):\n\n\t# Get list of members and keyholderes\n\tmembers, keyholders, aliases = get_members_and_keyholders()\n\t\n\t# Go through each attendee\n\tfor date in attendance:\n\t\tlog(\"Adding attendance for \" + str(date))\n\t\tfor member_type in attendance[date]:\n\t\t\tlog(\"Adding attendance for \" + str(len(attendance[date][member_type])) + \" \" + member_type)\n\t\t\tfor member in attendance[date][member_type]:\n\t\t\t\t# By default, no manual inspection is required\n\t\t\t\tinspection_required = \"NONE\"\n\t\t\t\t\n\t\t\t\t# True if the attendee is an alias to a kerberos listed as a keyholder\n\t\t\t\taliased_keyholder = member in aliases and aliases[member] in keyholders\n\n\t\t\t\tif member_type == \"associate_keyholders\" or member_type == \"keyholders\":\n\t\t\t\t\tif member not in keyholders and not aliased_keyholder:\n\t\t\t\t\t\tif member in members:\n\t\t\t\t\t\t\t# Attendee is a member, but was listed as a keyholder\n\t\t\t\t\t\t\tinspection_required = \"WRONG_TYPE\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# Attendee not found in file, but was listed as a keyholder\n\t\t\t\t\t\t\tinspection_required = \"NOT_FOUND\"\n \n\t\t\t\telif member_type == \"members\":\n\t\t\t\t\t# Inspection not required if listed as member but are actually a keyholder, since\n\t\t\t\t\t# minutes could be from prior to keyholdership\n\t\t\t\t\tif member not in members and member not in keyholders and not aliased_keyholder:\n\t\t\t\t\t\t# Attendee not found in file, but was listed as a member\n\t\t\t\t\t\tinspection_required = \"NOT_FOUND\"\n \n\t\t\t\t# Add to the database\n\t\t\t\tadd_attendance_record(date, member, ATTENDEE_TYPES[member_type], inspection_required)\n\t\t\t\tlog(addto=\"attendance records added\", addval=1)\n\t\n\tlog(logsum=\"attendance records added\")",
"def insert_habit():\n analytics.insert_habit('Play Piano', 'daily', 'Learn more songs', 'Minimum one hour')",
"def addAnimalsToDb(petsInfo):\n \n for pet in petsInfo['petfinder']['pets']['pet']: \n \n #Parsing the json file to get individual information\n \n animal = pet['animal']['$t'] \n name = pet['name']['$t']\n pet_id = pet['id']['$t']\n desc = pet['description']['$t']\n age = pet['age']['$t']\n breeds = pet['breeds']['breed']\n breed = \"\"\n # because some pets have multiple breed stored in a list\n try: \n breed = breeds['$t']\n except TypeError:\n for x in breeds:\n breed += x['$t'] + \", \"\n \n status = pet['status']['$t']\n sex = pet['sex']['$t']\n size = pet['size']['$t']\n mix = pet['mix']['$t']\n match = \"Yes\"\n features = pet['options']['option']\n feature = \"\"\n # because some pets have multiple breed stored in a list\n try:\n feature = features['$t']\n except TypeError: \n for x in features:\n feature += x['$t'] + \", \"\n photo = pet['media']['photos']['photo'][2]['$t']\n if petExist(animal, pet_id): \n firstSeen = Animal.objects.get(pk = pet_id).firstSeen\n pet = Animal(animal = animal, petId = pet_id, petName = name, \n petDescription = desc, petAge = age, \n petBreed = breed, petStatus = status, \n petSex = sex, petSize = size, \n petMix = mix, petFeatures = feature, \n lastSeen = timezone.now(), \n firstSeen = firstSeen,match = match, petPhoto = photo) \n \n pet.save()\n \n# if the pet doesn't exist, add the pet. \n else: \n pet = Animal(animal = animal, petId = pet_id, petName = name, \n petDescription = desc, petAge = age, \n petBreed = breed, petStatus = status, \n petSex = sex, petSize = size, \n petMix = mix, petFeatures = feature, \n lastSeen = timezone.now(), \n firstSeen = timezone.now(), match = match, petPhoto = photo) \n \n pet.save()\n updateTwitterStatus(animal, name, pet_id)\n\n print(\"A new %s has been added.\", animal)\n \n #pprint.pprint(petsInfo) \n print(\"Pet information added to database.\")",
"def _big_insert(cursor, table, datas):\n stamped = table in ('game', 'drive', 'play')\n insert_fields_list = [k for k, _ in datas[0]]\n if stamped:\n insert_fields_list.append('time_inserted')\n insert_fields_list.append('time_updated')\n insert_fields = ', '.join(insert_fields_list)\n\n def times(xs):\n if stamped:\n xs.append('NOW()')\n xs.append('NOW()')\n return xs\n\n def vals(xs):\n return [v for _, v in xs]\n # values = ', '.join(str(_mogrify(cursor, times(vals(data)))).replace('b\"', '').replace('\"', '') for data in datas) # TODO: faster?\n # values = ', '.join(str(_mogrify_team(cursor, times(vals(data)), insert_fields_list)).replace('b\"', '').replace('\"', '').replace(\"b'(\", \"(\").replace(\"')'\", \"')\").replace(\"\\\\'\", \"'\") for data in datas)\n values = ', '.join(_mogrify_team(cursor, times(vals(data)), insert_fields_list).decode(\"utf-8\") for data in datas)\n cursor.execute('INSERT INTO %s (%s) VALUES %s'\n % (table, insert_fields, values))",
"def add_entry(db, table, columns, values):\n mycursor = db.cursor()\n\n sql = \"INSERT INTO \" + table + \" (\" + parse_sql_param_from_array(columns) + \") VALUES (\" + parse_sql_param_from_array(values, escape=True) + \")\"\n mycursor.execute(sql)\n\n db.commit()",
"def db_insert(name, task, time, note):\n Entry.create(name=name,\n task=task,\n time=time,\n note=note)\n return main()"
] | [
"0.7134782",
"0.6691967",
"0.6538583",
"0.608107",
"0.5851746",
"0.5843146",
"0.5800973",
"0.57812434",
"0.56920874",
"0.56710035",
"0.5620775",
"0.56091475",
"0.55755377",
"0.5571656",
"0.55624515",
"0.55595756",
"0.55502504",
"0.55269414",
"0.5497408",
"0.5468426",
"0.5462399",
"0.5462108",
"0.54599744",
"0.5444086",
"0.5435495",
"0.54315",
"0.54286134",
"0.5401059",
"0.5392048",
"0.5388362"
] | 0.76151854 | 0 |
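The add_anime record above stages each UserToAnime object with session.add and commits once for the whole batch. A small self-contained sketch of that pattern follows; the UserToAnime columns and constructor arguments are assumptions.

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class UserToAnime(Base):
    __tablename__ = "user_to_anime"    # stand-in model; columns are assumptions
    userId = Column(Integer, primary_key=True)
    malId = Column(Integer, primary_key=True)
    myStatus = Column(Integer)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

utoa_list = [UserToAnime(userId=7, malId=m, myStatus=1) for m in (20, 21, 30)]

with Session(engine) as session:
    for utoa in utoa_list:   # mirrors the loop in add_anime
        session.add(utoa)
    session.commit()         # a single commit covers the whole batch
    print(session.query(UserToAnime).count())  # expected: 3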
Bulk update anime to database | def update_anime(utoa_list):
for utoa in utoa_list:
db.session.merge(utoa)
db.session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)",
"def add_anime(utoa_list):\n for utoa in utoa_list:\n db.session.add(utoa)\n\n db.session.commit()",
"def update_afferents_ap(self,time):\n\t\t# Iterate over all dictionaries\n\t\tfor muscle in self.cells:\n\t\t\tfor cellName in self.cells[muscle]:\n\t\t\t\tif cellName in self._afferentsNames:\n\t\t\t\t\tfor cell in self.cells[muscle][cellName]:\n\t\t\t\t\t\tcell.update(time)",
"def database_mass_update(table,file_location):\n with open(file_location,'r') as csv:\n con = lite.connect(DB_FILE, timeout = TIMEOUT)\n con.row_factory = lite.Row\n with con:\n cur = con.cursor()\n lines = csv.readlines()\n for l in lines:\n l = l.split(',')\n if len(l) > 2:\n started = l[0]\n rownum = l[2]\n attempt_num = l[3]\n bs_id = l[1]\n\n else:\n end_time = l[0]\n errorcode = l[1]\n\n cur.execute(\"UPDATE {0} SET Started=? WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (started, rownum, attempt_num, bs_id))\n\n cur.execute(\"SELECT * FROM {0} WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (rownum, attempt_num, bs_id))\n columns = cur.fetchone()\n #get search data\n cur.execute(\"SELECT * FROM PulsarSearch WHERE Rownum=?\", (str(bs_id),))\n bs_columns = cur.fetchone()\n\n if int(errorcode) == 0:\n #add processing times and job completion count\n end_s = date_to_sec(str(end_time))\n start_s = date_to_sec(columns['Started'])\n processing = (end_s - start_s)\n\n cur.execute(\"UPDATE {0} SET Proc=?, Ended=?, Exit=? WHERE Rownum=? AND AttemptNum=? AND BSID=?\".format(table), (processing, end_time, errorcode, rownum, attempt_num, bs_id))\n\n tot_proc = float(bs_columns['TotalProc']) + processing\n job_proc = float(bs_columns[table+'Proc']) + processing\n tot_jc = int(bs_columns['TotalJobComp']) + 1\n job_jc = int(bs_columns[table+'JobComp']) + 1\n\n cur.execute(\"UPDATE PulsarSearch SET TotalProc=?, {0}Proc=?, TotalJobComp=?, {0}JobComp=? WHERE Rownum=?\".format(table),\n (str(tot_proc)[:9], str(job_proc)[:9], str(tot_jc)[:9],\n str(job_jc)[:9], bs_id))\n else:\n tot_er = int(bs_columns['TotalErrors']) + 1\n job_er = int(bs_columns[table+'Errors']) + 1\n\n cur.execute(\"UPDATE {0} SET Ended=?, Exit=? WHERE Rownum=? AND \"\n \"AttemptNum=? AND BSID=?\".format(table),\n (end_time, errorcode, rownum, attempt_num, bs_id))\n\n cur.execute(\"UPDATE PulsarSearch SET TotalErrors=?, {0}Errors=? \"\n \"WHERE Rownum=?\".format(table), (tot_er,job_er, bs_id))\n return",
"def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'",
"def update_DB(self, iterable, entry_columns, update):\n conn = self.conn\n bulk = []\n old_bulk = []\n list_of_id_db = list()\n list_of_id_atuais = self.lista_atual()\n list_of_id_afastados = self.lista_afastados()\n\n if update:\n list_of_id_db = conn.execute('SELECT id_parlamentar FROM {}.{}'.format(self.schema, self.table))\n list_of_id_db = [tup[0] for tup in list_of_id_db]\n id_row_historic = list(conn.execute('SELECT MAX(id) FROM {}.{}_historic'.format(self.schema, self.table)))[0][0]\n if not id_row_historic:\n id_row_historic = 0\n\n for senador in tqdm(iterable):\n entry = self.fill_entry_senador(senador,entry_columns)\n id_parlamentar = entry['id_parlamentar']\n\n if id_parlamentar in list_of_id_atuais:\n entry['situacao_parlamentar'] = 'atual'\n elif id_parlamentar in list_of_id_afastados:\n entry['situacao_parlamentar'] = 'afastado'\n\n if id_parlamentar in list_of_id_db:\n compare_columns = 'id_parlamentar, nome_completo, nome_parlamentar_atual, forma_tratamento, sexo_parlamentar, data_nascimento, data_falecimento, sigla_uf_origem, endereco_origem, nome_cidade_origem, codigo_estado_civil, endereco_congresso, fone, fax, website, email, profissao, id_camara, id_senado, cpf, titulo_de_eleitor, descricao_participacao'\n\n old_row = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(compare_columns,self.schema, self.table,id_parlamentar))\n old_row = list(old_row)[0]\n new_row = tuple([entry[column] for column in compare_columns.split(', ')])\n\n if old_row != new_row:\n old_entry = copy.deepcopy(entry_columns)\n\n for key in old_entry.keys():\n old_date = conn.execute(\"SELECT {} FROM {}.{} WHERE id_parlamentar='{}'\".format(key,self.schema, self.table,id_parlamentar))\n old_entry[key] = list(old_date)[0][0]\n old_entry['change_date'] = datetime.datetime.today() #capture of change date\n id_row_historic += 1\n old_entry['id'] = id_row_historic\n\n old_bulk.append(old_entry)\n conn.execute(\"DELETE FROM {}.{} WHERE id_parlamentar='{}'\".format(self.schema, self.table,id_parlamentar))\n\n bulk.append(entry)\n else:\n bulk.append(entry)\n\n if len(bulk) > 0:\n df = pd.DataFrame(bulk)\n df.set_index('id_parlamentar', inplace=True)\n print('Adding {} entries to SQL table {}.{}.'.format(len(df),self.schema, self.table))\n df.to_sql(self.table, con=self.conn, schema=self.schema, if_exists='append')\n\n if len(old_bulk) > 0:\n df2 = pd.DataFrame(old_bulk)\n df2.set_index('id_parlamentar', inplace=True)\n historic_table_name = self.table + '_historic'\n print('Adding {} entries to SQL table {}.{}.'.format(len(df2),self.schema, historic_table_name))\n df2.to_sql(historic_table_name, con=self.conn, schema=self.schema, if_exists='append')",
"def import_aa_data(anime_list):\n for anime, atog in anime_list:\n db.session.add(anime)\n for genre in atog:\n db.session.add(genre)\n\n db.session.commit()",
"def update_batch(self, *args, **kwargs):\n pass",
"def updateData(conn, task):\n # updateData(create_connection(), (20000.200, \"BTCUSDT\"))\n sql = ''' UPDATE criptomonedas\n SET price = ?\n WHERE symbol = ?'''\n cur = conn.cursor()\n cur.execute(sql, task)\n conn.commit()",
"def updateAll(self):\n \tself.idToUpdate=''\n \tself.newState=''\n \tself.save()",
"def update(self, time):\n raise NotImplementedError",
"def update(self, time):\n raise NotImplementedError",
"def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")",
"def update_record(self):\n # print(self.get_hours_diff())\n conn = sqlite3.connect(\"LmtPilots.db\")\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Pilots_hours\")\n rowids = [row[0] for row in cur.execute('SELECT rowid FROM Pilots_hours')]\n cur.executemany('UPDATE Pilots_hours SET total=? WHERE id=?', zip(self.get_hours_diff(), rowids))\n conn.commit()\n self.db_model2.select()\n # print(self.get_tot_hours())",
"def update():",
"def update():",
"def update(self, dt):\n for obj in self.objects:\n obj.update(dt)",
"def update(*args):",
"def step060():\n logger.logMessage('Begin: updating database')\n update_sql = 'update weather_work set tsa=$1, esDocId = $2 where time = $3;'\n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n c = pgConn.cursor()\n# c.execute('drop table weather_work')\n# c.execute('create table weather_work (like weather excluding constraints)')\n# c.execute('insert into weather_work select * from weather_dupes')\n# c.execute('create index weather_work_time on weather_work(time)')\n pgConn.commit()\n c.execute('prepare updtDocid as {0}'.format(update_sql))\n numUpdates = 0\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n tsa = int(fields[0])\n time = fields[1].rstrip() \n docid = fields[2].rstrip()\n try:\n dic = { 'esDocId': docid, 'tsa': tsa , 'time': time+\"+00:00\" }\n c.execute('execute updtDocid (%(tsa)s,%(esDocId)s,%(time)s)',dic)\n numUpdates += 1\n if numUpdates % 250 == 0:\n pgConn.commit()\n logger.logMessage(level='DEBUG',message=\"{0:9d} commited updates\".format(numUpdates))\n except:\n logger.logException('Exception while updating database')\n pgConn.rollback()\n raise\n line = f.readline().rstrip()\n pgConn.commit()\n logger.logMessage(\"Total updates: {0:d}\".format(numUpdates))\n c.close()\n pgConn.close()\n logger.logMessage('End : updating database')",
"def update_db_record(ark, ark_dict):\n session = Session()\n record = find_ark(ark, session).first()\n\n for key, value in ark_dict.items():\n key = key.replace(\"iastate.\", \"\").replace(\"_\", \"\").replace(\".\", \"_\")\n setattr(record, key, value)\n\n session.commit()\n session.close()",
"def run_update_step(self, time, pids, hole_rating, observations):\n\t\treturn NotImplemented",
"def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0",
"def bulk_update(self, request):\n serializer = MasterySerializer(\n data=request.data,\n many=True,\n )\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def save(self):\n for t in self.ace_types:\n self.api.api_request(\"PUT\", self.url + t, data={t: self[t]})",
"def _update_active_rides_fast(self, time: datetime) -> None:\n pass",
"def update(conn, sql):\n # sql = ''' UPDATE tasks\n # SET priority = ? ,\n # begin_date = ? ,\n # end_date = ?\n # WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(conn, sql)\n conn.commit()",
"def model_update(self, db):\n db.session.commit()",
"def update_isolation(self, time: int):",
"def _update_on_refresh():\n cities = City.query.all()\n\n #Iterates over all cities in the database and updates their value\n for city in cities:\n metric_resp, imperial_resp = _get_open_weather_requests(city.name)\n\n metric_json = metric_resp.json()\n imperial_json = imperial_resp.json()\n\n city.temp_celsius = int(metric_json[MAIN][TEMPERATURE])\n city.temp_fahrenheit = int(imperial_json[MAIN][TEMPERATURE])\n db.session.commit()",
"def all_time_player_save():\n try:\n all_players = players.get_players()\n ap = db.all_time_players\n ap.insert_many(all_players)\n except OperationFailure as e:\n print(e)"
] | [
"0.61579776",
"0.6129972",
"0.6076216",
"0.5882213",
"0.57964015",
"0.576862",
"0.5720905",
"0.57084924",
"0.57017916",
"0.5699537",
"0.56977254",
"0.56977254",
"0.56723005",
"0.56692874",
"0.5665055",
"0.5665055",
"0.564072",
"0.56222653",
"0.5608532",
"0.5565741",
"0.55108285",
"0.54802734",
"0.54690295",
"0.54459924",
"0.5443735",
"0.54302543",
"0.5428295",
"0.54223585",
"0.53949076",
"0.538727"
] | 0.7491884 | 0 |
Deletes all MALB entries for the corresponding user | def delete_malb(user_id):
return db.session.query(UserToAnime)\
.filter(UserToAnime.userId == user_id)\
.delete() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_user():",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def delete_user():\n #TODO user delete\n pass",
"def delete_account(request):\n ubanks = request.user.userbank.all()\n for ubank in ubanks:\n ubank.delete()\n user = request.user\n log_out(request)\n user.delete()\n return HttpResponse(\"Account succesfully deleted\")",
"def delete_user(self, user, instance_m):\n from resela.model.User import authenticate\n if user:\n mikrotik_m = MikrotikManager()\n lab_m = LabManager(current_user.session)\n group_m = GroupManager(current_user.session)\n user_m = UserManager(current_user.session)\n\n # Remove router conf\n mikrotik_m.unbind_vpn_to_vlan(user.email)\n mikrotik_m.delete_vpn_user(user.email)\n\n instance_list = instance_m.list(\n detailed=True,\n search_opts={'all_tenants': True, 'user_id': user.id}\n )\n\n for instance in instance_list:\n instance_name = instance.name.split('|')\n lab_name = instance_name[0] + '|' + instance_name[1]\n lab = lab_m.find(name=lab_name)\n instance_m.delete_instance(\n user_m=self,\n session=current_user.session,\n lab=lab,\n instance_id=instance.id\n )\n\n teacher_group = group_m.find(name='teachers')\n\n try:\n user_m.check_in_group(user=user, group=teacher_group)\n snapshot_factory = lab_m.find(\n name='snapshotFactory|{}'.format(user.email))\n\n session = authenticate(\n credentials=current_user.token,\n project_domain_name='snapshotFactory',\n project_name=snapshot_factory.name\n )\n\n security_handler = SecurityGroupHandler(session=session)\n\n for sec_group in security_handler.list()['security_groups']:\n if sec_group['tenant_id'] == snapshot_factory.id and \\\n 'internet' in sec_group['name']:\n security_handler.delete(sec_group['id'])\n\n lab_m.delete(snapshot_factory)\n\n except ksa_exceptions.NotFound:\n # Removing students will cause en exception as they are not found.\n # Does not need to be handled.\n pass\n\n # Remove user from db\n try:\n user_model = UserModel.query.get(user.id)\n DATABASE.session.delete(user_model)\n DATABASE.session.commit()\n except Exception:\n # Ignore user not in database\n pass\n\n # Remove user from openstack\n removed = self.delete(user)\n\n if not removed:\n print('User was not deleted:', user.id)\n raise Exception(' user not deleted')",
"def delete_anime(utoa):\n db.session.query(UserToAnime)\\\n .filter(UserToAnime.userId == utoa.userId, UserToAnime.malId == utoa.malId)\\\n .delete()\n db.session.commit()",
"async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")",
"def delete_user(self, user):\n self.delete(user)",
"def delete_user_account(connection,user):\r\n with connection:\r\n connection.execute(DELETE_SPECIFIC_USER,(user,))",
"def delete(self, userinformation):\n self.db.remove(userinformation)",
"def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })",
"def delete_user(id):\n pass",
"def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200",
"def delete(self):\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)",
"def delete_account(user):\n\n # first delete all owned categories and all the items in those\n # categories, including items that other users added to the category.\n for category in user.categories:\n for item in category.items:\n db.session.delete(item)\n db.session.delete(category)\n db.session.commit()\n\n # then delete all remaining owned items\n for item in user.items:\n db.session.delete(item)\n db.session.commit()\n\n # finally, delete the user\n db.session.delete(user)\n db.session.commit()",
"def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")",
"def DelteUser(database):\n firstname=str(input(\"what is the name of the user you want to delete : \"))\n delusr,find =getByName(database,firstname)\n if not find:\n return\n del database[delusr.key]\n for key,usr in database.items():\n if delusr.key in usr.folow:\n usr.folow.remove(delusr.key)\n if delusr.key in usr.folowed:\n usr.folowed.remove(delusr.key)\n \n os.remove(f\"Users/{delusr.key}\")",
"def remove_mfa(self):\n response = self._get_session(API, authed=True).delete(API_HOST + 'mfa/')\n _raise_on_error(response)",
"def delete(self, application_id):",
"def deleteUser(self,name):\n raise BorkedDeleteUser",
"def removedb(dbname):\n os.system(\"dropdb %s\" % dbname)",
"def unassign_house(request):\n user_pk = request.POST.get('userPK')\n term = Term.objects.get_by_url_name(request.POST.get('term'))\n # Delete the HouseMember object for this user/term if it exists\n try:\n HouseMember.objects.get(user__pk=user_pk, term=term).delete()\n except HouseMember.DoesNotExist:\n # Fine if the HouseMember does not exist since we wanted to remove it\n pass\n return json_response()",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def admin_bmark_remove(request):\r\n rdict = request.matchdict\r\n username = rdict.get('username')\r\n if username:\r\n username = username.lower()\r\n hash_id = rdict.get('hash_id')\r\n\r\n try:\r\n bmark = BmarkMgr.get_by_hash(hash_id,\r\n username=username)\r\n print bmark\r\n if bmark:\r\n DBSession.delete(bmark)\r\n return _api_response(request, {\r\n 'message': \"done\",\r\n })\r\n else:\r\n return _api_response(request, {\r\n 'error': 'Bookmark not found.',\r\n })\r\n\r\n except NoResultFound:\r\n request.response.status_code = 404\r\n return _api_response(request, {\r\n 'error': 'Bookmark with hash id {0} not found.'.format(\r\n rdict['hash_id'])\r\n })",
"def remove(self, user_id):\n pass",
"def delete():",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"def del_user(self, username):\n pass"
] | [
"0.68551534",
"0.6640083",
"0.65942574",
"0.6466849",
"0.6441461",
"0.6334826",
"0.6277041",
"0.6270421",
"0.6219305",
"0.6171667",
"0.6159259",
"0.61316097",
"0.61063683",
"0.60959184",
"0.6094192",
"0.6052034",
"0.60442144",
"0.604069",
"0.603064",
"0.6013814",
"0.6002803",
"0.5995587",
"0.5992103",
"0.5983953",
"0.5955759",
"0.5953696",
"0.59468114",
"0.59468114",
"0.59468114",
"0.59256965"
] | 0.7381267 | 0 |
Imports a list of (Anime, AtoG) tuples into the database | def import_aa_data(anime_list):
for anime, atog in anime_list:
db.session.add(anime)
for genre in atog:
db.session.add(genre)
db.session.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_anime(utoa_list):\n for utoa in utoa_list:\n db.session.add(utoa)\n\n db.session.commit()",
"def create_ta_list(ta_list):\n with open(ta_list, \"r\") as ta_file:\n user_list = ta_file.readlines()\n add_to_db(\"ta_list\", user_list[1:])\n add_to_online_db(\"online_ta\", user_list[1:])\n add_to_rating_db(\"ta_rating\", user_list[1:])",
"def save_data(data_tuple_list: list):\n con = sqlite3.connect(\"thecomradenews.db\")\n cur = con.cursor()\n try:\n cur.execute('''CREATE TABLE articles\n (source text,\n title text,\n link text,\n pubdate date,\n guid integer,\n description text,\n content text,\n categories text,\n image_link TEXT,\n UNIQUE(source, title, link, pubdate, guid, description, content, categories, image_link)\n )''')\n except sqlite3.OperationalError as e:\n print(e)\n for i in data_tuple_list:\n try:\n cur.execute(\"INSERT INTO articles values (?, ?, ?, ?, ?, ?, ?, ?, ?)\", i)\n print(f\"Article Added: '{i[1]}'\")\n con.commit()\n except sqlite3.OperationalError as e:\n print(e)\n except sqlite3.IntegrityError:\n print(f\"Article Already Exists: '{i[1]}'\")\n con.close()",
"def load(input):\n for row in input:\n SESSION.execute(INSERT_USERS,\n [int(row['id']),\n row['fname'],\n row['lname'],\n row['email'],\n row['group']])",
"def importDatabase(self):\n db_conn.execute(\"INSERT INTO Fietsenstalling (Naam, Achternaam, Telefoon, FietsNr, PIN) VALUES \"\n \"(?, ?, ?, ?, ?);\", (naamInvoer.get(), achternaamInvoer.get(), telefoonnummerInvoer.get(), FietsNr, pincodeInvoer.get()))\n\n db_conn.commit()",
"def populateSQlite(tagDf): \n conn = sqlite3.connect(os.path.join(prefix, args.db))\n with conn:\n cur = conn.cursor()\n cmds = ['INSERT INTO value VALUES(%d, \\\"%s\\\", %d);' % (r[0], r[1], r[2]) for i, r in tagDf.iterrows()]\n cmds = \"\\n\".join(cmds)\n cur.executescript(cmds)\n conn.commit()",
"def import_activity_object_in_array(path):\n conn = sqlite3.connect(path)\n c = conn.cursor()\n to_return = []\n for row in c.execute('SELECT * FROM '+\"Activity\").fetchall():\n to_return.append(Activity(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]))\n conn.close()\n return to_return",
"def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)",
"def load_anvl_file_into_db(batch_file, engine=engine, verbose=True):\n if not db_exists():\n create_db(engine)\n\n arks = load_anvl_as_dict(batch_file)\n session = Session()\n\n for i, a in enumerate(arks):\n if verbose:\n sys.stdout.write(f\"\\r{i}\")\n sys.stdout.flush()\n\n ark_obj = Ark(\n ark=a.get(\"ark\", \"\"),\n target=a.get(\"_target\", \"\"),\n profile=a.get(\"_profile\", \"\"),\n status=a.get(\"_status\", \"\"),\n owner=a.get(\"_owner\", \"\"),\n ownergroup=a.get(\"_ownergroup\", \"\"),\n created=int(a.get(\"_created\", 0)),\n updated=int(a.get(\"_updated\", 0)),\n export=not a.get(\"_export\") == \"no\",\n dc_creator=a.get(\"dc.creator\", \"\"),\n dc_title=a.get(\"dc.title\", \"\"),\n dc_type=a.get(\"dc.type\", \"\"),\n dc_date=a.get(\"dc.date\", \"\"),\n dc_publisher=a.get(\"dc.publisher\", \"\"),\n erc_when=a.get(\"erc.when\", \"\"),\n erc_what=a.get(\"erc.what\", \"\"),\n erc_who=a.get(\"erc.who\", \"\"),\n replaceable=a.get(\"iastate.replaceable\") == \"True\" or input_is_replaceable(a[\"dc.title\"])\n )\n\n session.add(ark_obj)\n\n session.commit()\n session.close()",
"def update_anime(utoa_list):\n for utoa in utoa_list:\n db.session.merge(utoa)\n\n db.session.commit()",
"def insert_data(data_type, data, db_cursor, database):\n for each_file in data:\n with open(f'{DATA_PATH}{each_file}.csv') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n for row in csv_reader:\n # do not process the header row, ignore line 0.\n if line_count != 0:\n if data_type == 'stadium_team':\n pass\n stadium_name = row[1].upper()\n x_coord = float(row[3])\n y_coord = float(row[2])\n team = row[0].upper()\n sql = f'INSERT INTO stadium (stadium_name, x_coord, y_coord) ' \\\n f'VALUES (\"{stadium_name}\", {x_coord}, {y_coord})'\n db_cursor.execute(sql)\n database.commit()\n sql = f'INSERT INTO team (name_pk, stadium_id_fk) ' \\\n f'SELECT \"{team}\", stadium_id_pk ' \\\n f'FROM stadium ' \\\n f'WHERE stadium_name = \"{stadium_name}\"'\n db_cursor.execute(sql)\n database.commit()\n elif data_type == 'game':\n home_team = row[3].upper()\n away_team = row[4].upper()\n season = row[0]\n home_team_score = row[5]\n away_team_score = row[6]\n sql = f'INSERT INTO game (home_team_id_pk_fk, away_team_id_pk_fk, season_pk, home_team_score, away_team_score) ' \\\n f'SELECT team1.team_id_pk, team2.team_id_pk, {season}, {home_team_score}, {away_team_score} ' \\\n f'FROM team team1, team team2 ' \\\n f'WHERE team1.name_pk = \"{home_team}\" ' \\\n f'AND team2.name_pk = \"{away_team}\"'\n db_cursor.execute(sql)\n database.commit()\n elif data_type == 'season_overview':\n team = row[2]\n season = row[0]\n position = row[1]\n goals_for = row[7]\n goals_against = row[8]\n wins = row[4]\n draws = row[5]\n losses = row[6]\n sql = f'INSERT INTO season_overview (team_id_pk_fk, season_pk, position, goals_for, goals_against, wins, draws, losses) ' \\\n f'SELECT team.team_id_pk, {season}, {position}, {goals_for}, {goals_against}, {wins}, {draws}, {losses} ' \\\n f'FROM team ' \\\n f'WHERE name_pk = \"{team}\" '\n db_cursor.execute(sql)\n database.commit()\n else:\n print('Invalid data type sent in!')\n raise sys.exit\n line_count += 1",
"def loadAirbnb(file):\n arr = []\n with open(file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['price'] = int(row['price'])\n row['number_of_reviews'] = int(row['number_of_reviews'])\n row['latitude'] = float(row['latitude'])\n row['longitude'] = float(row['longitude'])\n row['location'] = {'type': 'Point', 'coordinates': [\n row['longitude'], row['latitude']]}\n del row['latitude']\n del row['longitude']\n arr.append(row)\n\n inserted_ids = db.airbnb.insert_many(arr).inserted_ids\n db.airbnb.create_index(\n [('name', TEXT), ('neighbourhood', TEXT)], default_language='english')\n db.airbnb.create_index([('location', GEOSPHERE)])\n\n print(\"{} Airbnb docs inserted\".format(len(inserted_ids)))\n print(\"Text index created for airbnb\")\n print(\"Geosphere index created for airbnb\")",
"def insert_station(self, t):\n if type(t) == tuple:\n t = [t]\n\n c = self.conn.cursor()\n for s in t:\n try:\n c.execute('INSERT INTO stations VALUES (?,?,?,?)', s)\n except sqlite3.OperationalError as e:\n print e.message\n\n self.conn.commit()\n c.close()",
"def db_converter(self, event):\n # Splits the arr on the semicolon to separate the pairs\n arr = str(self.input.get()).strip().split(';')\n for i, entry in enumerate(arr):\n # Splits the pairs into x and y\n new_entry = tuple(map(float, entry.strip().split(\",\")))\n # Adds the new pairs back into the array\n arr[i] = new_entry\n tuple_grapher(arr)",
"def insert_many(cursor, data):\n query = \"\"\"INSERT INTO hasil(label, arti, query_id)\n VALUES(?, ?, (\n SELECT id FROM pencarian\n WHERE query=?))\"\"\"\n\n cursor.executemany(query, data)",
"def process_map(file_in, db_table):\n data = []\n i = 0\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n if el != None:\n data.append(el)\n i = i + 1\n #Insert every 10,000 records to the database\n if i == 10000:\n db_table.insert_many(data)\n #Empty data list and restart count\n data[:] = []\n i = 0\n #Insert rest of the data list to the database\n db_table.insert_many(data)",
"def addrow_from_list(self, list):\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n con = database.get_connection()\n cur = con.cursor()\n arg = \"(?\"\n for i in xrange(len(list) - 1):\n arg += \", ?\"\n arg += \")\"\n sql = \"INSERT INTO \\'%s\\' VALUES %s\" % (self.name, arg)\n cur.execute(sql, tuple(list))\n con.commit()",
"def import_equipment_object_in_array(path):\n conn = sqlite3.connect(path)\n c = conn.cursor()\n to_return = []\n for row in c.execute('SELECT * FROM '+\"Equipment\").fetchall():\n to_return.append(Equipment(row[0],row[1],row[2],row[3],row[4],row[5]))\n conn.close()\n return to_return",
"def put_it_in_tables(self):\n my_connection = mysql.connector.connect(user=self.user, password=self.password, database='openfoodfacts')\n cursor = my_connection.cursor(buffered=True)\n for i in self.my_data:\n prod_name = i['product_name']\n try:\n add_aliment = (\"INSERT INTO aliment \"\n \"(product_name, product_description, barcode, nutritional_score, stores, product_category) \"\n \"VALUES (%s, %s, %s, %s, %s, %s)\")\n data_aliment = (i['product_name'].replace(\"'\", \"''\"), i['product_description'].replace(\"'\", \"''\"), i['barcode'].replace(\"'\", \"''\"), i['nutritional_score'].replace(\"'\", \"''\"), i['stores'].replace(\"'\", \"''\"), i['product_category'].replace(\"'\", \"''\"))\n cursor.execute(add_aliment, data_aliment)\n except mysql.connector.IntegrityError:\n pass \n my_connection.commit()\n cursor.close()\n my_connection.close()\n print(\"ok c'est fait\")",
"def save_items_to_database(items: pd.DataFrame) -> None:\n for item in items.itertuples():\n insert_item(item)",
"def insert_data_many(settings_filename, query_filename, data_list):\n conn = connect_to_db(settings_filename)\n cursor = conn.cursor()\n \n query = load_query(query_filename)\n cursor.prepare(query)\n logging.debug(query)\n cursor.executemany(None, data_list)\n conn.commit()\n \n cursor.close()\n conn.close()\n\n return True",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def insert_frags_to_sqlite(connection, match_id, frags):\n current_row = connection.cursor()\n for frag in frags:\n if len(frag) > 2:\n current_row.execute(\"INSERT INTO match_frag(match_id, \" +\n \"frag_time, killer_name, victim_name,\" +\n \" weapon_code) VALUES (?,?,?,?,?)\",\n (match_id, frag[0], frag[1], frag[2], frag[3]))\n else:\n current_row.execute(\"INSERT INTO match_frag(match_id, \" +\n \"frag_time, killer_name) VALUES (?,?,?)\",\n (match_id, frag[0], frag[1]))",
"def load_data():\n\tscores = pd.read_csv('../data/user_assessment_scores.csv')\n\tviews = pd.read_csv('../data/user_course_views.csv')\n\ttags = pd.read_csv('../data/course_tags.csv')\n\tinterests = pd.read_csv('../data/user_interests.csv')\n\n\tdb_file = '../db/usersim.sqlite'\n\ttry:\n\t\tengine = sqlite3.connect(db_file, timeout=10)\n\t\tscores.to_sql('scores', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\tviews.to_sql('views', engine, if_exists='replace', index=False, index_label='user_handle')\n\t\ttags.to_sql('tags', engine, if_exists='replace', index=False, index_label='course_id')\n\t\tinterests.to_sql('interests', engine, if_exists='replace', index=False, index_label='user_handle')\n\texcept:\n\t\tprint('Error occured while inserting into database')\n\tfinally:\n\t\tif engine:\n\t\t\tengine.close()\n\treturn scores, views, tags, interests",
"def importDB ( c, xml ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n assert str ( type ( xml ) ) == \"<type 'instance'>\"\n for e in xml :\n if e.tag == \"Crisis\" :\n importCrisis ( c, e )\n elif e.tag == \"Organization\" :\n importOrg ( c, e )\n elif e.tag == \"Person\" :\n importPerson ( c, e )\n elif e.tag == \"CrisisKind\" :\n importCrisisKind ( c, e )\n elif e.tag == \"OrganizationKind\" :\n importOrgKind ( c, e )\n elif e.tag == \"PersonKind\" :\n importPersonKind ( c, e )",
"def load_data(connection, insert_sql, data):\n cur = connection.cursor()\n for d in data:\n cur.execute(insert_sql, d)\n connection.commit()",
"def insert_ratings(uid,petitionsIDs,petitionsValues,id):\n\n ratings=[]\n for i in range(0, len(petitionsValues)):\n ls = []\n ls.append(petitionsValues[i])\n ls.append(petitionsIDs[i])\n ls.append(id)\n ls.append(uid)\n if petitionsIDs[i] not in ['0','-1', '-2']:\n ratings.append(tuple(ls))\n db_file = dbFile\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n cur = conn.cursor()\n cur.executemany('insert into GTapp_ratings(rating,petition_id,code,user_id) values (?,?,?,?)', ratings)\n conn.commit()\n #cur.execute(sql, petition)\n #return cur.lastrowid",
"def from_tuples(tuples=list()):\n result = []\n for (user_id, creation_date, name, google_id, email , role_id, student_id) in tuples:\n person = Person()\n person.set_id(user_id)\n person.set_name(name)\n person.set_berechtigung(role_id)\n person.set_email(email)\n person.set_google_id(google_id)\n person.set_creation_date(creation_date)\n person.set_student(student_id)\n result.append(person)\n return result",
"def load_data(db_file):\n con = sqlite3.connect('wcvaarr.db')\n con.row_factory = sqlite3.Row\n c = con.cursor()\n c.execute(\"\"\"SELECT s.latitude, s.longitude, w.date, w.temperature,\n w.dew_point, w.precipitation, w.visibility, w.windspeed,\n w.indicators\n FROM weather AS w JOIN station AS s ON w.station_id = s.id\"\"\")\n weather_result = c.fetchall()\n weather_result = [\n {\n 'latitude': float(res['latitude']),\n 'longitude': float(res['latitude']),\n 'date': int(res['date']),\n 'temperature': float(res['temperature']),\n 'dew_point': float(res['dew_point']),\n # Precipitation is separated into a tuple for easier usage\n 'precipitation': (res['precipitation'][:-1], res['precipitation'][-1:]),\n 'visibility': float(res['visibility']),\n 'windspeed': float(res['windspeed']),\n 'indicators': res['indicators']\n }\n for res in weather_result\n ]\n c.execute('SELECT latitude, longitude, date, fatals FROM accident')\n accident_result = c.fetchall()\n accident_result = [\n {\n 'latitude': float(res['latitude']),\n 'longitude': float(res['longitude']),\n 'date': int(res['date']),\n 'fatals': int(res['fatals']),\n }\n for res in accident_result\n ]\n return (weather_result, accident_result)",
"def insert_archetypes(cursor):\n archetypes = (\n (ARCH_WARRIOR_ID, \"Warrior\", \"c_warrior_swordsman\", 0),\n (ARCH_WIZARD_ID, \"Wizard\", \"c_wizard_wizard\", 0),\n (ARCH_ARCHER_ID, \"Archer\", \"c_archer_archer\", 0),\n (ARCH_CLERIC_ID, \"Cleric\", \"c_cleric_cleric\", 0)\n )\n\n cursor.executemany(\"INSERT INTO archetypes VALUES (?, ?, ?, ?)\", archetypes)"
] | [
"0.6072674",
"0.5872311",
"0.57183385",
"0.5652524",
"0.5601286",
"0.55983347",
"0.5571785",
"0.55452985",
"0.5471581",
"0.5460966",
"0.5454386",
"0.5442215",
"0.5435304",
"0.54290146",
"0.54154193",
"0.5401241",
"0.538957",
"0.5381085",
"0.53738105",
"0.5352589",
"0.5347758",
"0.53434235",
"0.53225845",
"0.53186774",
"0.5298338",
"0.52872163",
"0.52544594",
"0.52450085",
"0.52401036",
"0.52349323"
] | 0.81226104 | 0 |
Parse DB search results into a list of SearchAnimeResult objects | def parse_search_results(fields, results):
my_results = []
for result in results:
my_results.append(SearchAnimeResult(fields, result))
return my_results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_results(self, result):\n\n interesting = []\n for item in result[\"hits\"][\"hits\"]:\n source = item[\"_source\"]\n meta = source.get(\"meta\")\n\n title = \"No title found\"\n descr = None\n os_path = None\n highlight = None\n\n if meta is not None:\n title = meta.get(\"title\") or \"No title found\"\n if meta.get(\"raw\") is not None:\n descr = meta.get(\"raw\").get(\"description\")\n\n path = source.get(\"path\")\n if path is not None:\n os_path = path.get(\"real\")\n\n highlight = \" \".join(item[\"highlight\"][\"content\"][0].split())\n\n temp = {\n \"id\": item[\"_id\"],\n \"title\": title,\n \"description\": descr,\n \"path\": os_path,\n \"highlight\": highlight,\n }\n interesting.append(temp)\n self.interesting = interesting\n return interesting",
"def processSearchResult(self):",
"def parse_query_results(self):\n # TODO: nicely parsed needs defining; may depend on query\n return self.json_result",
"def _transform_search_database(self):\n # TODO: Create this and link with protein object when fasta file is provided\n return {\n \"file_format\": \"fasta format\",\n \"name\": \"\",\n \"id\": 1,\n \"location\": \"\",\n \"params\": [],\n }",
"def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = pd.to_numeric(results_df['dec_motion'])\n \n return results_df",
"def parse_search_results(query, offset, data):\n # type: (str, int, dict) -> Union[None, bool]\n if data is None or not data[\"results\"]:\n return False\n paginate(query, len(data[\"results\"]), int(data[\"ttlResults\"]), offset)\n for child in data[\"results\"]:\n add_menu_item(\n play_film,\n iwm.clean_title(child[\"Title\"]),\n args={\"href\": child[\"url\"]},\n info={\n \"year\": child[\"fieldClasses\"][\"date\"],\n \"plot\": child[\"Summary\"],\n \"duration\": iwm.time_to_seconds(child[\"mediaObjectDuration\"])\n },\n art=ku.art(child[\"refImageUrl\"]),\n directory=False)\n xbmcplugin.setContent(plugin.handle, \"videos\")\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n xbmcplugin.endOfDirectory(plugin.handle)",
"def display_search_results(results):\n\n new_list = results.split(',')\n\n print('Date : {}'.format(new_list[0]))\n print('Title : {}'.format(new_list[1]))\n print('Time Spent : {}'.format(new_list[2]))\n print('Notes : {}'.format(new_list[3]))\n clear()",
"def search_results(self, results):\n for index, item in enumerate(results):\n print '[%s] %s (%s) {%s}' % (\n index, \n self._color(item.title), \n self._color(item.year, 'RED'), \n self._color(item.imdbid, 'GREEN'))",
"def parse(self):\n\n try:\n query = SearchIO.parse(self.resultsFile, \"hmmer3-text\").next()\n except StopIteration:\n raise RuntimeError(\"Invalid HMMER output\")\n\n\n self.hmmLength = query.seq_len\n self.total_gaps = [0]*self.hmmLength\n num_hits = 0\n for i, hit in enumerate(query):\n #if not hit.is_included:\n #Skip sequences below threshold\n #continue\n origSeqLength = int(hit.id.split(\"|\")[-1])\n for j, hsp in enumerate(hit):\n num_hits += 1\n seq = HMMERSequence(\n str(hsp.hit.seq), \n query.seq_len, \n origSeqLength, \n hsp.evalue,\n hsp.hit_start, \n hsp.hit_end, \n hsp.query_start, \n hsp.query_end\n )\n seq.align(hsp.hit_start, hsp.hit_end, hsp.query_start, hsp.query_end)\n seq.determineGapPositions()\n _id = \"{}_{}\".format(num_hits, hit.id)\n desc = \"[Seq:{}-{}; HMM: {}-{}; e-value: {}; program={}]\".format(\n hsp.hit_start+1,\n hsp.hit_end,\n hsp.query_start,\n hsp.query_end,\n hsp.evalue,\n query.program\n )\n record = SeqRecord(seq, id=_id, description=desc)\n\n #Update gaps for all sequences, even if not saved\n self.updateGaps(seq.gaps)\n\n if not seq.skip() and hit.is_included:\n self.records.append(record)",
"def parse_search_results(data, category):\n paginate(data[\"pagination\"], category)\n for data in data[\"results\"]:\n add_menu_item(play_film,\n data.get(\"title\").title(),\n {\"href\": \"{}?fo=json\".format(data.get(\"url\"))},\n locs.get_art(data),\n locs.get_info(data),\n False)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_GENRE)\n xbmcplugin.addSortMethod(plugin.handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def animeScrape(limitOfNew):\r\n query = '''\r\n query ($id: Int){\r\n Media (id: $id, type: ANIME) {\r\n id\r\n description\r\n title {\r\n english\r\n romaji\r\n native\r\n }\r\n startDate{\r\n year\r\n month\r\n day\r\n }\r\n endDate{\r\n year\r\n month\r\n day\r\n }\r\n coverImage{\r\n large\r\n medium\r\n }\r\n isAdult\r\n studios{\r\n nodes{\r\n name\r\n }\r\n }\r\n }\r\n }\r\n '''\r\n id_count = 1\r\n \r\n while limitOfNew > 0:\r\n #15125\r\n variables = {\r\n 'id': id_count\r\n }\r\n \r\n id_count += 1\r\n url = 'https://graphql.anilist.co'\r\n response = requests.post(url, json={'query': query, 'variables': variables})\r\n data = json.loads(response.text)\r\n if data.has_key(\"errors\"):\r\n continue\r\n \r\n start_node = data[\"data\"][\"Media\"]\r\n \r\n # Get all necessary info from json\r\n creator = start_node[\"studios\"][\"nodes\"][0][\"name\"]\r\n date = str(start_node[\"startDate\"][\"year\"]) + \"-\" + str(start_node[\"startDate\"][\"month\"]) +\"-\"+ str(start_node[\"startDate\"][\"day\"])\r\n name = start_node[\"title\"][\"english\"]\r\n url = start_node[\"coverImage\"][\"large\"]\r\n desc = start_node[\"description\"]\r\n \r\n # Change from unicode str\r\n creator = scraper.uni_to_str(creator)\r\n name = scraper.uni_to_str(name)\r\n url= scraper.uni_to_str(url)\r\n desc = scraper.uni_to_str(desc.replace(\"'\", \"\"))\r\n \r\n print(creator, date, name, url, desc)\r\n \r\n error = scraper.scrape(creator, name, desc, date, \"anime\", url)\r\n \r\n if error == 0:\r\n limitOfNew -= 1",
"def search(self, query):",
"def parse(self):",
"def test_parse_search_result(self):\n datafile = pathlib.Path(__file__).parent.joinpath(\"../data/ol_search.json\")\n search_data = json.loads(datafile.read_bytes())\n result = list(self.connector.parse_search_data(search_data, 0))[0]\n\n self.assertIsInstance(result, SearchResult)\n self.assertEqual(result.title, \"This Is How You Lose the Time War\")\n self.assertEqual(result.key, \"https://openlibrary.org/works/OL20639540W\")\n self.assertEqual(result.author, \"Amal El-Mohtar, Max Gladstone\")\n self.assertEqual(result.year, 2019)\n self.assertEqual(result.connector, self.connector)",
"def parse_search_results(search_results):\n return \", \".join(search_results)",
"def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)",
"def api_search(title: str) -> Dict[str,List[AnimeThemeAnime]]:\n if not title:\n return None # an empty anime title\n \n r = session.get(URL.format(title))\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 429:\n raise AnimeThemesTimeout('Got 429 error from animethemes.moe, please wait 30s to get the rest of entries.')\n else:\n r.raise_for_status()",
"def results(self):\n out = []\n fields = 'eid doi pii pubmed_id title subtype creator afid affilname '\\\n 'affiliation_city affiliation_country author_count '\\\n 'author_names author_ids author_afids coverDate '\\\n 'coverDisplayDate publicationName issn source_id eIssn '\\\n 'aggregationType volume issueIdentifier article_number '\\\n 'pageRange description authkeywords citedby_count '\\\n 'openaccess fund_acr fund_no fund_sponsor'\n doc = namedtuple('Document', fields)\n for item in self._json:\n info = {}\n # Parse affiliations\n try:\n info[\"affilname\"] = _join(item['affiliation'], 'affilname')\n info[\"afid\"] = _join(item['affiliation'], 'afid')\n info[\"aff_city\"] = _join(item['affiliation'], 'affiliation-city')\n info[\"aff_country\"] = _join(item['affiliation'],\n 'affiliation-country')\n except KeyError:\n pass\n # Parse authors\n try:\n # Deduplicate list of authors\n authors = _deduplicate(item['author'])\n # Extract information\n surnames = _replace_none([d['surname'] for d in authors])\n firstnames = _replace_none([d['given-name'] for d in authors])\n info[\"auth_names\"] = \";\".join([\", \".join([t[0], t[1]]) for t in\n zip(surnames, firstnames)])\n info[\"auth_ids\"] = \";\".join([d['authid'] for d in authors])\n affs = []\n for auth in authors:\n aff = listify(_deduplicate(auth.get('afid', [])))\n affs.append('-'.join([d['$'] for d in aff]))\n info[\"auth_afid\"] = (';'.join(affs) or None)\n except KeyError:\n pass\n date = item.get('prism:coverDate')\n if isinstance(date, list):\n date = date[0].get('$')\n new = doc(article_number=item.get('article-number'),\n title=item.get('dc:title'), fund_sponsor=item.get('fund-sponsor'),\n subtype=item.get('subtype'), issn=item.get('prism:issn'),\n creator=item.get('dc:creator'), affilname=info.get(\"affilname\"),\n author_names=info.get(\"auth_names\"), doi=item.get('prism:doi'),\n coverDate=date, volume=item.get('prism:volume'),\n coverDisplayDate=item.get('prism:coverDisplayDate'),\n publicationName=item.get('prism:publicationName'),\n source_id=item.get('source-id'), author_ids=info.get(\"auth_ids\"),\n aggregationType=item.get('prism:aggregationType'),\n issueIdentifier=item.get('prism:issueIdentifier'),\n pageRange=item.get('prism:pageRange'),\n author_afids=info.get(\"auth_afid\"), fund_no=item.get('fund-no'),\n affiliation_country=info.get(\"aff_country\"),\n citedby_count=item.get('citedby-count'),\n openaccess=item.get('openaccess'), eIssn=item.get('prism:eIssn'),\n author_count=item.get('author-count', {}).get('$'),\n affiliation_city=info.get(\"aff_city\"), afid=info.get(\"afid\"),\n description=item.get('dc:description'), pii=item.get('pii'),\n authkeywords=item.get('authkeywords'), eid=item.get('eid'),\n fund_acr=item.get('fund-acr'), pubmed_id=item.get('pubmed-id'))\n out.append(new)\n return out or None",
"def parse_search_results (self, response_data):\n search_results = {}\n raw_search_results = response_data['value']['videos']\n for entry_id in raw_search_results:\n if self._is_size_key(key=entry_id) == False:\n # fetch information about each show & build up a proper search results dictionary\n show = self.parse_show_list_entry(id=entry_id, entry=raw_search_results[entry_id])\n show[entry_id].update(self.parse_show_information(id=entry_id, response_data=self.fetch_show_information(id=entry_id, type=show[entry_id]['type'])))\n search_results.update(show)\n return search_results",
"def parse(self: object, data_row: list[str]):\n if len(data_row) == 0:\n return\n logging.debug(\"data row {}\".format(data_row))\n # Episode number is first element of row\n episode_id_raw: Match[str] = re.search(r\"([0-9]+)\", data_row[0])\n self.episode_id = int(episode_id_raw.group(1))\n # Year of episode\n episode_year_raw: Match[str] = re.search(r\"([0-9]{4})\", data_row[3])\n self.episode_year = int(episode_year_raw.group(1))\n # Episode name is second element of row, strip unwanted information like '(Folge 332 trägt den gleichen Titel)' using regexp\n self.episode_name = re.sub(r\"\\(Folge [0-9]+(.)+\\)\", \"\", data_row[1].strip()).strip()\n # Inspectors of episode, 5th element of row, strip unwanted information like '(Gastauftritt XXX)' using regexp but keep all anmes of comissioners\n episode_inspectors_raw: Match[str] = re.search(r\"([a-zA-zäöüÄÖÜß, ]+)(\\s+)?(\\(Gastauftritt\\s([a-zA-zäöüÄÖÜß, ]+){1}\\))?\", data_row[4])\n self.episode_inspectors = episode_inspectors_raw.group(1)\n if episode_inspectors_raw.group(4):\n self.episode_inspectors = \"{}, {}\".format(episode_inspectors_raw.group(1), episode_inspectors_raw.group(4))\n # Get name of broadcast station, 3rd element of row\n self.episode_broadcast = data_row[2].strip()\n # Get sequence number of detective team, strip alternative numbering\n self.episode_sequence = re.sub(r\"(\\(\\s*[0-9]*\\)*)\", \"\", data_row[5].strip()).strip()\n # Strip invalid characters\n self._strip_invalid_characters()\n # Mark as not empty\n self.empty = False",
"def _parseOutput(self, dbResponse, queryMeta):\n\t\tif dbResponse is None:\n\t\t\tdbResponse = []\n\t\tqueryMeta[\"Matched\"] = len(dbResponse)\n\t\tfieldNames = self.outputTable.dictKeys\n\t\treturn rsc.TableForDef(self.outputTable,\n\t\t\trows=[dict((k,v) \n\t\t\t\t\tfor k,v in itertools.izip(fieldNames, row))\n\t\t\t\tfor row in dbResponse])",
"def get_anime_info(anime_id, fields):\n my_fields = []\n for f in fields:\n try:\n my_fields.append(getattr(Anime, f))\n except AttributeError:\n pass\n\n my_filters = [Anime.malId == anime_id]\n\n results = db.session.query(*my_fields).filter(*my_filters).limit(1)\n return parse_search_results(fields, results)",
"def parse(self, response):\n\n soup = self.get_soup(response.text)\n try:\n results = soup\\\n .find('table', {'id': 'searchResult'})\\\n .find_all('tr')[1:]\n except AttributeError:\n return\n\n for result in results:\n torrent = items.Torrent(spider=self.name)\n torrent['categories'] = [\n self._category_map.get(\n furl.furl(category.attrs['href']).path.segments[-1],\n items.TorrentCategory.Unknown\n ) for category in result.find(\n 'td', {'class': 'vertTh'}\n ).find_all('a')\n ]\n torrent['magnet'] = result.find(\n 'a', {'href': re.compile('^magnet\\:.*')}\n )['href']\n torrent['hash'] = re.match(\n r'.*magnet:\\?xt=urn:(?:btih)+:([a-zA-Z0-9]+).*',\n torrent['magnet']\n ).groups()[0].lower()\n (torrent['seeders'], torrent['leechers'],) = tuple([\n int(column.contents[0])\n for column in result.find_all('td', {'align': 'right'})\n ])\n\n result_links = result.find('a', {'class': 'detLink'})\n if 'href' in result_links.attrs:\n torrent['source'] = furl.furl(response.url).set(\n path=result_links.attrs['href'], args={}\n ).url\n\n torrent['name'] = result_links.contents[0].strip()\n\n result_desc = result.find('font', {'class': 'detDesc'})\n (time_content, size_content,) = \\\n result_desc.contents[0].split(',')[:2]\n torrent['uploaded'] = self.parse_datetime(\n time_content.split(' ')[-1],\n formats=[\n '%m-%d %Y',\n '%m-%d %H:%M',\n '%H:%M',\n 'Y-day %H:%M'\n ]\n )\n torrent['size'] = self.parse_size(\n size_content.split(' ')[-1]\n )\n\n try:\n torrent['uploader'] = result_desc.find(\n 'a', {'href': re.compile('^/user/.*')}\n ).contents[0]\n except AttributeError:\n pass\n\n yield torrent",
"def test_parse_hit_details(self):\n for query in self.result:\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"SUBJECT_ID\"], \"gi|148670104|gb|EDL02051.1|\")\n self.assertEqual(\n first_hsp[\"HIT_DEF\"],\n \"insulin-like growth factor 2 receptor, isoform CRA_c [Mus musculus]\",\n )\n self.assertEqual(first_hsp[\"HIT_ACCESSION\"], \"2001\")\n self.assertEqual(first_hsp[\"HIT_LENGTH\"], 707)",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)",
"def parse_data( self ):\n self.parsed_data = dict( self.results )",
"def _parse_results(self, handle):\n result_reader = ResultsReader(handle)\n for result in result_reader:\n\n # Diagnostic messages may be returned in the results\n if isinstance(result, Message):\n logger.debug('[{}] {}'.format(result.type, result.message))\n\n # Normal events are returned as dicts\n elif isinstance(result, dict):\n result = dict(result)\n if '_time' in result:\n result['_time'] = SplunkAbstraction._to_datetime(result['_time'])\n yield {\n 'time': result['_time'] if '_time' in result else '',\n 'metadata': {k: v for k, v in result.items() if k.startswith('_')},\n 'state': {k: v for k, v in result.items() if not k.startswith('_')}\n }\n\n else:\n logger.warning('Unknown result type in _parse_results: {}'.format(result))\n\n assert result_reader.is_preview is False",
"def getResults():"
] | [
"0.6209895",
"0.5871288",
"0.58117527",
"0.57239765",
"0.57141274",
"0.56517756",
"0.56350136",
"0.56172013",
"0.55664617",
"0.5535393",
"0.5498918",
"0.5498918",
"0.54667425",
"0.54594475",
"0.5428907",
"0.5407299",
"0.53945076",
"0.5391623",
"0.5381492",
"0.537054",
"0.5364545",
"0.5325635",
"0.53035504",
"0.52828366",
"0.52813774",
"0.52623224",
"0.5242788",
"0.52306855",
"0.51912284",
"0.51790565"
] | 0.6991769 | 0 |
Open a host program output file and parse out the runtime of the program. | def get_host_runtime(path: pathlib.Path):
with path.open() as f:
# Read lines
lines = f.readlines()
# Parse out the runtime
runtime = 0
for line in lines:
if "Host time" in line:
line_split = line.split(' ')
runtime = float(line_split[-1])
break
return runtime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_program():\n filepath = os.path.join(os.getcwd(), os.path.dirname(__file__), PROGRAM_TXT)\n f = open(filepath, 'r')\n program = f.read()\n f.close()\n return program.strip().split('\\n')",
"def load(self, program_file):\n\n if self.debug:\n print()\n print_heading(\"reading program from file...\", width=40)\n\n program = []\n\n with open(program_file) as file:\n\n for line in file:\n\n line_str = line.split(\"#\")[0].strip()\n\n if line_str:\n\n word = int(line_str, base=2)\n program.append(word)\n\n if self.debug:\n print(self.format_value(word))\n\n if self.debug:\n print()\n print_heading(\"writing program to memory...\", width=40)\n\n for (i, word) in enumerate(program):\n\n self.write_memory(i, word)\n\n if self.debug:\n print(\"[{}]: {}\".format(*self.format_iterable(i, word)))\n\n return",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')",
"def main():\n # get command line args\n args = create_parser()\n\n # report args\n report_args(args)\n\n # check and create instance of process, if possible\n eM = create_process(args)\n\n # write data\n write_data(args.directory, args.file, args.process, args.length, eM)\n\n # write machine to pickle\n write_em_pickle(args.file, eM)",
"def procinfo() -> None:\n if pwndbg.gdblib.qemu.is_qemu():\n print(\n message.error(\n \"QEMU target detected: showing result for the qemu process\"\n \" - so it will be a bit inaccurate (excessive for the parts\"\n \" used directly by the qemu process)\"\n )\n )\n exe = pwndbg.auxv.get()[\"AT_EXECFN\"]\n print(\"%-10s %r\" % (\"exe\", exe))\n\n proc = Process()\n\n # qemu-usermode fail!\n if not proc.status:\n return\n\n print(\"%-10s %s\" % (\"cmdline\", proc.cmdline))\n\n print(\"%-10s %s\" % (\"cwd\", proc.cwd))\n\n files = dict(proc.open_files)\n\n for c in proc.connections:\n files[c.fd] = str(c)\n\n print(\"%-10s %s\" % (\"pid\", proc.pid))\n print(\"%-10s %s\" % (\"tid\", proc.tid))\n\n if proc.selinux != \"unconfined\":\n print(\"%-10s %s\" % (\"selinux\", proc.selinux))\n\n print(\"%-10s %s\" % (\"ppid\", proc.ppid))\n\n if not pwndbg.gdblib.android.is_android():\n print(\"%-10s %s\" % (\"uid\", proc.uid))\n print(\"%-10s %s\" % (\"gid\", proc.gid))\n print(\"%-10s %s\" % (\"groups\", proc.groups))\n else:\n print(\"%-10s %s\" % (\"uid\", list(map(pwndbg.lib.android.aid_name, proc.uid))))\n print(\"%-10s %s\" % (\"gid\", list(map(pwndbg.lib.android.aid_name, proc.gid))))\n print(\"%-10s %s\" % (\"groups\", list(map(pwndbg.lib.android.aid_name, proc.groups))))\n\n for fd, path in files.items():\n if not set(path) < set(string.printable):\n path = repr(path)\n\n print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n return",
"def load(self):\n \"\"\"Load a program into memory.\"\"\"\n\n if len(sys.argv) != 2:\n print(\"format: ls8.py [filename]\")\n sys.exit(1)\n\n program = sys.argv[1]\n address = 0\n\n # For now, we've just hardcoded a program:\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n #open file\n with open(program) as file:\n #read the lines\n for line in file:\n #parse out comments\n line = line.strip().split(\"#\")[0]\n #cast numbers from strings to ints\n val = line.strip()\n #ignore blank lines\n if line == \"\":\n continue\n\n value = int(val, 2)\n self.ram[address] = value\n address +=1",
"def main():\n run_program()",
"def program_data(progf):\r\n if os.path.exists(progf):\r\n prog = \"\"\r\n for line in open(progf, \"r\", encoding=\"utf-8\"):\r\n line = line.split(\"#\")[0]\r\n prog += line\r\n prog = prog.split()\r\n return prog",
"def parseProgram(inputFile):\n print(\"Program\")\n parseStatements(inputFile)",
"def main():\n\t\n\tfilename = optParse()\n\t\n\ttry:\n\t\tinput = loadFile(filename)\n\texcept IOError, (errno, msg):\n\t\tprint >>sys.stderr, msg\n\t\tsys.exit(-1)\n\t\n\toutput = parse(input)\n\tprint output",
"def run_executable(args, input_file) -> dict:\n try:\n Path(args.exe).resolve(strict=True)\n except FileNotFoundError as error:\n raise error\n\n os.environ[\"OMP_NUM_THREADS\"] = str(args.omp_num_threads)\n run_command = ['./' + args.exe, input_file]\n\n if 'mpi' or 'hybrid' in args.build_type:\n run_command = ['mpirun', '-np', str(args.np)] + run_command\n\n process = subprocess.run(run_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if process.returncode != 0:\n warnings.warn('process returned to stderr: ' + \" \".join(run_command))\n\n return {'stdout': process.stdout.decode(\"utf-8\").split('\\n'),\n 'stderr': process.stderr.decode(\"utf-8\").split('\\n'),\n 'returncode': process.returncode}",
"def open_program(path):\r\n os.startfile(path)",
"def load(self):\n\n address = 0\n program = []\n\n if len(sys.argv) < 2:\n print(\"Please pass in a second file.\")\n sys.exit()\n\n file_name = sys.argv[1]\n try:\n with open(file_name) as file:\n for line in file:\n split_line = line.split('#')[0]\n command = split_line.strip()\n\n if command == '':\n continue\n\n program.append(int(command, 2))\n\n except FileNotFoundError:\n print(f'{sys.argv[0]}: {sys.argv[1]} file was not found')\n sys.exit()\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1",
"def run_file(self, user_input):\n # Extract the important information\n self.path, self.name = self.extractor.extract_program_information(user_input)\n\n # Determine what language the program is\n program_type = self.determine_program_type(path, name)\n\n # If the file is python, run it the specific way\n # @TODO: Make it work without shell=True\n if program_type == \"python\":\n subprocess.Popen(\"python \" + self.path + self.name, shell=True)",
"def main():\n usage = \"usage: %prog [options] input\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) != 0:\n parser.print_help()\n return 2\n\n # Download and execute binary.\n print('hello from stage2!')\n times = 0\n uri = binarypath()\n exe = os.path.join('.', uri[uri.rfind('/') + 1:])\n\n # Download and run binary\n while times < 10:\n try:\n with open(exe, 'wb') as binary:\n response = urllib2.urlopen(uri)\n binary.write(response.read())\n break\n except Exception as e:\n sys.stderr.write('stage2: ' + str(e) + '\\n')\n sys.stderr.write('uri: ' + uri + '\\n')\n times += 1\n time.sleep(7)\n\n try:\n os.chmod(exe, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n os.system(exe)\n except Exception as e:\n sys.stderr.write('stage2: ' + str(e) + '\\n')\n sys.stderr.write('Malware sample execution failed!')\n return 1",
"def run_processing_engine(input_file):\n from subprocess import Popen, PIPE\n p = Popen([\"python\", \"process.py\", input_file, \"-v\"], stdout=PIPE)\n return p.wait()",
"def main():\n\n # Check command line arguments\n arguments = sys.argv[1:]\n if len(arguments) != 1:\n print(\"Error! One command line argument is required.\")\n sys.exit()\n\n else:\n print(\"\\nNow opening file...\")\n # Print the path provided and try to open the file for reading\n path = os.getcwd()+ \"/\" + arguments[0]\n print(path) #print path\n names = Names()\n devices = Devices(names)\n network = Network(names, devices)\n monitors = Monitors(names, devices, network)\n scanner = Scanner(path, names)\n parser = Parser(names, devices, network, monitors, scanner)\n Error.reset()\n parser.parse_network()",
"def load(self):\n\n address = 0\n\n if len(sys.argv) < 2:\n print('ERROR - Provide program address to load')\n return\n\n program_filename = sys.argv[1]\n\n program_text = open(program_filename).read()\n program_lines = program_text.split('\\n')\n program = []\n\n for line in program_lines:\n blocks = line.split()\n if len(blocks) > 0:\n if blocks[0] != '#':\n inst = blocks[0]\n program.append(int(inst, 2))\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1",
"def get_prog_file():\n get_file()\n ## Executa\n file = ARGS.output\n os.system(\"chmod +x \" + file)\n subprocess.call([file])",
"def build_runtime(self, runtime_name, file):\n self.compute_handler.build_runtime(runtime_name, file)",
"def load(self):\n\n address = 0\n\n program = sys.argv[1]\n\n with open(program) as p:\n for instruction in p:\n if instruction[0] == '#':\n continue\n\n instruction = instruction.strip()\n temp = instruction.split()\n\n if len(temp) == 0:\n continue\n\n self.ram[address] = int(temp[0], 2)\n address += 1\n \n # print(\"======= PROGRAM =========\")\n # for i in self.ram[:35]:\n # print(i)",
"def main():\n\n BASIC.run(PROGRAM)",
"def get_program(self, params):\n return [json.load(argparse.FileType('r')(params['infile']))], dict()",
"def ez_run(cls, program_string):\n res = Cpu('test')\n res.load(program_string.split('\\n'))\n res.run()\n return res",
"def execute_file (self, program):\n with open (program, \"r\") as stream:\n self.execute (stream.read ())\n return self.context",
"def main():\n heap = Heap()\n if len(sys.argv) != 2:\n print('usage: python Driver.py <text_file>')\n else:\n heap.go(str(sys.argv[1]))",
"def cl_program_from_file(context, filename):\n return cl.Program(context, open(os.path.join(CL_PATH, filename)).read())",
"def getCompilerOutput(uname):\n fname = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname, 'output').encode('utf8')\n if os.path.exists(fname):\n stdout_file = open(fname, 'r')\n output = unicode(stdout_file.read(), 'utf-8')\n stdout_file.close()\n return output\n else:\n return returnError(\"Output not available for \" + uname, 404)",
"def getProgramFile(self) -> java.io.File:\n ...",
"def exec_parser(src_file: str):\n stdin = open(src_file)\n result = subprocess.run(['php', 'parse.php'], check=True, stdout=subprocess.PIPE, stdin=stdin)\n return result.stdout"
] | [
"0.58105475",
"0.57347614",
"0.5549151",
"0.5497355",
"0.5476351",
"0.5475047",
"0.54340535",
"0.54338425",
"0.5428344",
"0.53656286",
"0.5295708",
"0.5295352",
"0.52802324",
"0.52734137",
"0.52634996",
"0.5248898",
"0.5242457",
"0.52363616",
"0.5235858",
"0.5185998",
"0.51673996",
"0.5152091",
"0.5094969",
"0.50870997",
"0.50818455",
"0.5078691",
"0.5075128",
"0.50744617",
"0.5072456",
"0.5069372"
] | 0.5749508 | 1 |
Calculate the average runtime reported in all host output files in a given folder for a particular test case. | def get_avg_host_runtime(path: pathlib.Path, testfile):
total_time = 0.0
num_files = 0
for filename in path.iterdir():
if (testfile in str(filename)) and ('host' in str(filename)):
total_time += get_host_runtime(filename)
total_time += get_overhead_time(filename)[0]
num_files += 1
if num_files > 0:
return (total_time / num_files)
else:
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performances)/max_performance",
"def get_avg_overhead_time(path: pathlib.Path, testfile, num_dpus, num_tasks):\n time = [0.0]*6\n num_files = 0\n for filename in path.iterdir():\n dpus = re.search(rf\"dpus={num_dpus}[^0-9]\", str(filename))\n tasklets = re.search(rf\"tasklets={num_tasks}[^0-9]\", str(filename))\n \n if (testfile in str(filename)) and (dpus is not None) and (tasklets is not None):\n time = list(map(add, time, get_overhead_time(filename)))\n num_files += 1\n\n if num_files > 0:\n return [x / num_files for x in time]\n else:\n return -1",
"def get_avg_max_cycles(path: pathlib.Path, testfile, num_dpus, num_tasks):\n total_cycles = 0\n num_files = 0\n for filename in path.iterdir():\n dpus = re.search(rf\"dpus={num_dpus}[^0-9]\", str(filename))\n tasklets = re.search(rf\"tasklets={num_tasks}[^0-9]\", str(filename))\n \n if (testfile in str(filename)) and (dpus is not None) and (tasklets is not None):\n total_cycles += get_max_cycles(filename)\n num_files += 1\n\n if num_files > 0:\n return (total_cycles / num_files)\n else:\n return -1",
"def _get_running_time(self):\n time_sum = 0.0\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n line = open('{0}/{1}/{2}/out/OUTDOCK'.format(self.path, subdir, DOCKING_RUN_FILES),'r').readlines()[-1]\n if line.startswith('elapsed time'):\n time = float(line.split()[-1])\n time_sum = time_sum + time\n except:\n pass \n self.running_time = time_sum",
"def driver(rootdir, destination, dataset_name):\n global metric_result \n global result\n metric_result = {\"query image\": [], \n \"k\": [], \n \"precision for k = 3\": [], \n \"reciprocal rank for k = 3\": [],\n \"precision for k = 5\": [], \n \"reciprocal rank for k = 5\": [], \n \"precision for k = 7\": [],\n \"reciprocal rank for k = 7\": [], \n \"time in seconds\": []}\n \n siamese_model = get_siamese(input_shape=(1, 48, 48))\n siamese_model.summary()\n APlist_3 = []\n RRlist_3 = []\n APlist_5 = []\n RRlist_5 = []\n APlist_7 = []\n RRlist_7 = []\n # destination = \"..\\\\result\\\\seamese_net_avg_images_seed_np_2_tf_2\\\\\" # + subdir1.split(\"\\\\\")[-1]\n \n \n for subdir1, dirs1, files1 in os.walk(rootdir):\n start = time.time()\n query1_name = subdir1.split(\"\\\\\")[-1]\n \n os.makedirs(destination, exist_ok=True)\n \n query1_average_image_time_start = time.time()\n query1 = averageImage(subdir1)\n query1_average_image_time_end = time.time()\n \n result = {\"query1\": [], \"query2\":[], \"size\": [], \"siamese_distance\": [], \"average_image_time_query1\": [], \"average_image_time_query2\": [], \"patch_retrieval_time\": [], \"image_comparison_time\": [],\"total_time\": []}\n \n \n if not subdir1.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n for subdir2, dirs2, files2 in os.walk(rootdir):\n if not subdir2.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n if (subdir1 != subdir2):\n \n start_per_image = time.time()\n \n query2_name = subdir2.split(\"\\\\\")[-1]\n # print(subdir1, subdir2)\n \n query2_average_image_time_start = time.time()\n query2 = averageImage(subdir2)\n query2_average_image_time_end = time.time()\n\n siamese_distance = compare(siamese_model, query1, query2)\n # print(\"siamese_distance between {} and {} value : {}\".format(query1_name, query2_name, siamese_distance))\n end_per_image = time.time()\n \n result[\"query1\"].append(query1_name)\n result[\"query2\"].append(query2_name)\n result[\"size\"].append((496, 512))\n result[\"siamese_distance\"].append(siamese_distance)\n result[\"average_image_time_query1\"].append(query1_average_image_time_end - query1_average_image_time_start)\n result[\"average_image_time_query2\"].append(query2_average_image_time_end - query2_average_image_time_start)\n result[\"total_time\"].append(end_per_image - start_per_image)\n \n #save result tp csv file sorted w.r.t siamese_distance\n df = pd.DataFrame(data=result)\n df = df.sort_values(by=[\"siamese_distance\"])\n df.to_csv(destination + \"\\\\\" + query1_name +\".csv\")\n \n APlist_3.append(calculateAvgPrecision(df, 3))\n RRlist_3.append(calculateReciprocalRank(df, 3))\n \n APlist_5.append(calculateAvgPrecision(df, 5))\n RRlist_5.append(calculateReciprocalRank(df, 5))\n \n APlist_7.append(calculateAvgPrecision(df, 7))\n RRlist_7.append(calculateReciprocalRank(df, 7))\n \n # print(APlist, RRlist)\n end = time.time()\n metric_result[\"query image\"].append(query1_name)\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(calculateAvgPrecision(df, 3))\n metric_result[\"reciprocal rank for k = 3\"].append(calculateReciprocalRank(df, 3))\n \n metric_result[\"precision for k = 5\"].append(calculateAvgPrecision(df, 5))\n metric_result[\"reciprocal rank for k = 5\"].append(calculateReciprocalRank(df, 5))\n \n metric_result[\"precision for k = 7\"].append(calculateAvgPrecision(df, 7))\n metric_result[\"reciprocal rank for k = 7\"].append(calculateReciprocalRank(df, 7))\n metric_result[\"time in seconds\"].append((end - start))\n \n print(\"Average 
Precision (AP) considering K = 3 : {}\".format(sum(APlist_3)/len(APlist_3)))\n print(\"Reciprocal Rank (RR) considering K = 3 : {}\".format(sum(RRlist_3)/len(RRlist_3)))\n \n print(\"Average Precision (AP) considering K = 5 : {}\".format(sum(APlist_5)/len(APlist_5)))\n print(\"Reciprocal Rank (RR) considering K = 5 : {}\".format(sum(RRlist_5)/len(RRlist_5)))\n \n print(\"Average Precision (AP) considering K = 7 : {}\".format(sum(APlist_7)/len(APlist_7)))\n print(\"Reciprocal Rank (RR) considering K = 7 : {}\".format(sum(RRlist_7)/len(RRlist_7)))\n \n metric_result[\"query image\"].append(\"Average AP and Average RR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(sum(APlist_3)/len(APlist_3))\n metric_result[\"reciprocal rank for k = 3\"].append(sum(RRlist_3)/len(RRlist_3))\n \n metric_result[\"precision for k = 5\"].append(sum(APlist_5)/len(APlist_5))\n metric_result[\"reciprocal rank for k = 5\"].append(sum(RRlist_5)/len(RRlist_5))\n \n metric_result[\"precision for k = 7\"].append(sum(APlist_7)/len(APlist_7))\n metric_result[\"reciprocal rank for k = 7\"].append(sum(RRlist_7)/len(RRlist_7))\n \n metric_result[\"time in seconds\"].append(sum(metric_result[\"time in seconds\"]))\n\n\n MAP = (sum(APlist_3)/len(APlist_3) + sum(APlist_5)/len(APlist_5) + sum(APlist_7)/len(APlist_7))/3\n MRR = (sum(RRlist_3)/len(RRlist_3) + sum(RRlist_5)/len(RRlist_5) + sum(RRlist_7)/len(RRlist_7))/3\n \n metric_result[\"query image\"].append(\"MAP and MRR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(MAP)\n metric_result[\"reciprocal rank for k = 3\"].append(MRR)\n \n metric_result[\"precision for k = 5\"].append(0)\n metric_result[\"reciprocal rank for k = 5\"].append(0)\n \n metric_result[\"precision for k = 7\"].append(0)\n metric_result[\"reciprocal rank for k = 7\"].append(0)\n \n \n metric_result[\"time in seconds\"].append(0)\n \n \n metric_df = pd.DataFrame(data=metric_result)\n metric_df.to_csv(destination + \"\\\\\" + \"CBIR metric.csv\")\n \n del siamese_model\n return MAP, MRR",
"def runTests(tests_dir, output_dir):\n\n runtime = 0\n os.makedirs(tests_dir, exist_ok=True)\n for test_case in os.listdir(tests_dir):\n print()\n print(\"Running test: \" + str(test_case))\n\n with open(tests_dir + test_case, \"r\") as f:\n tar, n = list(map(int, f.readline().split(\" \")))\n arr = list(map(int, f.readline().split(\" \")))\n\n start = timeit.default_timer()\n\n try:\n writeOutput(maxCombinationSum(tar, arr), test_case, output_dir)\n except KeyboardInterrupt:\n print(\"\\n\\tTest cancelled - KeyboardInterrupt\")\n except Exception as e:\n print(\"\\tError: \" + str(e))\n\n stop = timeit.default_timer()\n print(\"\\tTime for test: \" + str(stop - start) + \" seconds.\")\n\n runtime += (stop - start)\n\n if runtime == 0:\n print(\"No test case files found in tests directory.\\nPlease run solution from inside solution directory.\")\n else:\n print(\"\\nCompleted all tests in : \" + str(runtime) + \" seconds\")",
"def main(rundir, outputfile):\n # Read avg_count for all runs in the ranking\n results = list()\n for run in Runs(rundir):\n filename = run.get_file(name='results/analytics.json')\n doc = util.read_object(filename=filename)\n results.append(doc)\n # Delay execution to allow for testing running post-processing\n # workflows\n time.sleep(1)\n # Write analytics results. Ensure that output directory exists:\n # influenced by http://stackoverflow.com/a/12517490\n if not os.path.exists(os.path.dirname(outputfile)):\n try:\n os.makedirs(os.path.dirname(outputfile))\n except OSError as exc: # guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(outputfile, \"at\") as f:\n json.dump(results, f)",
"def runtime_analysis(config, overall_report):\n test_case_report_list = []\n \n for test_suite in config.get_test_suite():\n report = dict()\n report['stdout_stream'] = ''\n report['stderr_stream'] = ''\n report['outfile'] = ''\n\n input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)\n # using Popen instead of run because I need access to the pid\n # See comment under \"except subprocess.TimeoutExpired:\"\n infile = \"xinfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n outfile = \"xoutfile_\" + uuid.uuid4().hex[0:16] + \".txt\"\n p = subprocess.Popen(['./run_jail.sh',\n config.output_filename,\n str(len(test_suite)), infile, outfile], # command\n stdout=subprocess.PIPE, # capture stdout\n stderr=subprocess.PIPE, # capture stderr\n stdin=subprocess.PIPE, # capture stdin\n universal_newlines=True, # use text mode for std* file objects\n start_new_session=True, # otherwise killing the process group will also kill the Python interpreter\n )\n\n try:\n # send test suite input\n with open(infile, \"w\") as f:\n f.write(input_for_stdin)\n (stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)\n \n report['return_code'] = p.returncode\n report['stderr_stream'] += stderr_stream\n report['stdout_stream'] += stdout_stream\n with open(outfile, \"r\") as f:\n current_outfile = f.read()\n report['outfile'] += current_outfile\n \n # check if test cases passed\n ret_output_match = config.check_for_output_match(current_outfile, test_suite)\n report['test_suite'] = test_suite\n report['output_match'] = ret_output_match\n \n except subprocess.TimeoutExpired:\n # kill the process group so that all child processes spawned by the process are also killed\n # The child need to be killed because, in addition to wasting CPU cycles,\n # it can hold stdout and then Python will wait indefinitely even if the timeout is expired\n os.killpg(os.getpgid(p.pid), signal.SIGKILL) \n report['timeout'] = True\n finally:\n test_case_report_list.append(report)\n \n overall_report['runtime_analysis_done'] = True\n\n return overall_report, test_case_report_list",
"def avg_metric(sharp_path, deblurred_path): # TODO1 do multiprocessing in those methods\n sum_psnr = 0\n sum_mse = 0\n sum_ssim = 0\n\n # List all files\n files_orig = [f for f in listdir(sharp_path) if isfile(join(sharp_path, f))]\n files_deb = [f for f in listdir(deblurred_path) if isfile(join(deblurred_path, f))]\n\n count = 0\n for orig, deb in zip(files_orig, files_deb):\n orig_fn = join(sharp_path, orig)\n deb_fn = join(deblurred_path, deb)\n # Load images\n orig_img = cv2.imread(orig_fn)\n deb_img = cv2.imread(deb_fn)\n orig_img = np.divide(orig_img, 255)\n deb_img = np.divide(deb_img, 255)\n\n # Compute metrics\n sum_psnr += peak_signal_noise_ratio(orig_img, deb_img)\n sum_mse += mean_squared_error(orig_img, deb_img)\n sum_ssim += structural_similarity(orig_img, deb_img, multichannel=True)\n\n count += 1\n print('Analyzed: {}/{}'.format(count, len(files_orig)))\n\n # Average\n avg_psnr = sum_psnr/len(files_orig)\n avg_mse = sum_mse/len(files_orig)\n avg_ssim = sum_ssim/len(files_orig)\n\n return avg_mse, avg_psnr, avg_ssim",
"def _get_average_run_time(cls, suite_model):\n counter = 0\n execution_time = 0.0\n suite_documents = suite_model.get_last_five()\n for suite_document in suite_documents:\n counter += 1\n start_date = dateutil.parser.parse(suite_document['start_date'])\n end_date = dateutil.parser.parse(suite_document['end_date'])\n time_taken = end_date - start_date\n execution_time += time_taken.seconds\n if counter == 0:\n return \"30 Minutes\"\n minutes = math.floor(execution_time / 60)\n seconds = int(execution_time - (minutes * 60))\n return \"{} Minutes {} Seconds\".format(minutes, seconds)",
"def get_average_scores(self):\n models = self.eval_parameters['average_experiment']['models']\n metrics = self.eval_parameters['average_experiment']['metrics_list']\n metrics_keys = list(metrics.keys())\n print(r\"\\begin{table}[]\")\n print(\"\\centering\")\n print(r\"\\tiny\")\n print(\"\\caption{Average results over 85 topics. Each row represents a different run (top 10 runs of each model). Each column represents a different assessments aggregation.}\")\n print(\"\\label{tab:average_results}\")\n print(r\"\\begin{tabular}{@{}\"+''.join(['l']*(len(metrics_keys)+1))+\"@{}}\")\n print(\"runid\", '&'.join(metric.replace('_','\\_')for metric in metrics_keys),sep='&')\n print(r\"\\\\ \\midrule\")\n for model in models:\n runs = self.get_list_files(self.eval_parameters['average_experiment']['runs_folder'] + model+\"/\")\n for file in runs:\n val = []\n for metric_id in metrics_keys:\n out1 = subprocess.check_output(\n ['../trec_eval-master/trec_eval', '-m', metrics[metric_id]['metric'],\n metrics[metric_id]['qrels'], file])\n val += [str(out1.rstrip().split()[2]).replace('b\\'', '').replace('\\'', '')]\n print(file.replace(self.eval_parameters['average_experiment']['runs_folder'],'').replace('_','\\_'),'&', '&'.join(val),r\"\\\\\")\n print(r\"\\bottomrule\")\n print(r\"\\end{tabular}\")\n print(r\"\\end{table}\")",
"def summarize_models(folder):\r\n filepaths = sorted([n for n in glob(folder+r'/*') if '.json' in n])\r\n # create dictionary of results by looping over each file\r\n d = {}\r\n for fi, f in enumerate(filepaths):\r\n # load dictionary from json file\r\n print('processing model {}/{}'.format(fi+1, len(filepaths)))\r\n with open(f, 'r') as fp:\r\n data = json.load(fp)\r\n # extract label and trial number from filename\r\n L = os.path.split(f)[1].split('.')[0].split('__trial_')[0]\r\n t = int(os.path.split(f)[1].split('.')[0].split('__trial_')[1])\r\n # create new dictionary for each model\r\n if L not in d:\r\n d[L] = {\r\n 'img': None,\r\n 'sim': {},\r\n 'avg_err': [],\r\n 'bub_num': [],\r\n 'df': pd.DataFrame()}\r\n # populate dictionary with model information\r\n d[L]['img'] = data['img']\r\n d[L]['sim'][t] = data['sim']\r\n d[L]['df']['rad_trial_'+str(t)] = pd.Series(data['rad'])\r\n d[L]['bub_num'].append(len(pd.Series(data['rad']).dropna()))\r\n d[L]['df']['x_trial_'+str(t)] = pd.Series(np.array(data['cent'])[:, 0])\r\n d[L]['df']['y_trial_'+str(t)] = pd.Series(np.array(data['cent'])[:, 1])\r\n d[L]['avg_err'].append(data['tot_err_percent'])\r\n return d",
"def analyze(self,filenames,output_dir,diffs_only=False):\n def okey(value):\n r = max((['PASS', 'ERROR', 'FAIL', 'UNTESTED', 'SKIPPED'].index(r.outcome) for r in tests[value] if r))\n if r == 0:\n return value\n else:\n return r\n def overall_outcome_weight(results):\n return max((['PASS', 'ERROR', 'FAIL', 'UNTESTED', 'SKIPPED'].index(r.outcome) for r in results if r))\n\n # pass 0: Load results\n results = [RunResults.load(filename) for filename in filenames]\n # step 1: Check if all results are for the same version\n version = results[0].version\n for result in results:\n if result.version != version:\n raise Exception('Analyze: Results for the same FB version required.')\n # step 2: Sort results into groups (platform, cpuarch, arch, run)\n results.sort(key=operator.attrgetter('platform','cpuarch','arch','sequence'))\n\n # pass 1: Create list of tests with results\n tests = {} # Dictionary of all tests found in results; Test ID: list of results\n for result in results:\n column = results.index(result)\n for test_id,test_result in result.items():\n tests.setdefault(test_id,len(results)*[None])[column] = test_result\n\n # pass 2: Analyze results for each tests that didn't pass in all runs\n test_details = {}\n # step 1: Collect details for tests that didn't pass\n for test_id,test_results in tests.items():\n for test_result in test_results:\n if test_result and test_result.outcome != Result.PASS:\n l = test_details.setdefault(test_id,list())\n result = results[test_results.index(test_result)]\n l.append((self.get_run_tag(result.platform,result.cpuarch,result.arch,result.sequence),test_result))\n # step 2: group results for each test\n for test_id,test_results in test_details.items():\n groups = [] # item format: (result,[list_of_runs])\n for result_id,test_result in test_results:\n added = False\n for group in groups:\n if self.compare_results(group[0],test_result):\n group[1].append(result_id)\n added = True\n if not added:\n groups.append((test_result,[result_id]))\n del test_results[:]\n test_results.extend(groups)\n\n # pass 3: Order tests\n test_order = tests.keys()\n test_order.sort(key=okey)\n\n # pass 4: Generate report\n self.print_analysis(version,results,tests,test_details,test_order,\n output_dir, diffs_only)",
"def fileAgglomeration(self, dataset: list):\n result = dict()\n\n startTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\"CPU Model,Index, Filename, Elapsed Time\")\n for idx, e in enumerate(dataset):\n # CPU TIME\n startTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n result[idx] = self._count_occurrences(filename=e)\n endTime = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n\n # CPU Model, Index, Filename, Time Taken Processing File\n fileName = e.split(\"/\")[-1]\n print(f\"{self.cpuModel},{idx + 1},{fileName},{endTime - startTime}\") # Logger ^ Markdown\n\n endTimeForAgglomeration = time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)\n print(\n f\"Total Files Aggregated: {len(dataset)} and total {endTimeForAgglomeration - startTimeForAgglomeration} seconds elapsed.\")\n\n return result",
"def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))",
"def emPerformance(filesAndDirectories='None', resultsFileName='None', iterationCount='3', modes='None', testTypes='None', viewports='None', verbose='False'):\n\n pass",
"def _extract_timings(self, outfile):\n f = open_general(outfile)\n tmptxt = f.readlines()\n f.close()\n search_keys = ['time until scf starts',\n 'vpot->tmat',\n 'gref->gmat',\n 'gonsite->density',\n 'energyloop',\n 'Iteration number',\n 'Total running time']\n\n res = {}\n for isearch in search_keys:\n tmpval = []\n itmp = 0\n while itmp>=0:\n itmp = search_string(isearch, tmptxt)\n if itmp>=0:\n tmpval.append(float(tmptxt.pop(itmp).split()[-1]))\n if len(tmpval)>0:\n res[isearch] = tmpval\n # average over iterations\n niter = len(res.get(search_keys[-2], []))\n if niter>0:\n for key in search_keys[1:6]:\n res[key] = sum(res[key])/niter\n for key in [search_keys[0], search_keys[-1]]:\n res[key] = res[key][0]\n return res",
"def run_psavg_sims(bursttimefile):\n\n nfolder = [5,6,8,12]\n datadirs = [\"P20165/20165-01-01-000\", \"P20165/20165-01-01-001\", \"P20165/20165-01-01-002\",\n \"P10223/10223-01-03-01\", \"P10223/10223-01-03-010\" ]\n\n data_all, unbary_all, tstart_all, tend_all, t0_all, pcus_all, std1dir_all = [], [], [], [], [], [], []\n\n for d in datadirs:\n print(\"I am on directory %s\" %d)\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8192*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div-32768s*.asc\")\n if len(files) == 0:\n files = rxteburst.search_filenames_recursively(\"./%s/\"%d, \"*1div8*.asc\")\n #print(\"File to use %s\" %files[0])\n data = rxte.RXTEData(times=None, channels=None, datafile=files[0], npcus=None, ra=None, dec=None, emid=None, emiddir=None, bary=True)\n\n len_datafile = len(files[0].split(\"/\")[-1])\n len_processed = len(files[0].split(\"/\")[-2])\n std1dir_all.append(files[0][:-(len_datafile+len_processed+1)])\n\n data_all.append(np.array([p.time for p in data.photons])+data.t0)\n unbary_all.append(np.array([p.unbary for p in data.photons])+data.t0)\n tstart_all.append(data.photons[0].unbary+data.t0)\n tend_all.append(data.photons[-1].unbary+data.t0)\n t0_all.append(data.t0)\n pcus_all.append(data.pcus)\n\n t0_sorted, tstart_sorted, tend_sorted, data_sorted, pcus_sorted, std1dir_sorted, unbary_sorted = \\\n zip(*sorted(zip(t0_all, tstart_all, tend_all, data_all, pcus_all, std1dir_all, unbary_all)))\n t0_sorted = np.array(t0_sorted)\n\n psno = [5,6,8,12]\n m_all = [30, 23, 23, 50]\n\n for n,m in zip(psno, m_all):\n psavg_all = sgr1900_results.make_randomly_sampled_periodograms(datadirs, bursttimefile, m, n=1000,\n save_step=100, fileroot=\"sgr1806_psavg%i\"%n,\n data_sorted=data_sorted, t0_sorted=t0_sorted,\n pcus_sorted=pcus_sorted, tend_sorted=tend_sorted,\n tstart_sorted=tstart_sorted,\n unbary_sorted=unbary_sorted)\n\n return",
"def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time",
"def main(rootpath):\n folders = [folder for folder in os.listdir(\n rootpath) if os.path.isdir(rootpath+\"/\"+folder)]\n print(\"=\"*100)\n print(folders)\n print(\"=\"*100)\n\n event2eps = {\"BandyLee_0110_0115\": 0.65,\n \"Capriccio_0516_0523_new\": 0.5,\n \"Gabapentin_0628_0121\": 0.35,\n \"immigrants_0622_0624\": 0.3,\n \"Ingraham_0618_0624\": 0.5,\n \"ItsJustAJacket_0621_0624\": 0.45,\n \"JackBreuer_1228_0115\": 0.6,\n \"JetLi_0519_0523\": 0.6,\n \"SanctuaryCities_0516_0523\": 0.3,\n \"SouthwestKey_0620_0624\": 0.45,\n \"WhereAreTheChildren_0418_0527\": 0.35}\n\n for folder in folders:\n # exclude some events\n # if folder not in [\"germanwings-crash-all-rnr-threads\"]:\n # continue\n\n # specify an event\n # if folder != \"immigrants_0622_0624\":\n # continue\n\n # run total events\n # if folder[0] == \".\" or folder in [\"BandyLee_0110_0115\", \"Gabapentin_0628_0121\", \"Ingraham_0618_0624\", \"ItsJustAJacket_0621_0624\", \"SanctuaryCities_0516_0523\", \"WhereAreTheChildren_0418_0527\"]:\n # continue\n\n print(\"=\" * 100)\n print(\"Running code for {}\".format(folder))\n args = ['python', 'main.py', '-r', rootpath,\n '-f', folder, '-q', \"#\"+folder.split(\"_\")[0],\n '-s', 'test', '-e', 'test', '-p',\n str(event2eps.get(folder, 0.45))]\n print(\"Command line is {}\".format(\" \".join(args)))\n with open(\"./output/\"+folder+\"_output.txt\", \"wb\", 0) as out:\n subprocess.run(args, stdout=out, check=True)\n subprocess.call(args)\n # break\n # time.sleep(random.randint(1, 121))",
"def main(path_gt, path_pred, eval_dir):\n\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n if os.path.isdir(path_gt) and os.path.isdir(path_pred):\n\n metrics_out, phase, measure_names, file_names = compute_metrics_on_directories_raw(path_gt, path_pred)\n df = mat_to_df(metrics_out, phase, measure_names, file_names)\n print_stats(df)\n print_table1(df, eval_dir)\n print_table2(df, eval_dir)\n\n [dice1, dice2, dice3, vold1, vold2, vold3] = compute_metrics_on_directories(path_gt, path_pred)\n\n logging.info('------------Average Dice Figures----------')\n logging.info('Dice 1: %f' % dice1)\n logging.info('Dice 2: %f' % dice2)\n logging.info('Dice 3: %f' % dice3)\n logging.info('Mean dice: %f' % np.mean([dice1, dice2, dice3]))\n logging.info('------------------------------------------')\n\n else:\n raise ValueError(\n \"The paths given needs to be two directories or two files.\")",
"def analysis_host_sec(self):\n #calc the date\n time_now = int(time.time())\n time_local = time.localtime(time_now)\n date = time.strftime(\"%Y-%m-%d\",time_local)\n sum_cpu_ratio = 0\n sum_phy_mem_size = 0\n sum_virt_mem_size = 0\n\n key_re_word = \"%s qa_work\" % self.pid\n for line in self.file_top.readlines():\n if re.search(key_re_word, line):\n #analysis_cpu_rate()\n sum_cpu_ratio += float(line.split()[8])\n self.cpu_list_1sec.append(float(line.split()[8]))\n\n #analysis_host_phy_mem_size(), the standerd unit is \"g\"\n if \"m\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"m\")) / 1000\n elif \"g\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"g\"))\n elif \"k\" in line.split()[5]:\n phy_mem_size = float(line.split()[5].strip(\"k\")) / 1000 / 1000\n else:\n phy_mem_size = 0.0\n self.phy_mem_list_1sec.append(float(phy_mem_size))\n sum_phy_mem_size += phy_mem_size\n\n #analysis_host_virt_mem_size(), the standerd unit is \"g\"\n if \"m\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"m\")) / 1000\n elif \"g\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"g\"))\n elif \"k\" in line.split()[4]:\n vir_mem_size = float(line.split()[4].strip(\"k\")) / 1000 / 1000\n else:\n vir_mem_size = 0\n self.virt_mem_list_1sec.append(float(vir_mem_size))\n sum_virt_mem_size += vir_mem_size\n\n elif re.search(\"top -\", line):\n final_time = date + \" \" + line.split()[2]\n self.top_pertime.append(final_time)\n top_num = min(len(self.top_pertime), len(self.cpu_list_1sec), len(self.phy_mem_list_1sec), len(self.virt_mem_list_1sec))\n\n #cal the average data\n average_cpu_ratio = round(sum_cpu_ratio/len(self.cpu_list_1sec), 2)\n average_phy_mem_size = round(sum_phy_mem_size/len(self.phy_mem_list_1sec), 2)\n average_virt_mem_size = round(sum_virt_mem_size/len(self.virt_mem_list_1sec), 2)\n #cal the max data\n max_cpu_ratio = max(self.cpu_list_1sec)\n max_phy_mem_size = max(self.phy_mem_list_1sec)\n max_virt_mem_size = max(self.virt_mem_list_1sec)\n #insert into mysql-top_list_1sec_avg\n print \"average_cpu_ratio: %s\" % average_cpu_ratio\n print \"average_phy_mem_size: %s\" % average_phy_mem_size\n print \"average_virt_mem_size: %s\" % average_virt_mem_size\n print \"max_cpu_ratio: %s\" % max_cpu_ratio\n print \"max_phy_mem_size: %s\" % max_phy_mem_size\n print \"max_virt_mem_size: %s\" % max_virt_mem_size\n if self.db_onoff == \"on\":\n self.mysql.insert_table_sql_top_avg(self.time_sql, max_cpu_ratio, max_phy_mem_size, max_virt_mem_size)",
"def compute_metrics_on_directories(dir_gt, dir_pred):\n\n res_mat, _, _, _ = compute_metrics_on_directories_raw(dir_gt, dir_pred)\n\n dice1 = np.mean(res_mat[:,0])\n dice2 = np.mean(res_mat[:,3])\n dice3 = np.mean(res_mat[:,6])\n\n vold1 = np.mean(res_mat[:,2])\n vold2 = np.mean(res_mat[:,5])\n vold3 = np.mean(res_mat[:,8])\n\n return [dice1, dice2, dice3, vold1, vold2, vold3]",
"def run_single(input_folder):\n\tstart = time.clock()\n\tinput_files = [os.path.join(input_folder, filename) for filename in os.listdir(input_folder)]\n\tfrequencies = defaultdict(int)\n\t\n\tfor input_file in input_files:\n\t\twith open(input_file, 'r') as f:\n\t\t\t\n\t\t\ttextstr = f.read()\n\t\t\n\t\ttokens = re.findall(token_regex, textstr)\n\t\tfor token in tokens:\n\t\t\tfrequencies[token.lower()] += 1\n\t\t\n\tprint 'Non-MR runtime:', time.clock() - start\n\treturn frequencies",
"def report_totals(output):\n groups = (STATS_PATC.match(line) for line in output.splitlines())\n tuples = (g.groups() for g in groups if g)\n\n results = [0,0,0,0,0]\n for t in tuples:\n results[0] += int(t[0]) # total\n results[1] += int(t[1]) # failures\n results[2] += int(t[2]) # errors\n results[3] += int(t[3]) # skipped\n results[4] += float(t[4]) # elapsed time\n\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\n 'Time elapsed: %.2f' % tuple(results)",
"def evaluate(prediction_folder, label_folder, verbose=False):\n prediction_tasks = next(os.walk(prediction_folder))[1]\n label_tasks = next(os.walk(label_folder))[1]\n # prediction_tasks = label_tasks = ['mlqa', 'tydiqa', 'xquad']\n\n detailed_scores = {}\n for task, langs in TASK2LANGS.items():\n if task in prediction_tasks and task in label_tasks:\n suffix = \"json\" if task in GROUP2TASK[\"qa\"] else \"tsv\"\n # collect scores over all languages\n score = defaultdict(dict)\n for lg in langs:\n prediction_file = os.path.join(prediction_folder, task, f\"test-{lg}.{suffix}\")\n label_file = os.path.join(label_folder, task, f\"test-{lg}.{suffix}\")\n score_lg = evaluate_one_task(prediction_file, label_file, task, language=lg)\n for metric in score_lg:\n score[metric][lg] = score_lg[metric]\n # average over all languages\n avg_score = {}\n for m in score:\n avg_score[f'avg_{m}'] = sum(score[m].values()) / len(score[m])\n score.update(avg_score)\n if task in GROUP2TASK[\"qa\"]:\n score['avg_metric'] = (score['avg_exact_match'] + score['avg_f1']) / 2\n elif 'avg_f1' in score:\n score['avg_metric'] = score['avg_f1']\n elif 'avg_accuracy' in score:\n score['avg_metric'] = score['avg_accuracy']\n detailed_scores[task] = score\n if verbose:\n avg_result = ', '.join(['{}={:.1f}'.format(k, v) for k, v in score.items() if k.startswith('avg')])\n print('- Evaluate {}:\\t{}'.format(task, avg_result))\n\n # Display logic:\n overall_scores = {}\n all_tasks = set(TASK2LANGS.keys())\n available_tasks = set(detailed_scores.keys())\n\n # If scores of all tasks are available, show the overall score in the main table\n if all_tasks == available_tasks:\n overall_scores['all_task'] = sum(detailed_scores[task]['avg_metric'] for task in all_tasks) / len(all_tasks)\n\n # If scores of all tasks in a sub group are available, show the score in the sub table\n for group, group_tasks in GROUP2TASK.items():\n if len(set(group_tasks) - available_tasks) == 0:\n overall_scores[group] = sum(detailed_scores[task]['avg_metric'] for task in group_tasks) / len(group_tasks)\n\n return overall_scores, detailed_scores",
"def compute(self, result_file_dict):\r\n for part in self.parts:\r\n #=====================Need to change, temporal=========================\r\n if part == 'train':\r\n continue # because the train not have the label\r\n #=======================================================================\r\n gt = self.gt_dict[part]\r\n result_file = result_file_dict[part]\r\n # import ipdb; ipdb.set_trace()\r\n for key, item in result_file.items():\r\n self._result_name = item\r\n # score_records, num_videos = self.load_results(result_file)\r\n score_records, num_videos = self.load_results(item)\r\n logger.info(f'Compute Metric of {item}')\r\n assert num_videos == len(gt), f'the number of saved videos does not match the ground truth, {num_videos} != {len(gt)}'\r\n temp_result = self.eval_method(score_records, gt, str(key))\r\n if temp_result > self.optimal_resulst:\r\n self.optimal_resulst = temp_result\r\n \r\n return self.optimal_resulst",
"def main():\n for opt in optimizations:\n compile_command = [\"g++\", \"main.cpp\", f\"-O{opt}\", \"-lpthread\"]\n run(compile_command, check=True)\n for threads in num_threads:\n print(f\"{opt=}, {threads=}\", end=\"\")\n stdout.flush()\n test_command = ['./a.out', str(iterations), str(threads)]\n total = 0\n for samples in range(1, repeats_for_average + 1):\n print(\".\", end=\"\")\n stdout.flush()\n output = run(test_command, check=True, capture_output=True).stdout\n total += int(output.split()[-2]) / 1000\n print(f\"\\t{total / samples:.03f}\")",
"def test_peformance(self):\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file), \"r\")\n report = json.loads(_file.read())\n timedeltas.append(\n parse_datetime(report['finish']) - parse_datetime(report['start']))\n _file.close()\n\n # number of queue\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n\n # get average time\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n\n # get total process time\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas)))",
"def calculate_mean(data_dir):\n data = ([each for each in os.listdir(data_dir)\n if each.endswith('.h5')])\n all_data = []\n for num_data in data:\n processed_data = os.path.join(data_dir, num_data)\n file = h5py.File(processed_data, 'r') \n data = file.get('Processed_data') \n all_data.append(data)\n all_data = np.array(all_data)\n all_data = np.mean(all_data, axis=0)\n return all_data"
] | [
"0.65712726",
"0.6475712",
"0.64513355",
"0.6136365",
"0.6105839",
"0.60927486",
"0.5996279",
"0.59590924",
"0.5829954",
"0.57890874",
"0.5748628",
"0.5721866",
"0.5715467",
"0.5701655",
"0.56953984",
"0.56830746",
"0.5660013",
"0.5658348",
"0.5656913",
"0.5650345",
"0.5627572",
"0.5626511",
"0.56138647",
"0.5602048",
"0.5600864",
"0.55995697",
"0.55933875",
"0.5583761",
"0.5564113",
"0.55509794"
] | 0.7699728 | 0 |
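The row above pairs a docstring-style query with the `get_avg_host_runtime` snippet, which sums host runtime plus parsing overhead over every matching `host` output file and returns -1 when no file matches. As a minimal usage sketch (not part of the dataset row): the results directory and test-case name below are hypothetical, and `get_avg_host_runtime`, `get_host_runtime`, and `get_overhead_time` are assumed to be defined as in the snippet.

```python
import pathlib

# Hypothetical folder of benchmark output files and hypothetical test-case name;
# units of the returned average are whatever get_host_runtime() reports.
results_dir = pathlib.Path("results")
avg_runtime = get_avg_host_runtime(results_dir, "vector_add")

if avg_runtime < 0:
    print("no matching host output files found")
else:
    print(f"average host runtime: {avg_runtime:.3f}")
```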
Calculate the average broken down overhead time reported by output files in a given folder for a particular test case. | def get_avg_overhead_time(path: pathlib.Path, testfile, num_dpus, num_tasks):
time = [0.0]*6
num_files = 0
for filename in path.iterdir():
dpus = re.search(rf"dpus={num_dpus}[^0-9]", str(filename))
tasklets = re.search(rf"tasklets={num_tasks}[^0-9]", str(filename))
if (testfile in str(filename)) and (dpus is not None) and (tasklets is not None):
time = list(map(add, time, get_overhead_time(filename)))
num_files += 1
if num_files > 0:
return [x / num_files for x in time]
else:
return -1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_avg_host_runtime(path: pathlib.Path, testfile):\n total_time = 0.0\n num_files = 0\n for filename in path.iterdir():\n if (testfile in str(filename)) and ('host' in str(filename)):\n total_time += get_host_runtime(filename)\n total_time += get_overhead_time(filename)[0]\n num_files += 1\n\n if num_files > 0:\n return (total_time / num_files)\n else:\n return -1",
"def _get_running_time(self):\n time_sum = 0.0\n for subdir in os.listdir(self.path):\n if subdir.startswith('.'):\n continue\n try:\n line = open('{0}/{1}/{2}/out/OUTDOCK'.format(self.path, subdir, DOCKING_RUN_FILES),'r').readlines()[-1]\n if line.startswith('elapsed time'):\n time = float(line.split()[-1])\n time_sum = time_sum + time\n except:\n pass \n self.running_time = time_sum",
"def get_avg_max_cycles(path: pathlib.Path, testfile, num_dpus, num_tasks):\n total_cycles = 0\n num_files = 0\n for filename in path.iterdir():\n dpus = re.search(rf\"dpus={num_dpus}[^0-9]\", str(filename))\n tasklets = re.search(rf\"tasklets={num_tasks}[^0-9]\", str(filename))\n \n if (testfile in str(filename)) and (dpus is not None) and (tasklets is not None):\n total_cycles += get_max_cycles(filename)\n num_files += 1\n\n if num_files > 0:\n return (total_cycles / num_files)\n else:\n return -1",
"def _avg_performance(bd_dims, BD_directory, run,archive_file_path,max_performance,conversion_func=None,from_fitfile=False):\n path=get_archive_filepath(BD_directory,run, archive_file_path)\n all_performances=get_all_performances(bd_dims, path, conversion_func,from_fitfile)\n return np.mean(all_performances)/max_performance",
"def _get_average_run_time(cls, suite_model):\n counter = 0\n execution_time = 0.0\n suite_documents = suite_model.get_last_five()\n for suite_document in suite_documents:\n counter += 1\n start_date = dateutil.parser.parse(suite_document['start_date'])\n end_date = dateutil.parser.parse(suite_document['end_date'])\n time_taken = end_date - start_date\n execution_time += time_taken.seconds\n if counter == 0:\n return \"30 Minutes\"\n minutes = math.floor(execution_time / 60)\n seconds = int(execution_time - (minutes * 60))\n return \"{} Minutes {} Seconds\".format(minutes, seconds)",
"def report_totals(output):\n groups = (STATS_PATC.match(line) for line in output.splitlines())\n tuples = (g.groups() for g in groups if g)\n\n results = [0,0,0,0,0]\n for t in tuples:\n results[0] += int(t[0]) # total\n results[1] += int(t[1]) # failures\n results[2] += int(t[2]) # errors\n results[3] += int(t[3]) # skipped\n results[4] += float(t[4]) # elapsed time\n\n print 'Tests run: %d, Failures: %d, Errors: %d, Skipped: %d, '\\\n 'Time elapsed: %.2f' % tuple(results)",
"def _extract_timings(self, outfile):\n f = open_general(outfile)\n tmptxt = f.readlines()\n f.close()\n search_keys = ['time until scf starts',\n 'vpot->tmat',\n 'gref->gmat',\n 'gonsite->density',\n 'energyloop',\n 'Iteration number',\n 'Total running time']\n\n res = {}\n for isearch in search_keys:\n tmpval = []\n itmp = 0\n while itmp>=0:\n itmp = search_string(isearch, tmptxt)\n if itmp>=0:\n tmpval.append(float(tmptxt.pop(itmp).split()[-1]))\n if len(tmpval)>0:\n res[isearch] = tmpval\n # average over iterations\n niter = len(res.get(search_keys[-2], []))\n if niter>0:\n for key in search_keys[1:6]:\n res[key] = sum(res[key])/niter\n for key in [search_keys[0], search_keys[-1]]:\n res[key] = res[key][0]\n return res",
"def driver(rootdir, destination, dataset_name):\n global metric_result \n global result\n metric_result = {\"query image\": [], \n \"k\": [], \n \"precision for k = 3\": [], \n \"reciprocal rank for k = 3\": [],\n \"precision for k = 5\": [], \n \"reciprocal rank for k = 5\": [], \n \"precision for k = 7\": [],\n \"reciprocal rank for k = 7\": [], \n \"time in seconds\": []}\n \n siamese_model = get_siamese(input_shape=(1, 48, 48))\n siamese_model.summary()\n APlist_3 = []\n RRlist_3 = []\n APlist_5 = []\n RRlist_5 = []\n APlist_7 = []\n RRlist_7 = []\n # destination = \"..\\\\result\\\\seamese_net_avg_images_seed_np_2_tf_2\\\\\" # + subdir1.split(\"\\\\\")[-1]\n \n \n for subdir1, dirs1, files1 in os.walk(rootdir):\n start = time.time()\n query1_name = subdir1.split(\"\\\\\")[-1]\n \n os.makedirs(destination, exist_ok=True)\n \n query1_average_image_time_start = time.time()\n query1 = averageImage(subdir1)\n query1_average_image_time_end = time.time()\n \n result = {\"query1\": [], \"query2\":[], \"size\": [], \"siamese_distance\": [], \"average_image_time_query1\": [], \"average_image_time_query2\": [], \"patch_retrieval_time\": [], \"image_comparison_time\": [],\"total_time\": []}\n \n \n if not subdir1.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n for subdir2, dirs2, files2 in os.walk(rootdir):\n if not subdir2.endswith(\"\\\\\"+ dataset_name +\"\\\\\"):\n if (subdir1 != subdir2):\n \n start_per_image = time.time()\n \n query2_name = subdir2.split(\"\\\\\")[-1]\n # print(subdir1, subdir2)\n \n query2_average_image_time_start = time.time()\n query2 = averageImage(subdir2)\n query2_average_image_time_end = time.time()\n\n siamese_distance = compare(siamese_model, query1, query2)\n # print(\"siamese_distance between {} and {} value : {}\".format(query1_name, query2_name, siamese_distance))\n end_per_image = time.time()\n \n result[\"query1\"].append(query1_name)\n result[\"query2\"].append(query2_name)\n result[\"size\"].append((496, 512))\n result[\"siamese_distance\"].append(siamese_distance)\n result[\"average_image_time_query1\"].append(query1_average_image_time_end - query1_average_image_time_start)\n result[\"average_image_time_query2\"].append(query2_average_image_time_end - query2_average_image_time_start)\n result[\"total_time\"].append(end_per_image - start_per_image)\n \n #save result tp csv file sorted w.r.t siamese_distance\n df = pd.DataFrame(data=result)\n df = df.sort_values(by=[\"siamese_distance\"])\n df.to_csv(destination + \"\\\\\" + query1_name +\".csv\")\n \n APlist_3.append(calculateAvgPrecision(df, 3))\n RRlist_3.append(calculateReciprocalRank(df, 3))\n \n APlist_5.append(calculateAvgPrecision(df, 5))\n RRlist_5.append(calculateReciprocalRank(df, 5))\n \n APlist_7.append(calculateAvgPrecision(df, 7))\n RRlist_7.append(calculateReciprocalRank(df, 7))\n \n # print(APlist, RRlist)\n end = time.time()\n metric_result[\"query image\"].append(query1_name)\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(calculateAvgPrecision(df, 3))\n metric_result[\"reciprocal rank for k = 3\"].append(calculateReciprocalRank(df, 3))\n \n metric_result[\"precision for k = 5\"].append(calculateAvgPrecision(df, 5))\n metric_result[\"reciprocal rank for k = 5\"].append(calculateReciprocalRank(df, 5))\n \n metric_result[\"precision for k = 7\"].append(calculateAvgPrecision(df, 7))\n metric_result[\"reciprocal rank for k = 7\"].append(calculateReciprocalRank(df, 7))\n metric_result[\"time in seconds\"].append((end - start))\n \n print(\"Average 
Precision (AP) considering K = 3 : {}\".format(sum(APlist_3)/len(APlist_3)))\n print(\"Reciprocal Rank (RR) considering K = 3 : {}\".format(sum(RRlist_3)/len(RRlist_3)))\n \n print(\"Average Precision (AP) considering K = 5 : {}\".format(sum(APlist_5)/len(APlist_5)))\n print(\"Reciprocal Rank (RR) considering K = 5 : {}\".format(sum(RRlist_5)/len(RRlist_5)))\n \n print(\"Average Precision (AP) considering K = 7 : {}\".format(sum(APlist_7)/len(APlist_7)))\n print(\"Reciprocal Rank (RR) considering K = 7 : {}\".format(sum(RRlist_7)/len(RRlist_7)))\n \n metric_result[\"query image\"].append(\"Average AP and Average RR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(sum(APlist_3)/len(APlist_3))\n metric_result[\"reciprocal rank for k = 3\"].append(sum(RRlist_3)/len(RRlist_3))\n \n metric_result[\"precision for k = 5\"].append(sum(APlist_5)/len(APlist_5))\n metric_result[\"reciprocal rank for k = 5\"].append(sum(RRlist_5)/len(RRlist_5))\n \n metric_result[\"precision for k = 7\"].append(sum(APlist_7)/len(APlist_7))\n metric_result[\"reciprocal rank for k = 7\"].append(sum(RRlist_7)/len(RRlist_7))\n \n metric_result[\"time in seconds\"].append(sum(metric_result[\"time in seconds\"]))\n\n\n MAP = (sum(APlist_3)/len(APlist_3) + sum(APlist_5)/len(APlist_5) + sum(APlist_7)/len(APlist_7))/3\n MRR = (sum(RRlist_3)/len(RRlist_3) + sum(RRlist_5)/len(RRlist_5) + sum(RRlist_7)/len(RRlist_7))/3\n \n metric_result[\"query image\"].append(\"MAP and MRR\")\n metric_result[\"k\"].append(\"3, 5, 7\")\n metric_result[\"precision for k = 3\"].append(MAP)\n metric_result[\"reciprocal rank for k = 3\"].append(MRR)\n \n metric_result[\"precision for k = 5\"].append(0)\n metric_result[\"reciprocal rank for k = 5\"].append(0)\n \n metric_result[\"precision for k = 7\"].append(0)\n metric_result[\"reciprocal rank for k = 7\"].append(0)\n \n \n metric_result[\"time in seconds\"].append(0)\n \n \n metric_df = pd.DataFrame(data=metric_result)\n metric_df.to_csv(destination + \"\\\\\" + \"CBIR metric.csv\")\n \n del siamese_model\n return MAP, MRR",
"def avg_metric(sharp_path, deblurred_path): # TODO1 do multiprocessing in those methods\n sum_psnr = 0\n sum_mse = 0\n sum_ssim = 0\n\n # List all files\n files_orig = [f for f in listdir(sharp_path) if isfile(join(sharp_path, f))]\n files_deb = [f for f in listdir(deblurred_path) if isfile(join(deblurred_path, f))]\n\n count = 0\n for orig, deb in zip(files_orig, files_deb):\n orig_fn = join(sharp_path, orig)\n deb_fn = join(deblurred_path, deb)\n # Load images\n orig_img = cv2.imread(orig_fn)\n deb_img = cv2.imread(deb_fn)\n orig_img = np.divide(orig_img, 255)\n deb_img = np.divide(deb_img, 255)\n\n # Compute metrics\n sum_psnr += peak_signal_noise_ratio(orig_img, deb_img)\n sum_mse += mean_squared_error(orig_img, deb_img)\n sum_ssim += structural_similarity(orig_img, deb_img, multichannel=True)\n\n count += 1\n print('Analyzed: {}/{}'.format(count, len(files_orig)))\n\n # Average\n avg_psnr = sum_psnr/len(files_orig)\n avg_mse = sum_mse/len(files_orig)\n avg_ssim = sum_ssim/len(files_orig)\n\n return avg_mse, avg_psnr, avg_ssim",
"def main(\n file_pattern=INFILE_PATTERN,\n # folder_pattern=INFOLDER_PATTERN,\n tol_td=TOLERANCE_TIMEDELTA,\n outlier=OUTLIER_THRESHOLD,\n args=ARGS,\n):\n # Initialize IO-directories and setup logging\n path_in, path_out = initialize_io()\n\n # path_diffs = path_out / \"diff_imgs\"\n # if args.export:\n # # Folder not needed otherwise, but variable needs to be passed\n # if not path_diffs.is_dir():\n # path_diffs.mkdir()\n # logging.info(f\"Created folder '{path_diffs}'\")\n\n # Find matching files\n # NOTE: This can take potentially long\n # A folderwise sorting would be much faster\n # t0 = time.time()\n filelist = sorted(path_in.rglob(file_pattern))\n # dur = time.time() - t0\n\n n_files = len(filelist)\n logging.info(f\"Found {n_files} matching files in '{path_in}'\")\n # f\"(took {dur:.4} seconds)\")\n\n # act_list = []\n # df_agg = None\n df_list = []\n med_list = []\n for csv_path in filelist:\n logging.info(f\"Reading '{csv_path.name}'\")\n\n hive, rpi, method, day_str = parse_filename(csv_path.name)\n name = f\"RPi{rpi}_{day_str}_{method}\"\n # Read CSV\n # header = [\n # \"time_central\", \"duration\", \"activity\",\n # \"time1\", \"time2\",\n # \"file1\", \"file2\"\n # ]\n # See https://pandas.pydata.org/pandas-docs/stable/reference/\n # api/pandas.read_csv.html\n # df = pd.read_csv(csv_path, index_col=\"time\", parse_dates=True,\n # date_parser=my_date_parser)\n # Works only with the default pandas time format:\n df = pd.read_csv(\n csv_path,\n index_col=\"time_central\",\n parse_dates=[\"time_central\", \"time1\", \"time2\"],\n # converters={\"path\": my_path_parser}),\n )\n df[\"hour\"] = df.index.hour\n df[\"hive\"] = [hive] * len(df)\n df[\"rpi\"] = [rpi] * len(df)\n df[\"method\"] = [method] * len(df)\n\n # if df_agg is None:\n # df_agg = df\n # else:\n # df_agg = pd.concat([df_agg])\n\n # act_dict = {name: df[\"activity\"]}\n #\n # act_list.append(act_dict)\n\n # Plot_single_activity day\n h_median = plot_single_activity(df[\"activity\"], name, path_out)[1]\n\n # series = df.activity\n # series.index = series.index.hour\n hourly_bxpl_single(df, name, path_out)\n\n # Remove outliers\n if any(df.activity >= outlier):\n logging.warning(\n f\"Found {sum(df.activity >= outlier)} outliers \"\n f\"in {csv_path.name}, filtering them out.\")\n\n # Crop df to plausible measurements\n df = df[df.activity < outlier]\n\n if len(df) > 0:\n name += \"_removed-ols\"\n\n # Plot_single_activity day\n h_median = plot_single_activity(\n df[\"activity\"], name, path_out)[1]\n else:\n logging.warning(f\"All data in {csv_path.name} are outliers, \"\n \"skipping..\")\n continue\n\n df_list.append(df)\n med_list.append(h_median)\n\n df_agg = pd.concat(df_list)\n\n name = \"aggregated\"\n # name_euc = name + \"_euclidean\"\n # name_man = name + \"_manhattan\"\n\n # df_agg_euc = df_agg[df_agg.method == \"euclidean\"]\n # df_agg_man = df_agg[df_agg.method == \"manhattan\"]\n\n # Plot_single_activity day\n # plot_single_activity(df_agg_euc[\"activity\"], name_euc, path_out)\n plot_single_activity(df_agg[\"activity\"], name, path_out)\n\n # series = df.activity\n # series.index = series.index.hour\n\n # hourly_bxpl_single(df_agg_euc, name_euc, path_out)\n hourly_bxpl_single(df_agg, name, path_out)\n\n # Plot all medians\n plot_median_days(med_list, \"median-days\", path_out)\n\n # Plot functional median boxplot\n\n try:\n pass\n\n except KeyboardInterrupt:\n logging.info(\"Manually interrupted script\")\n\n finally:\n # if len(rows) > 0:\n # logging.info(f\"Exporting {len(rows)} rows to CSV\")\n # 
export_csv(rows, row_cols, path_out, hive, rpi, method)\n\n logging.info(\"Done.\")",
"def compile_global_stats(results_dir='./../data/*/*cr_sizes*hdf5'):\n\n flist = glob.glob(results_dir)\n output = defaultdict(list)\n flist = [f for f in flist if 'nicmos' not in f]\n print(flist)\n flist.append('./../data/STIS/stis_cr_sizes.hdf5')\n results = [dask.delayed(tally_stats)(f) for f in flist]\n results = list(dask.compute(*results, scheduler='processes'))\n\n for instr, data in results:\n output[instr].append(data)\n\n for key in output.keys():\n cr_count = 0\n img_count = 0\n total_exptime = 0\n for val in output[key]:\n cr_count += val.cr_count\n img_count += val.img_count\n total_exptime += val.total_exptime\n output[key] = [cr_count, img_count, total_exptime]\n\n df = pd.DataFrame(output, index=['cr_count', 'img_count', 'total_exptime'])\n print(df)\n print('Total CR count: {}'.format(df.loc['cr_count', :].sum()))\n print('Total number of images analyzed: {}'.format(df.loc['img_count', :].sum()))\n print('Cumulative exposure time: {}'.format(df.loc['total_exptime', :].sum()))",
"def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc",
"def _calculate_runtime(self):\n lines = self.file_dic['output'].splitlines()\n start_time = datetime.strptime(lines[0].strip(), self.timestring_format)\n fin_time = datetime.strptime(lines[-1].strip(), self.timestring_format)\n dif = fin_time - start_time\n self.date = fin_time.strftime('%d %b %Y')\n self.runtime = dif.total_seconds()",
"def test_peformance(self):\n timedeltas = []\n for file in os.listdir(settings.ANALYSIS_REPORT_FOLDER):\n _file = open(os.path.join(settings.ANALYSIS_REPORT_FOLDER, file), \"r\")\n report = json.loads(_file.read())\n timedeltas.append(\n parse_datetime(report['finish']) - parse_datetime(report['start']))\n _file.close()\n\n # number of queue\n print('NUMBER OF QUEUE = {}'.format(len(timedeltas)))\n\n # get average time\n average_timedelta = sum(timedeltas, datetime.timedelta(0)) / len(timedeltas)\n print('AVERAGE = {}'.format(average_timedelta))\n self.assertTrue(average_timedelta < datetime.timedelta(minutes=3))\n\n # get total process time\n total = timedeltas[0]\n for delta in timedeltas[:1]:\n total += delta\n print('TOTAL = {}'.format(total))\n self.assertTrue(total < datetime.timedelta(minutes=3 * len(timedeltas)))",
"def print_timings(binary: str, testdata_filename: str):\n\n # Ensure we throw away an integer number of iterations\n assert ((100 - PRECISION_PERCENT) * ITERATIONS) % 200 == 0\n THROW_AWAY_AT_EACH_END = ((100 - PRECISION_PERCENT) * ITERATIONS) // 200\n\n # Do some warmup runs\n for _ in range(WARMUP_RUNS):\n with open(testdata_filename) as testdata:\n subprocess.check_call(binary, stdin=testdata, stdout=subprocess.DEVNULL)\n\n # Do the actual benchmarking runs\n deltas = []\n for _ in range(ITERATIONS):\n with open(testdata_filename) as testdata:\n t0 = time.time()\n subprocess.check_call(binary, stdin=testdata, stdout=subprocess.DEVNULL)\n t1 = time.time()\n dt_seconds = t1 - t0\n deltas.append(dt_seconds)\n\n deltas.sort()\n from_ms = deltas[THROW_AWAY_AT_EACH_END] * 1000\n to_ms = deltas[-THROW_AWAY_AT_EACH_END - 1] * 1000\n mid_ms = (from_ms + to_ms) / 2\n spread_ms = to_ms - from_ms\n print(f\"{mid_ms:.1f}ms±{spread_ms:.1f}ms: {binary}\")",
"def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time",
"def tally_stats(hdf5_file):\n Stat = namedtuple('Stat', ['cr_count',\n 'img_count',\n 'total_exptime'])\n\n with h5py.File(hdf5_file,mode='r') as f:\n instr = list(f.keys())[0]\n print(instr)\n grp = f['/{}/sizes'.format(instr)]\n num_images = 0\n num_cr = 0\n total_exptime = 0\n for key in grp.keys():\n dset = grp[key][...]\n attrs = grp[key].attrs\n # print(list(attrs.items()))\n num_cr += dset.shape[1]\n num_images += 1\n total_exptime += attrs['exptime']\n\n result = Stat(cr_count=num_cr,\n img_count=num_images,\n total_exptime=total_exptime)\n\n return instr, result",
"def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = 
mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results",
"def runTests(tests_dir, output_dir):\n\n runtime = 0\n os.makedirs(tests_dir, exist_ok=True)\n for test_case in os.listdir(tests_dir):\n print()\n print(\"Running test: \" + str(test_case))\n\n with open(tests_dir + test_case, \"r\") as f:\n tar, n = list(map(int, f.readline().split(\" \")))\n arr = list(map(int, f.readline().split(\" \")))\n\n start = timeit.default_timer()\n\n try:\n writeOutput(maxCombinationSum(tar, arr), test_case, output_dir)\n except KeyboardInterrupt:\n print(\"\\n\\tTest cancelled - KeyboardInterrupt\")\n except Exception as e:\n print(\"\\tError: \" + str(e))\n\n stop = timeit.default_timer()\n print(\"\\tTime for test: \" + str(stop - start) + \" seconds.\")\n\n runtime += (stop - start)\n\n if runtime == 0:\n print(\"No test case files found in tests directory.\\nPlease run solution from inside solution directory.\")\n else:\n print(\"\\nCompleted all tests in : \" + str(runtime) + \" seconds\")",
"def averageTime(self):\n \n pass",
"def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times",
"def emPerformanceTest(filesAndDirectories='None', resultsFileName='None', options='None'):\n\n pass",
"def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table",
"def main(\n input_dir: Path,\n defence: str,\n tamaraw_config: Optional[str],\n n_jobs: int\n):\n common.init_logging()\n _LOGGER.info(\"Using parameters: %s\", locals())\n\n if defence == \"tamaraw\":\n if tamaraw_config is None:\n raise ValueError(\"Tamaraw configuration required.\")\n tamaraw_config = json.loads(tamaraw_config)\n\n assert input_dir.is_dir(), f\"invalid path {input_dir}\"\n directories = sorted(\n [x.parent for x in Path(input_dir).glob(\"**/defended/\")]\n )\n _LOGGER.info(\"Found %d samples\", len(directories))\n\n func = functools.partial(\n _calculate_overhead, defence=defence, tamaraw_config=tamaraw_config)\n if n_jobs > 1:\n chunksize = max(len(directories) // (n_jobs * 2), 1)\n with multiprocessing.pool.Pool(n_jobs) as pool:\n scores = list(\n pool.imap_unordered(func, directories, chunksize=chunksize)\n )\n else:\n # Run in the main process\n scores = [func(x) for x in directories]\n _LOGGER.info(\"Overhead calculation complete\")\n\n results = pd.DataFrame.from_records(itertools.chain.from_iterable(scores))\n print(results.to_csv(header=True, index=False), end=\"\")",
"def count_time(start, end, folder, model_type, task):\n print(\"It has been \", str(datetime.timedelta(seconds=(end - start))))\n timee = (end - start)/3600\n #if the folder doesn't exist, create it\n if not os.path.exists(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/'])):\n os.makedirs(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/']))\n file_time = ''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/time.pickle'])\n\n with open(file_time, \"wb\") as output_file:\n pickle.dump(timee, output_file)",
"def main(output_file):\n with open(output_file, 'w+') as fl:\n poor_perf_stats = pstats.Stats('poor_perf.log', stream=fl)\n good_perf_stats = pstats.Stats('good_perf.log', stream=fl)\n\n poor_perf_stats.sort_stats('cumtime')\n\n fl.write('--------------------------------------------\\n')\n fl.write('POOR PERFORMANCE STATS\\n')\n fl.write(f\"Time: {poor_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {poor_perf_stats.total_calls}\\n\")\n fl.write(f\"Top cumulative times\\n\")\n poor_perf_stats.print_stats(20)\n\n fl.write('--------------------------------------------\\n')\n fl.write('GOOD PERFORMANCE STATS\\n')\n fl.write(f\"Time: {good_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {good_perf_stats.total_calls}\\n\")\n fl.write(f\"Top 20 cumulative times\\n\")\n good_perf_stats.print_stats(20)",
"def summarize_models(folder):\r\n filepaths = sorted([n for n in glob(folder+r'/*') if '.json' in n])\r\n # create dictionary of results by looping over each file\r\n d = {}\r\n for fi, f in enumerate(filepaths):\r\n # load dictionary from json file\r\n print('processing model {}/{}'.format(fi+1, len(filepaths)))\r\n with open(f, 'r') as fp:\r\n data = json.load(fp)\r\n # extract label and trial number from filename\r\n L = os.path.split(f)[1].split('.')[0].split('__trial_')[0]\r\n t = int(os.path.split(f)[1].split('.')[0].split('__trial_')[1])\r\n # create new dictionary for each model\r\n if L not in d:\r\n d[L] = {\r\n 'img': None,\r\n 'sim': {},\r\n 'avg_err': [],\r\n 'bub_num': [],\r\n 'df': pd.DataFrame()}\r\n # populate dictionary with model information\r\n d[L]['img'] = data['img']\r\n d[L]['sim'][t] = data['sim']\r\n d[L]['df']['rad_trial_'+str(t)] = pd.Series(data['rad'])\r\n d[L]['bub_num'].append(len(pd.Series(data['rad']).dropna()))\r\n d[L]['df']['x_trial_'+str(t)] = pd.Series(np.array(data['cent'])[:, 0])\r\n d[L]['df']['y_trial_'+str(t)] = pd.Series(np.array(data['cent'])[:, 1])\r\n d[L]['avg_err'].append(data['tot_err_percent'])\r\n return d",
"def compute_metrics_on_directories_raw(dir_gt, dir_pred):\n\n lst_gt = sorted(glob(os.path.join(dir_gt, '*')), key=natural_order)\n lst_pred = sorted(glob(os.path.join(dir_pred, '*')), key=natural_order)\n\n res = []\n cardiac_phase = []\n file_names = []\n\n measure_names = ['Dice LV', 'Volume LV', 'Err LV(ml)',\n 'Dice RV', 'Volume RV', 'Err RV(ml)', 'Dice MYO', 'Volume MYO', 'Err MYO(ml)',\n 'Hausdorff LV', 'Hausdorff RV', 'Hausdorff Myo',\n 'ASSD LV', 'ASSD RV', 'ASSD Myo']\n\n res_mat = np.zeros((len(lst_gt), len(measure_names)))\n\n ind = 0\n for p_gt, p_pred in zip(lst_gt, lst_pred):\n if os.path.basename(p_gt) != os.path.basename(p_pred):\n raise ValueError(\"The two files don't have the same name\"\n \" {}, {}.\".format(os.path.basename(p_gt),\n os.path.basename(p_pred)))\n\n\n gt, _, header = load_nii(p_gt)\n pred, _, _ = load_nii(p_pred)\n zooms = header.get_zooms()\n res.append(metrics(gt, pred, zooms))\n cardiac_phase.append(os.path.basename(p_gt).split('.nii.gz')[0].split('_')[-1])\n\n file_names.append(os.path.basename(p_pred))\n\n res_mat[ind, :9] = metrics(gt, pred, zooms)\n\n for ii, struc in enumerate([3,1,2]):\n\n gt_binary = (gt == struc) * 1\n pred_binary = (pred == struc) * 1\n\n res_mat[ind, 9+ii] = hd(gt_binary, pred_binary, voxelspacing=zooms, connectivity=1)\n res_mat[ind, 12+ii] = assd(pred_binary, gt_binary, voxelspacing=zooms, connectivity=1)\n\n ind += 1\n\n return res_mat, cardiac_phase, measure_names, file_names",
"def get_elapsed_time(self):\r\n self.get_bb_result()\r\n csv_path = self.bb_log_path + os.sep + 'run-logs' + os.sep + 'BigBenchTimes.csv'\r\n if not os.path.isfile(csv_path):\r\n print('BigBenchTimes.csv does not exist in {0}, existing...'.format(self.bb_log_path))\r\n exit(-1)\r\n df = pd.read_csv(csv_path, delimiter=';').loc[:,\r\n ['benchmarkPhase', 'streamNumber', 'queryNumber', 'durationInSeconds']]\r\n elapsed_time = pd.DataFrame()\r\n is_exist = False\r\n for phase in ['POWER_TEST', 'THROUGHPUT_TEST_1']:\r\n benchmark_phase = (df['benchmarkPhase'] == phase)\r\n if any(benchmark_phase): # whether this phase exist in the BB logs\r\n if phase == 'POWER_TEST': # power test overall and each query\r\n stream_num = ((df['streamNumber']) == 0)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(0, phase, seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n elif phase == 'THROUGHPUT_TEST_1':\r\n streams = int(np.max(df['streamNumber']))\r\n for stream in range(streams + 1):\r\n stream_num = ((df['streamNumber']) == stream)\r\n query_num = (pd.notnull(df['queryNumber']))\r\n mask = benchmark_phase & stream_num & query_num\r\n seconds = df[mask]['durationInSeconds'].values\r\n elapsed_time.insert(stream + 1, 'stream{0}'.format(stream), seconds)\r\n elapsed_time.index = df[mask]['queryNumber'].astype('int64')\r\n is_exist = True\r\n if is_exist:\r\n print('*' * 100)\r\n print('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n\r\n result_path = self.bb_log_path + os.sep + 'bb_results.log'\r\n with open(result_path, 'a') as f:\r\n f.write('*' * 100 + '\\n')\r\n f.write('Elapsed time of each query:\\n {0} \\n'.format(elapsed_time.to_string()))\r\n else:\r\n print('It seems BigBenchTimes.csv in {0} does not include TPCx-BB phases:POWER_TEST, THROUGHPUT_TEST_1' \\\r\n 'existing...'.format(self.bb_log_path))\r\n exit(-1)",
"def run_single(input_folder):\n\tstart = time.clock()\n\tinput_files = [os.path.join(input_folder, filename) for filename in os.listdir(input_folder)]\n\tfrequencies = defaultdict(int)\n\t\n\tfor input_file in input_files:\n\t\twith open(input_file, 'r') as f:\n\t\t\t\n\t\t\ttextstr = f.read()\n\t\t\n\t\ttokens = re.findall(token_regex, textstr)\n\t\tfor token in tokens:\n\t\t\tfrequencies[token.lower()] += 1\n\t\t\n\tprint 'Non-MR runtime:', time.clock() - start\n\treturn frequencies"
] | [
"0.6887931",
"0.635288",
"0.6345429",
"0.6250087",
"0.5861928",
"0.58488417",
"0.57700205",
"0.5639624",
"0.55859274",
"0.5584842",
"0.55632645",
"0.5554245",
"0.55452335",
"0.5545061",
"0.55297774",
"0.55194134",
"0.5515678",
"0.55038553",
"0.55022204",
"0.54996353",
"0.5495038",
"0.5440943",
"0.5436569",
"0.54251266",
"0.54050964",
"0.5384384",
"0.5361338",
"0.53435904",
"0.5337038",
"0.5306938"
] | 0.71506226 | 0 |