function: string, length 11 to 56k
repo_name: string, length 5 to 60
features: sequence
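The three fields above form the schema for every record that follows: the flattened source of a single function, the repository it was taken from, and a fixed-length numeric feature vector. As a minimal sketch, assuming each row can be treated as a plain Python mapping (the underlying storage format is not shown in this preview), the first record would look like this; the values are copied from the first scapy row below, with the source text abridged:

# Hypothetical in-memory form of one record; field names follow the schema above.
record = {
    "function": "def getfield(self, pkt, s): ...",  # flattened source text (abridged)
    "repo_name": "phaethon/scapy",
    "features": [847, 200, 847, 72, 1422527787],
}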
def getfield(self, pkt, s):
    l = 8
    return s[l:], self.m2i(pkt, s[:l])
phaethon/scapy
[ 847, 200, 847, 72, 1422527787 ]
def m2i(self, pkt, x):
    l = []
    l.append(struct.unpack("!H", x[6:8])[0])
    octet = struct.unpack("B", x[8:9])[0]
    l.append((octet & 2**7) >> 7)
    l.append((octet & 2**6) >> 6)
    l.append(struct.unpack("B", x[9:10])[0])
    l.append(struct.unpack("!H", x[10:12])[0])
    l.append(inet_ntoa(x[12:16]))
    l.append(struct.unpack("!H", x[16:18])[0])
    return l
phaethon/scapy
[ 847, 200, 847, 72, 1422527787 ]
def getfield(self, pkt, s):
    l = 18
    return s[l:], self.m2i(pkt, s[:l])
phaethon/scapy
[ 847, 200, 847, 72, 1422527787 ]
def post_build(self, p, pay):
    if self.len is None:
        l = len(p) - 4
        p = p[:2] + struct.pack("!H", l) + p[4:]
    return p + pay
phaethon/scapy
[ 847, 200, 847, 72, 1422527787 ]
def post_build(self, p, pay):
    if self.len is None:
        l = len(p) + len(pay) - 4
        p = p[:2] + struct.pack("!H", l) + p[4:]
    return p + pay
phaethon/scapy
[ 847, 200, 847, 72, 1422527787 ]
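For context, the post_build bodies above all splice a big-endian 16-bit length into bytes 2 through 4 of the assembled header when self.len is None. A standalone sketch of that byte splice using only the standard struct module (illustrative values, not taken from scapy):

import struct

# Hypothetical header: two fixed bytes, a two-byte length placeholder, then the rest.
p = b"\x01\x02\x00\x00rest-of-header"
pay = b"payload"

l = len(p) - 4                            # length excluding the 4 leading bytes
p = p[:2] + struct.pack("!H", l) + p[4:]  # write the length big-endian at offset 2
packet = p + pay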
def getEndpoint(endpoint):
    r = requests.get("http://localhost:80/api/{}".format(endpoint))
    if r.status_code != 200:
        raise Exception("Bad status code for endpoint {}: {}".format(endpoint, r.status_code))
    return r.json()
tech-server/gondul
[ 41, 10, 41, 64, 1460476181 ]
def add_header(response):
    if response.status_code == 200:
        response.cache_control.max_age = 5
        response.cache_control.s_maxage = 1
    return response
tech-server/gondul
[ 41, 10, 41, 64, 1460476181 ]
def root_get(path):
    updateData()
    try:
        template = env.get_template(path)
        body = template.render(objects=objects, options=request.args)
    except TemplateNotFound:
        return 'Template "{}" not found\n'.format(path), 404
    except Exception as err:
        return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
    return body, 200
tech-server/gondul
[ 41, 10, 41, 64, 1460476181 ]
def root_post(path):
    updateData()
    try:
        content = request.stream.read(int(request.headers["Content-Length"]))
        template = env.from_string(content.decode("utf-8"))
        body = template.render(objects=objects, options=request.args)
    except Exception as err:
        return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
    return body, 200
tech-server/gondul
[ 41, 10, 41, 64, 1460476181 ]
def __init__(self, progress_callback=None): self.progress_callback = progress_callback
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def setProgressCallback(self, progress_callback): self.progress_callback = progress_callback
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def getInfo(self, lat, lon, epsilon): return None
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def get(self, parent, width, height):
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def getLegendGraphic(self):
    if self.legend is None and not self.triedlegend:
        self.triedlegend = True
        layer = self.layer
        if "," in layer:
            layer = layer[layer.rindex(",") + 1:]
        if self.legendlayer:
            layer = self.legendlayer
        url = self.baseurl + "?REQUEST=GetLegendGraphic&VERSION=1.0.0&FORMAT=image/png&LAYER=%s&ext=.png" % (layer)
        try:
            print 'http://' + self.provider_host + url
            image = Loader.image('http://' + self.provider_host + url)
            self.legend = image
        except Exception, e:
            Logger.error('OverlayServer could not find LEGENDGRAPHICS for %s %s' % (self.baseurl, layer))
    return self.legend
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def co_to_ll(self, x, y):
    if self.customBounds:
        u, v = custom_to_unit(lat, lon, self.bounds)
        l, m = unit_to_latlon(u, v)
    elif self.isPLatLon:  # patch for android - does not require pyproj library
        l, m = y, x
    elif self.isPGoogle:  # patch for android - does not require pyproj library
        l, m = google_to_latlon(y, x)
    else:
        l, m = transform(self.projection, pLatlon, y, x)
    return l, m
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def geturl(self, lat1, lon1, lat2, lon2, zoom, w, h):
    try:
        x1, y1 = self.xy_to_co(lat1, lon1)
        x2, y2 = self.xy_to_co(lat2, lon2)
        return self.url + "&BBOX=%f,%f,%f,%f&WIDTH=%i&HEIGHT=%i&ext=.png" % (x1, y1, x2, y2, w, h)
    except RuntimeError, e:
        return None
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def parseLayer(self, layer, data):
    try:
        name = layer.find("Name").text
    except:
        name = None
    srss = layer.findall("SRS")
    if name:  # and srss:
        data[name] = map(lambda x: x.text, srss)
        if self.debug:
            print "Provider %s provides layer %s in projections %s" % (self.provider_host, name, data[name])
    subs = layer.findall("Layer")
    for sub in subs:
        self.parseLayer(sub, data)
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def initFromGetCapabilities(self, host, baseurl, layer=None, index=0, srs=None):
    self.debug = (layer == None) and (index == 0)
    # GetCapabilities (Layers + SRS)
    if layer is None or srs is None:
        capabilities = urlopen(host + baseurl + "?SERVICE=WMS&VERSION=1.1.1&Request=GetCapabilities").read().strip()
        try:
            tree = ET.fromstring(capabilities)
            if self.debug:
                ET.dump(tree)
            layers = tree.findall("Capability/Layer")  # TODO: proper parsing of cascading layers and their SRS
            data = {}
            for l in layers:
                self.parseLayer(l, data)
jchome/LocalGuide-Mobile
[ 1, 1, 1, 1, 1412701936 ]
def __init__(self, working_directories=None):
    super(Configuration, self).__init__()

    self.img_model = ImgModel()
    self.mask_model = MaskModel()
    self.calibration_model = CalibrationModel(self.img_model)
    self.batch_model = BatchModel(self.calibration_model, self.mask_model)
    self.pattern_model = PatternModel()

    if working_directories is None:
        self.working_directories = {'calibration': '', 'mask': '', 'image': os.path.expanduser("~"),
                                    'pattern': '', 'overlay': '', 'phase': '',
                                    'batch': os.path.expanduser("~")}
    else:
        self.working_directories = working_directories

    self.use_mask = False
    self.transparent_mask = False

    self._integration_rad_points = None
    self._integration_unit = '2th_deg'
    self._oned_azimuth_range = None

    self._cake_azimuth_points = 360
    self._cake_azimuth_range = None

    self._auto_integrate_pattern = True
    self._auto_integrate_cake = False

    self.auto_save_integrated_pattern = False
    self.integrated_patterns_file_formats = ['.xy']

    self.cake_changed = Signal()
    self._connect_signals()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def integrate_image_1d(self):
    """
    Integrates the image in the ImageModel to a Pattern. Will also automatically save the integrated pattern,
    if auto_save_integrated is True.
    """
    if self.calibration_model.is_calibrated:
        if self.use_mask:
            mask = self.mask_model.get_mask()
        elif self.mask_model.roi is not None:
            mask = self.mask_model.roi_mask
        else:
            mask = None

        x, y = self.calibration_model.integrate_1d(azi_range=self.oned_azimuth_range, mask=mask,
                                                   unit=self.integration_unit,
                                                   num_points=self.integration_rad_points)

        self.pattern_model.set_pattern(x, y, self.img_model.filename, unit=self.integration_unit)  #

        if self.auto_save_integrated_pattern:
            self._auto_save_patterns()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def save_pattern(self, filename=None, subtract_background=False):
    """
    Saves the current integrated pattern. The format depends on the file ending. Possible file formats:
        [*.xy, *.chi, *.dat, *.fxye]
    :param filename: where to save the file
    :param subtract_background: flag whether the pattern should be saved with or without subtracted background
    """
    if filename is None:
        filename = self.img_model.filename

    if filename.endswith('.xy'):
        self.pattern_model.save_pattern(filename, header=self._create_xy_header(),
                                        subtract_background=subtract_background)
    elif filename.endswith('.fxye'):
        self.pattern_model.save_pattern(filename, header=self._create_fxye_header(filename),
                                        subtract_background=subtract_background)
    else:
        self.pattern_model.save_pattern(filename, subtract_background=subtract_background)
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def _create_xy_header(self):
    """
    Creates the header for the xy file format (contains information about calibration parameters).
    :return: header string
    """
    header = self.calibration_model.create_file_header()
    header = header.replace('\r\n', '\n')
    header = header + '\n#\n# ' + self._integration_unit + '\t I'
    return header
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def _auto_save_patterns(self):
    """
    Saves the current pattern in the pattern working directory (specified in self.working_directories['pattern'].
    When background subtraction is enabled in the pattern model the pattern will be saved with background
    subtraction and without in another sub-folder. ('bkg_subtracted')
    """
    for file_ending in self.integrated_patterns_file_formats:
        filename = os.path.join(
            self.working_directories['pattern'],
            os.path.basename(str(self.img_model.filename)).split('.')[:-1][0] + file_ending)
        filename = filename.replace('\\', '/')
        self.save_pattern(filename)

    if self.pattern_model.pattern.has_background():
        for file_ending in self.integrated_patterns_file_formats:
            directory = os.path.join(self.working_directories['pattern'], 'bkg_subtracted')
            if not os.path.exists(directory):
                os.mkdir(directory)
            filename = os.path.join(directory, self.pattern_model.pattern.name + file_ending)
            filename = filename.replace('\\', '/')
            self.save_pattern(filename, subtract_background=True)
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def integration_rad_points(self): return self._integration_rad_points
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def integration_rad_points(self, new_value):
    self._integration_rad_points = new_value
    self.integrate_image_1d()
    if self.auto_integrate_cake:
        self.integrate_image_2d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def cake_azimuth_points(self): return self._cake_azimuth_points
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def cake_azimuth_points(self, new_value):
    self._cake_azimuth_points = new_value
    if self.auto_integrate_cake:
        self.integrate_image_2d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def cake_azimuth_range(self): return self._cake_azimuth_range
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def cake_azimuth_range(self, new_value):
    self._cake_azimuth_range = new_value
    if self.auto_integrate_cake:
        self.integrate_image_2d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def oned_azimuth_range(self): return self._oned_azimuth_range
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def oned_azimuth_range(self, new_value):
    self._oned_azimuth_range = new_value
    if self.auto_integrate_pattern:
        self.integrate_image_1d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def integration_unit(self): return self._integration_unit
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def integration_unit(self, new_unit):
    old_unit = self.integration_unit
    self._integration_unit = new_unit

    auto_bg_subtraction = self.pattern_model.pattern.auto_background_subtraction
    if auto_bg_subtraction:
        self.pattern_model.pattern.auto_background_subtraction = False

    self.integrate_image_1d()
    self.update_auto_background_parameters_unit(old_unit, new_unit)

    if auto_bg_subtraction:
        self.pattern_model.pattern.auto_background_subtraction = True
        self.pattern_model.pattern.recalculate_pattern()
        self.pattern_model.pattern_changed.emit()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def correct_solid_angle(self): return self.calibration_model.correct_solid_angle
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def correct_solid_angle(self, new_val):
    self.calibration_model.correct_solid_angle = new_val
    if self.auto_integrate_pattern:
        self.integrate_image_1d()
    if self._auto_integrate_cake:
        self.integrate_image_2d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def auto_integrate_cake(self): return self._auto_integrate_cake
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def auto_integrate_cake(self, new_value):
    if self._auto_integrate_cake == new_value:
        return
    self._auto_integrate_cake = new_value
    if new_value:
        self.img_model.img_changed.connect(self.integrate_image_2d)
    else:
        self.img_model.img_changed.disconnect(self.integrate_image_2d)
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def auto_integrate_pattern(self): return self._auto_integrate_pattern
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def auto_integrate_pattern(self, new_value):
    if self._auto_integrate_pattern == new_value:
        return
    self._auto_integrate_pattern = new_value
    if new_value:
        self.img_model.img_changed.connect(self.integrate_image_1d)
    else:
        self.img_model.img_changed.disconnect(self.integrate_image_1d)
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def cake_img(self): return self.calibration_model.cake_img
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def roi(self): return self.mask_model.roi
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def roi(self, new_val):
    self.mask_model.roi = new_val
    self.integrate_image_1d()
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
def save_in_hdf5(self, hdf5_group):
    """
    Saves the configuration group in the given hdf5_group.
    :type hdf5_group: h5py.Group
    """
    f = hdf5_group

    # save general information
    general_information = f.create_group('general_information')
    # integration parameters:
    general_information.attrs['integration_unit'] = self.integration_unit
    if self.integration_rad_points:
        general_information.attrs['integration_num_points'] = self.integration_rad_points
    else:
        general_information.attrs['integration_num_points'] = 0
    # cake parameters:
    general_information.attrs['auto_integrate_cake'] = self.auto_integrate_cake
    general_information.attrs['cake_azimuth_points'] = self.cake_azimuth_points
    if self.cake_azimuth_range is None:
        general_information.attrs['cake_azimuth_range'] = "None"
    else:
        general_information.attrs['cake_azimuth_range'] = self.cake_azimuth_range
    # mask parameters
    general_information.attrs['use_mask'] = self.use_mask
    general_information.attrs['transparent_mask'] = self.transparent_mask
    # auto save parameters
    general_information.attrs['auto_save_integrated_pattern'] = self.auto_save_integrated_pattern
    formats = [n.encode('ascii', 'ignore') for n in self.integrated_patterns_file_formats]
    general_information.create_dataset('integrated_patterns_file_formats', (len(formats), 1), 'S10', formats)

    # save working directories
    working_directories_gp = f.create_group('working_directories')
    try:
        for key in self.working_directories:
            working_directories_gp.attrs[key] = self.working_directories[key]
    except TypeError:
        self.working_directories = {'calibration': '', 'mask': '', 'image': '', 'pattern': '',
                                    'overlay': '', 'phase': '', 'batch': ''}
        for key in self.working_directories:
            working_directories_gp.attrs[key] = self.working_directories[key]

    # save image model
    image_group = f.create_group('image_model')
    image_group.attrs['auto_process'] = self.img_model.autoprocess
    image_group.attrs['factor'] = self.img_model.factor
    image_group.attrs['has_background'] = self.img_model.has_background()
    image_group.attrs['background_filename'] = self.img_model.background_filename
    image_group.attrs['background_offset'] = self.img_model.background_offset
    image_group.attrs['background_scaling'] = self.img_model.background_scaling
    if self.img_model.has_background():
        background_data = self.img_model.untransformed_background_data
        image_group.create_dataset('background_data', background_data.shape, 'f', background_data)
    image_group.attrs['series_max'] = self.img_model.series_max
    image_group.attrs['series_pos'] = self.img_model.series_pos

    # image corrections
    corrections_group = image_group.create_group('corrections')
    corrections_group.attrs['has_corrections'] = self.img_model.has_corrections()
    for correction, correction_object in self.img_model.img_corrections.corrections.items():
        if correction in ['cbn', 'oiadac']:
            correction_data = correction_object.get_data()
            imcd = corrections_group.create_dataset(correction, correction_data.shape, 'f', correction_data)
            for param, value in correction_object.get_params().items():
                imcd.attrs[param] = value
        elif correction == 'transfer':
            params = correction_object.get_params()
            transfer_group = corrections_group.create_group('transfer')
            original_data = params['original_data']
            response_data = params['response_data']
            original_ds = transfer_group.create_dataset('original_data', original_data.shape, 'f', original_data)
            original_ds.attrs['filename'] = params['original_filename']
            response_ds = transfer_group.create_dataset('response_data', response_data.shape, 'f', response_data)
            response_ds.attrs['filename'] = params['response_filename']

    # the actual image
    image_group.attrs['filename'] = self.img_model.filename
    current_raw_image = self.img_model.untransformed_raw_img_data
    raw_image_data = image_group.create_dataset('raw_image_data', current_raw_image.shape, dtype='f')
    raw_image_data[...] = current_raw_image

    # image transformations
    transformations_group = image_group.create_group('image_transformations')
    for ind, transformation in enumerate(self.img_model.get_transformations_string_list()):
        transformations_group.attrs[str(ind)] = transformation

    # save roi data
    if self.roi is not None:
        image_group.attrs['has_roi'] = True
        image_group.create_dataset('roi', (4,), 'i8', tuple(self.roi))
    else:
        image_group.attrs['has_roi'] = False

    # save mask model
    mask_group = f.create_group('mask')
    current_mask = self.mask_model.get_mask()
    mask_data = mask_group.create_dataset('data', current_mask.shape, dtype=bool)
    mask_data[...] = current_mask

    # save detector information
    detector_group = f.create_group('detector')
    detector_mode = self.calibration_model.detector_mode
    detector_group.attrs['detector_mode'] = detector_mode.value
    if detector_mode == DetectorModes.PREDEFINED:
        detector_group.attrs['detector_name'] = self.calibration_model.detector.name
    elif detector_mode == DetectorModes.NEXUS:
        detector_group.attrs['nexus_filename'] = self.calibration_model.detector.filename

    # save calibration model
    calibration_group = f.create_group('calibration_model')
    calibration_filename = self.calibration_model.filename
    if calibration_filename.endswith('.poni'):
        base_filename, ext = self.calibration_model.filename.rsplit('.', 1)
    else:
        base_filename = self.calibration_model.filename
        ext = 'poni'
    calibration_group.attrs['calibration_filename'] = base_filename + '.' + ext
    pyfai_param, fit2d_param = self.calibration_model.get_calibration_parameter()
    pfp = calibration_group.create_group('pyfai_parameters')
    for key in pyfai_param:
        try:
            pfp.attrs[key] = pyfai_param[key]
        except TypeError:
            pfp.attrs[key] = ''
    calibration_group.attrs['correct_solid_angle'] = self.correct_solid_angle
    if self.calibration_model.distortion_spline_filename is not None:
        calibration_group.attrs['distortion_spline_filename'] = self.calibration_model.distortion_spline_filename

    # save background pattern and pattern model
    background_pattern_group = f.create_group('background_pattern')
    try:
        background_pattern_x = self.pattern_model.background_pattern.original_x
        background_pattern_y = self.pattern_model.background_pattern.original_y
    except (TypeError, AttributeError):
        background_pattern_x = None
        background_pattern_y = None
    if background_pattern_x is not None and background_pattern_y is not None:
        background_pattern_group.attrs['has_background_pattern'] = True
        bgx = background_pattern_group.create_dataset('x', background_pattern_x.shape, dtype='f')
        bgy = background_pattern_group.create_dataset('y', background_pattern_y.shape, dtype='f')
        bgx[...] = background_pattern_x
        bgy[...] = background_pattern_y
    else:
        background_pattern_group.attrs['has_background_pattern'] = False

    pattern_group = f.create_group('pattern')
    try:
        pattern_x = self.pattern_model.pattern.original_x
        pattern_y = self.pattern_model.pattern.original_y
    except (TypeError, AttributeError):
        pattern_x = None
        pattern_y = None
    if pattern_x is not None and pattern_y is not None:
        px = pattern_group.create_dataset('x', pattern_x.shape, dtype='f')
        py = pattern_group.create_dataset('y', pattern_y.shape, dtype='f')
        px[...] = pattern_x
        py[...] = pattern_y
    pattern_group.attrs['pattern_filename'] = self.pattern_model.pattern_filename
    pattern_group.attrs['unit'] = self.pattern_model.unit
    pattern_group.attrs['file_iteration_mode'] = self.pattern_model.file_iteration_mode
    if self.pattern_model.pattern.auto_background_subtraction:
        pattern_group.attrs['auto_background_subtraction'] = True
        auto_background_group = pattern_group.create_group('auto_background_settings')
        auto_background_group.attrs['smoothing'] = \
            self.pattern_model.pattern.auto_background_subtraction_parameters[0]
        auto_background_group.attrs['iterations'] = \
            self.pattern_model.pattern.auto_background_subtraction_parameters[1]
        auto_background_group.attrs['poly_order'] = \
            self.pattern_model.pattern.auto_background_subtraction_parameters[2]
        auto_background_group.attrs['x_start'] = self.pattern_model.pattern.auto_background_subtraction_roi[0]
        auto_background_group.attrs['x_end'] = self.pattern_model.pattern.auto_background_subtraction_roi[1]
    else:
        pattern_group.attrs['auto_background_subtraction'] = False
Dioptas/Dioptas
[ 44, 25, 44, 14, 1421345772 ]
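As a hedged read-back sketch (not part of the Dioptas sources), the groups and attributes written by save_in_hdf5 above could be inspected with h5py as follows; "project.h5" is a placeholder filename, and the file is assumed to contain a configuration saved at its root group:

import h5py

# Hypothetical inspection of a file written via save_in_hdf5; the paths mirror
# the group and attribute names used in the function above.
with h5py.File("project.h5", "r") as f:
    print(f["general_information"].attrs["integration_unit"])
    print(f["image_model"].attrs["filename"])
    print(f["image_model/raw_image_data"].shape)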
def __init__(self, conrev=1):
    # conrev defaulting to 1 to maintain compatibility with old definitions
    # conrev=1 -> old controllers (DEFAULT)
    # conrev=2 -> newer controllers (17R4 ...)
    self.zone_map = {}
    self.power_zones = []
    self.reset_types = {}
    self.state_map = {}
    self.vendor_id = 0
    self.product_id = 0
    self.cmd_packet = alienfx_cmdpacket.AlienFXCmdPacket(conrev)  # Loads the cmdpacket.
    self._driver = alienfx_usbdriver.AlienFXUSBDriver(self)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def get_state_name(self, state):
    """ Given a state number, return a string state name """
    for state_name in self.state_map:
        if self.state_map[state_name] == state:
            return state_name
    return "UNKNOWN"
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def get_reset_type_name(self, num):
    """ Given a reset number, return a string reset name """
    if num in list(self.reset_types.keys()):
        return self.reset_types[num]
    else:
        return "UNKNOWN"
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _reset(self, reset_type):
    """ Send a "reset" packet to the AlienFX controller."""
    reset_code = self._get_reset_code(reset_type)
    pkt = self.cmd_packet.make_cmd_reset(reset_code)
    logging.debug("SENDING: {}".format(self.pkt_to_string(pkt)))
    self._driver.write_packet(pkt)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _wait_controller_ready(self):
    """ Keep sending a "get status" packet to the AlienFX controller and
    return only when the controller is ready
    """
    ready = False
    errcount = 0
    while not ready:
        pkt = self.cmd_packet.make_cmd_get_status()
        logging.debug("SENDING: {}".format(self.pkt_to_string(pkt)))
        self._driver.write_packet(pkt)
        try:
            resp = self._driver.read_packet()
            ready = (resp[0] == self.cmd_packet.STATUS_READY)
        except TypeError:
            errcount += 1
            logging.debug("No Status received yet... Failed tries=" + str(errcount))
        if errcount > 50:
            logging.error("Controller status could not be retrieved. Is the device already in use?")
            quit(-99)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def pkt_to_string(self, pkt_bytes):
    """ Return a human readable string representation of an AlienFX
    command packet.
    """
    return self.cmd_packet.pkt_to_string(pkt_bytes, self)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _get_no_zone_code(self):
    """ Return a zone code corresponding to all non-visible zones."""
    zone_codes = [self.zone_map[x] for x in self.zone_map]
    return ~reduce(lambda x, y: x | y, zone_codes, 0)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _get_zone_codes(self, zone_names):
    """ Given zone names, return the zone codes they refer to. """
    zones = 0
    for zone in zone_names:
        if zone in self.zone_map:
            zones |= self.zone_map[zone]
    return zones
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _get_reset_code(self, reset_name):
    """ Given the name of a reset action, return its code. """
    for reset in self.reset_types:
        if reset_name == self.reset_types[reset]:
            return reset
    logging.warning("Unknown reset type: {}".format(reset_name))
    return 0
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _make_loop_cmds(self, themefile, zones, block, loop_items):
    """ Given loop-items from the theme file, return a list of loop
    commands.
    """
    loop_cmds = []
    pkt = self.cmd_packet
    for item in loop_items:
        item_type = themefile.get_action_type(item)
        item_colours = themefile.get_action_colours(item)
        if item_type == AlienFXThemeFile.KW_ACTION_TYPE_FIXED:
            if len(item_colours) != 1:
                logging.warning("fixed must have exactly one colour value")
                continue
            loop_cmds.append(
                pkt.make_cmd_set_colour(block, zones, item_colours[0]))
        elif item_type == AlienFXThemeFile.KW_ACTION_TYPE_BLINK:
            if len(item_colours) != 1:
                logging.warning("blink must have exactly one colour value")
                continue
            loop_cmds.append(
                pkt.make_cmd_set_blink_colour(block, zones, item_colours[0]))
        elif item_type == AlienFXThemeFile.KW_ACTION_TYPE_MORPH:
            if len(item_colours) != 2:
                logging.warning("morph must have exactly two colour values")
                continue
            loop_cmds.append(
                pkt.make_cmd_set_morph_colour(
                    block, zones, item_colours[0], item_colours[1]))
        else:
            logging.warning("unknown loop item type: {}".format(item_type))
    return loop_cmds
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _make_zone_cmds(self, themefile, state_name, boot=False):
    """ Given a theme file, return a list of zone commands.
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def _send_cmds(self, cmds):
    """ Send the given commands to the controller. """
    for cmd in cmds:
        logging.debug("SENDING: {}".format(self.pkt_to_string(cmd)))
        self._driver.write_packet(cmd)
ashwinm76/alienfx
[ 227, 67, 227, 19, 1396889661 ]
def setUp(self):
    self.person = Person.objects.create(name=u"A person")
    self.category = Category.objects.create(name="Education")
    self.promise = Promise.objects.create(name="this is a promise",
                                          description="this is a description",
                                          date=nownow,
                                          person=self.person,
                                          category=self.category)
ciudadanointeligente/check-it
[ 12, 3, 12, 7, 1397502331 ]
def test_tag_css_unicode(self):
    '''A tag css has a unicode'''
    self.promise.tags.add("test")
    tag = self.promise.tags.first()
    extracss = TagExtraCss.objects.create(tag=tag, classes="extraclass")
    self.assertEquals(extracss.__unicode__(), u"extraclass for test")
ciudadanointeligente/check-it
[ 12, 3, 12, 7, 1397502331 ]
def line_displaced(start_coord, end_coord, displacements):
    '''Returns a Path defined as a line spanning points `start_coord` and
    `end_coord`, displaced by scalars `displacements`. The number of points
    in the path is determined by the length of `displacements`.
    '''
    p = path([start_coord, end_coord])
    perpendicular_displace(p, displacements)
    return p
drepetto/chiplotle
[ 27, 16, 27, 7, 1432836867 ]
def test_permission_from_list_to_list(self):
    expected = ['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS']
    permission = Permission(['MANAGE_WEBHOOKS', 'USE_EXTERNAL_EMOJIS'])
    actual = permission.to_list()
    self.assertListEqual(sorted(actual), sorted(expected))
Arcbot-Org/Arcbot
[ 8, 6, 8, 16, 1416042396 ]
def test_permission_in_permission(self): self.assertTrue("ADMINISTRATOR" in Permission(2056))
Arcbot-Org/Arcbot
[ 8, 6, 8, 16, 1416042396 ]
def test_permission_not_in_permission(self): self.assertTrue("USE_VAD" not in Permission(2056))
Arcbot-Org/Arcbot
[ 8, 6, 8, 16, 1416042396 ]
def test_permission_add(self):
    permission = Permission(2056)
    self.assertTrue(permission.allows("ADMINISTRATOR"))
    self.assertFalse(permission.allows("MENTION_EVERYONE"))
    permission.add("MENTION_EVERYONE")
    self.assertTrue(permission.allows("MENTION_EVERYONE"))
Arcbot-Org/Arcbot
[ 8, 6, 8, 16, 1416042396 ]
def __init__(self, item_spacing=ITEM_SPACING, *args, **kwargs): super().__init__(*args, **kwargs)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def add_item(self, item): self._vbox_items.pack_start(item.widget, expand=False, fill=False)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def reorder_item(self, item, position): new_position = min(max(position, 0), len(self._items) - 1)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def remove_item(self, item):
    item_position = self._get_item_position(item)
    if item_position < len(self._items) - 1:
        next_item_position = item_position + 1
        self._items[next_item_position].item_widget.grab_focus()
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def clear(self):
    for unused_ in range(len(self._items)):
        self.remove_item(self._items[0])
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _setup_drag(self, item): self._drag_and_drop_context.setup_drag( item.item_widget, self._get_drag_data, self._on_drag_data_received, [item], [item], self)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _get_drag_data(self, dragged_item): return str(self._items.index(dragged_item))
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_drag_data_received(self, dragged_item_index_str, destination_item):
    dragged_item = self._items[int(dragged_item_index_str)]
    self.reorder_item(dragged_item, self._get_item_position(destination_item))
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_item_widget_key_press_event(self, widget, event, item):
    if event.state & gtk.gdk.MOD1_MASK:  # Alt key
        key_name = gtk.gdk.keyval_name(event.keyval)
        if key_name in ["Up", "KP_Up"]:
            self.reorder_item(
                item, self._get_item_position(item) - 1)
        elif key_name in ["Down", "KP_Down"]:
            self.reorder_item(
                item, self._get_item_position(item) + 1)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_item_button_remove_clicked(self, button, item): self.remove_item(item)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _get_item_position(self, item): return self._items.index(item)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def __init__(self, item_widget): self._item_widget = item_widget
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def widget(self): return self._event_box
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def item_widget(self): return self._item_widget
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def button_remove(self): return self._button_remove
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def remove_item_widget(self): self._hbox.remove(self._item_widget)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _setup_item_button(self, item_button, icon, position=None): item_button.set_relief(gtk.RELIEF_NONE)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_event_box_enter_notify_event(self, event_box, event):
    if event.detail != gtk.gdk.NOTIFY_INFERIOR:
        self._hbox_buttons.show()
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_event_box_leave_notify_event(self, event_box, event):
    if event.detail != gtk.gdk.NOTIFY_INFERIOR:
        self._hbox_buttons.hide()
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_event_box_size_allocate(self, event_box, allocation):
    if self._is_event_box_allocated_size:
        return
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _on_event_box_buttons_size_allocate(self, event_box, allocation):
    if self._buttons_allocation is not None:
        return
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def __init__(
        self,
        new_item_default_value,
        min_size=0,
        max_size=None,
        item_spacing=ItemBox.ITEM_SPACING,
        max_width=None,
        max_height=None,
        *args,
        **kwargs):
    """
    Parameters:
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _init_gui(self):
    self._size_spin_button = gtk.SpinButton(
        gtk.Adjustment(
            value=0,
            lower=self._min_size,
            upper=self._max_size,
            step_incr=1,
            page_incr=10,
        ),
        digits=0)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def add_item(self, item_value=None, index=None):
    if item_value is None:
        item_value = self._new_item_default_value
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def reorder_item(self, item, new_position):
    orig_position = self._get_item_position(item)
    processed_new_position = super().reorder_item(item, new_position)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def remove_item(self, item):
    if (self._locker.is_unlocked("prevent_removal_below_min_size")
            and len(self._items) == self._min_size):
        return
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def set_values(self, values):
    self._locker.lock("emit_size_spin_button_value_changed")
    self._locker.lock("prevent_removal_below_min_size")
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]
def _setup_drag(self, item):
    self._drag_and_drop_context.setup_drag(
        # Using the entire item allows dragging only by the label rather than the
        # widget itself. This avoids problems with widgets such as spin buttons
        # that do not behave correctly when reordering and also avoids accidental
        # clicking and modifying the widget by the user.
        item.widget,
        self._get_drag_data,
        self._on_drag_data_received,
        [item],
        [item],
        self)
khalim19/gimp-plugin-export-layers
[ 522, 38, 522, 29, 1403878280 ]