Columns: code (string, 4 to 4.48k chars); docstring (string, 1 to 6.45k chars); _id (string, 24 chars)
def set_extension(self, extension, real=True):
    extension = self.extension_map(extension, extension)
    if real:
        self.extension = extension
    self.kwdict["extension"] = self.prefix + extension
    self.build_path()
Set filename extension
625941b124f1403a926008de
def restore_instance_snapshot(self, context, instance, snapshot_id=None):
    LOG.debug("Restore to a snapshot of instance", instance=instance)
    vm_ref = vm_util.get_vm_ref(self._session, instance)
    if snapshot_id is not None:
        snapshot_ref = vm_util.get_snapshot_ref_by_snapshot_id(
            self._session, vm_ref, snapshot_id)
        snapshot_task = self._session._call_method(
            self._session.vim, "RevertToSnapshot_Task", snapshot_ref)
    else:
        current_sp_id = vm_util.get_current_snapshot_from_vm_ref(
            self._session, vm_ref)
        if current_sp_id is None:
            raise exception.NotFound(_("This virtual machine does not have"
                                       " a current snapshot."))
        else:
            snapshot_task = self._session._call_method(
                self._session.vim, "RevertToCurrentSnapshot_Task", vm_ref)
    self._session._wait_for_task(snapshot_task)
    LOG.debug("Restored the snapshot of the VM instance", instance=instance)
Restore snapshot of the instance.
625941b1627d3e7fe0d68bb9
def CreateDBInstanceHour(self, request):
    try:
        params = request._serialize()
        body = self.call("CreateDBInstanceHour", params)
        response = json.loads(body)
        if "Error" not in response["Response"]:
            model = models.CreateDBInstanceHourResponse()
            model._deserialize(response["Response"])
            return model
        else:
            code = response["Response"]["Error"]["Code"]
            message = response["Response"]["Error"]["Message"]
            reqid = response["Response"]["RequestId"]
            raise TencentCloudSDKException(code, message, reqid)
    except Exception as e:
        if isinstance(e, TencentCloudSDKException):
            raise
        else:
            raise TencentCloudSDKException(e.message, e.message)
This API (CreateDBInstanceHour) is used to create pay-as-you-go MongoDB cloud database instances (including primary, disaster-recovery, and read-only instances). An instance can be created by passing in the instance specification, instance type, MongoDB version, purchase duration, quantity, and other information. :param request: the request structure with the parameters required to call CreateDBInstanceHour. :type request: :class:`tencentcloud.mongodb.v20180408.models.CreateDBInstanceHourRequest` :rtype: :class:`tencentcloud.mongodb.v20180408.models.CreateDBInstanceHourResponse`
625941b10383005118ecf355
def enroll_dl0_replica(installer, fstore, remote_api, debug=False):
    logger.info("Enrolling host to IPA domain")
    config = installer._config
    hostname = config.host_name
    try:
        installer._enrollment_performed = True
        host_result = remote_api.Command.host_add(
            unicode(config.host_name))['result']
        host_princ = unicode(host_result['krbcanonicalname'][0])
        purge_host_keytab(config.realm_name)
        getkeytab_args = [
            paths.IPA_GETKEYTAB,
            '-s', config.master_host_name,
            '-p', host_princ,
            '-D', unicode(ipaldap.DIRMAN_DN),
            '-w', config.dirman_password,
            '-k', paths.KRB5_KEYTAB,
            '--cacert', os.path.join(config.dir, 'ca.crt')
        ]
        ipautil.run(getkeytab_args, nolog=(config.dirman_password,))
        _hostname, _sep, host_domain = hostname.partition('.')
        fstore.backup_file(paths.KRB5_CONF)
        configure_krb5_conf(
            config.realm_name,
            config.domain_name,
            [config.master_host_name],
            [config.master_host_name],
            False,
            paths.KRB5_CONF,
            host_domain,
            hostname,
            configure_sssd=False
        )
    except CalledProcessError as e:
        raise RuntimeError("Failed to fetch host keytab: {}".format(e))
Do partial host enrollment in DL0: * add host entry to remote master * request host keytab from remote master * configure client-like /etc/krb5.conf to enable GSSAPI auth further down the replica installation
625941b182261d6c526ab20f
def run_ES(self):
    self.logger.info('started running (μ + λ)-ES')
    self.create_initial_population()
    self.evaluate_population(self.population)
    current_gen = 0
    self.successful_individuals = 0
    self.total_individuals = 0
    while current_gen < self.max_generation:
        self.logger.debug('current_gen:{}'.format(current_gen))
        if current_gen % self.k == 0 and current_gen != 0:
            success_probability = self.successful_individuals / self.total_individuals
            if success_probability > 0.2:
                self.sigma /= self.tau
            elif success_probability < 0.2:
                self.sigma *= self.tau
        parent = self.get_parent_for_mutation()
        new_population = self.create_new_population(parent)
        self.success_probability_counter(new_population)
        self.evaluate_population(new_population)
        self.selection_operator(new_population)
        self.logger.debug(f'best individual in generation {current_gen}: {self.get_best_individual()}')
        current_gen += 1
    self.logger.info('finished running (μ + λ)-ES')
Run the (μ + λ)-ES using Rechenberg's 1/5th Success Rule
625941b1d486a94d0b98debc
def main():
    args = argument_parser().parse_args()
    random.seed(args.seed)
    args.checkpoint_dir = CHECKPOINT_DIR
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
    data_source = SinusoidDataSource(amp_range=[0.1, 5.0],
                                     phase_range=[0.1, np.pi],
                                     input_range=[-5.0, 5.0])
    train_set = Dataset(data_source, which_set='train', task_type='regression')
    test_set = Dataset(data_source, which_set='test', task_type='regression')
    model = SinusoidModel(args.classes, **model_kwargs(args))
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        if not args.pretrained:
            print('Training...')
            train(sess, model, train_set, test_set,
                  os.path.join(args.checkpoint_dir, args.checkpoint),
                  **train_kwargs(args))
        else:
            print('Restoring from checkpoint...')
            tf.train.Saver().restore(
                sess,
                tf.train.latest_checkpoint(
                    os.path.join(args.checkpoint_dir, args.checkpoint)))
        print('Evaluating...')
        eval_kwargs = evaluate_kwargs(args)
        print('Train accuracy: ' + str(evaluate(sess, model, train_set, **eval_kwargs)))
        print('Test accuracy: ' + str(evaluate(sess, model, test_set, **eval_kwargs)))
Load data and train a model on it.
625941b1be7bc26dc91cd37a
def summary(self, kind='raw'):
    if kind == 'raw':
        return self._raw_data['summary'].copy()
    else:
        raise NotImplementedError
Summary statistics
625941b16fece00bbac2d4a6
def findMedianSortedArrays(self, nums1, nums2):
    len1, len2 = len(nums1), len(nums2)
    n_elems = len1 + len2
    if n_elems & 1:
        k_elem_ind = n_elems // 2
    else:
        k_elem_ind = (n_elems - 1) // 2
    k_val, k_val_next = None, None
    k_elem_ind += 1
    index1, index2, k_index = 0, 0, 0
    while index1 < len1 and index2 < len2 and k_index <= k_elem_ind:
        k_val = k_val_next
        a, b = nums1[index1], nums2[index2]
        if a <= b:
            k_val_next = a
            index1 += 1
        else:
            k_val_next = b
            index2 += 1
        k_index += 1
    # Drain whichever array still has elements left.  (The original flattened
    # code assigned nums2 in both branches, which is a bug; the else branch
    # must continue from nums1.)
    if index1 == len1:
        index_left, len_left, nums_left = index2, len2, nums2
    else:
        index_left, len_left, nums_left = index1, len1, nums1
    while index_left < len_left and k_index <= k_elem_ind:
        k_val, k_val_next = k_val_next, nums_left[index_left]
        index_left, k_index = index_left + 1, k_index + 1
    if n_elems & 1:
        return k_val or k_val_next
    return (k_val + k_val_next) / 2.0
:type nums1: List[int] :type nums2: List[int] :rtype: float
625941b18a43f66fc4b53ddf
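A hedged usage sketch for the median function above, assuming it is a method of a LeetCode-style Solution class (the class itself is not shown in this record):

print(Solution().findMedianSortedArrays([1, 3], [2]))     # 2 (odd total count)
print(Solution().findMedianSortedArrays([1, 2], [3, 4]))  # 2.5 (even total count)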
def get_agent(self):
    return self.request("self", method="get").json()
Query the state of the target agent. https://www.nomadproject.io/docs/http/agent-self.html returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException
625941b145492302aab5e034
def t_instring(t):
    t.lexer.value_buffer = []
    t.lexer.string_startpos = t.lexpos
    t.lexer.level = 1
    t.lexer.begin('instring')
\(
625941b1d8ef3951e32432ae
def clear_tasks(self):
    self.logger.debug('Clearing {0} tasks'.format(len(self.tasks)))
    self.tasks = {}
Empty the task dictionary.
625941b1293b9510aa2c3007
def result_metadata(self, handle):
    req = TCLIService.TGetResultSetMetadataReq()
    req.operationHandle = handle
    resp = self.hs2_client.GetResultSetMetadata(req)
    HS2TestSuite.check_response(resp)
    return resp
Gets the schema for the query identified by the handle
625941b1925a0f43d2549be7
def propagate(self, input, save_path=False):
    output = []
    raw_input, net_input = [], []
    for i in input:
        into_layer = np.array([i]).T
        for j in xrange(self.num_layer):
            n = np.dot(self.weight[j], into_layer) + self.bias[j]
            if save_path:
                raw_input.append(into_layer)
                net_input.append(n)
            outof_layer = self.transfer_function[j](n)
            into_layer = outof_layer
        output.append(outof_layer.flatten())
    if save_path:
        return np.array(output), raw_input, net_input
    else:
        return np.array(output)
Input: array of arrays.
625941b1187af65679ca4e93
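The propagate method above depends on the network's weight, bias and transfer_function attributes; a self-contained sketch of its layer loop, with made-up stand-ins for those attributes:

import numpy as np

weight = [np.array([[0.5, -0.2]]), np.array([[2.0]])]   # shapes: (1, 2), then (1, 1)
bias = [np.array([[0.1]]), np.array([[0.0]])]
transfer_function = [np.tanh, lambda n: n]

into_layer = np.array([[1.0], [2.0]])   # one 2-feature sample as a column vector
for j in range(len(weight)):
    n = np.dot(weight[j], into_layer) + bias[j]
    into_layer = transfer_function[j](n)
print(into_layer.flatten())   # final layer output, roughly [0.395]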
def test_add_vip_card(self):
    with open("D:\\mzmy\\add_csv\\add_vipcard.csv") as avc:
        avc = csv.reader(avc)
        for a in avc:
            # Skip the header row ("会员卡编号" = "membership card number").
            if a[0] == "会员卡编号":
                continue
            else:
                self.driver.get(self.url)
                self.driver.maximize_window()
                self.lp.send_username("18180470015")
                self.lp.send_password("123456")
                self.lp.click_button()
                self.apt.click01()
                self.apt.click02()
                self.apt.click03()
                self.apt.click04()
                self.apt.click05()
                self.apt.send06(a[0])
                self.apt.click07()
                self.apt.send08(a[1])
                self.apt.click09()
Add a membership card.
625941b197e22403b379cd0d
def days_remaining(start_dt1, start_dt2, end_dt):
    if start_dt1 <= start_dt2:
        start_dt = start_dt1
    else:
        start_dt = start_dt2
    days_remng = (end_dt - start_dt).total_seconds() / 86400
    return days_remng
Calculate the time delta, in days, between end_dt and min(start_dt1, start_dt2). All three inputs are expected to be datetimes.
625941b1507cdc57c6306a3e
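A quick usage sketch for days_remaining above; the dates are illustrative:

from datetime import datetime

# The earlier of the two starts is 2024-01-01; 2024-01-31 minus it is 30 days.
print(days_remaining(datetime(2024, 1, 1),
                     datetime(2024, 1, 15),
                     datetime(2024, 1, 31)))  # 30.0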
def parse_labels(response):
    return {label['Name']: round(label['Confidence'], 2)
            for label in response['Labels']}
Parse the response and return labels data.
625941b14a966d76dd550d7f
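A usage sketch for parse_labels above, with a response shaped like Amazon Rekognition's DetectLabels output (the values are made up):

response = {'Labels': [{'Name': 'Dog', 'Confidence': 98.7654},
                       {'Name': 'Pet', 'Confidence': 97.1}]}
print(parse_labels(response))  # {'Dog': 98.77, 'Pet': 97.1}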
def delete_item_list(self, itemlist_id):
    os.remove(os.path.join(self.basedir,
                           "%s.n3" % self.get_item_list_name(itemlist_id)))
    self.graph.remove((URIRef(itemlist_id), None, None))
    self.graph.remove((None, None, URIRef(itemlist_id)))
Delete a given item list
625941b1627d3e7fe0d68bbb
def load_rprocess_yields(self):
    self.rprocess_yields = pickle_read(
        join(self.path_rprocess, 'cescutti06_yields.pck'))
Load r-process element yields. Cescutti et al. (2006) r-process Ba & Eu yields for M = 12, 15, 30 Msun that are metallicity independent.
625941b18e05c05ec3eea0e5
def _rel_dist(self, tree):
    self._avg_descendant_rate(tree)
    for node in tree.preorder_node_iter():
        if node == tree.seed_node:
            node.rel_dist = 0.0
        elif node.is_leaf():
            node.rel_dist = 1.0
        else:
            a = node.edge_length
            b = node.mean_dist
            x = node.parent_node.rel_dist
            if (a + b) != 0:
                rel_dist = x + (a / (a + b)) * (1.0 - x)
            else:
                rel_dist = x
            node.rel_dist = rel_dist
Calculate relative distance to each internal node. Parameters ---------- tree : Dendropy Tree Phylogenetic tree. Returns ------- The following attributes are added to each node: mean_dist: mean distance to tips num_taxa: number of terminal taxa rel_dist: relative distance of node between root and extant organisms
625941b13539df3088e2e0ba
def get_details(self, model_uid):
    model_uid = str_type_conv(model_uid)
    self._validate_type(model_uid, u'model_uid', STR_TYPE, True)
    response = requests.get(
        self._href_definitions.get_published_model_href(model_uid),
        headers=self._client._get_headers()
    )
    details = self._handle_response(200, u'getting learning system details',
                                    response, True)
    return details[u'entity'][u'learning_configuration']
Get details of learning system. :param model_uid: ID of model for this learning system :type model_uid: str :returns: learning system details :rtype: dict **Example**: >>> learning_system_details = client.learning_system.get_details(model_uid)
625941b10a50d4780f666c03
def insert_doc(d, a):
    client = py.MongoClient('mongo')
    db = client['docs']
    col = db['aug_meta']
    doc, file_stream = get_tika_content(d, a)
    raw_file = create_temp_file(d['latest_version']['download_url'], a)
    doc['raw_file'] = import_to_gridfs(db, raw_file.name, doc['uuid'])
    col.insert_one(doc)
    clean_temp_files(raw_file.name)
    return True
Insert TIKA extracted metadata and content.
625941b116aa5153ce3621ee
def _getDataLayerId(self):
    return self.qgs_layer.id()
Get the name tag content from the XML. :return: the QGIS layer ID :rtype: str
625941b1b57a9660fec335ec
def singular_points(self, F=None):
    if F is None:
        if not self.base_ring() in Fields():
            raise TypeError("curve must be defined over a field")
    elif not F in Fields():
        raise TypeError("(=%s) must be a field" % F)
    X = self.singular_subscheme()
    return X.rational_points(F=F)
Return the set of singular points of this curve. INPUT: - ``F`` -- (default: None) field over which to find the singular points. If not given, the base ring of this curve is used. OUTPUT: - a list of points in the ambient space of this curve. EXAMPLES:: sage: A.<x,y,z> = AffineSpace(QQ, 3) sage: C = Curve([y^2 - x^5, x - z], A) sage: C.singular_points() [(0, 0, 0)] :: sage: R.<a> = QQ[] sage: K.<b> = NumberField(a^8 - a^4 + 1) sage: P.<x,y,z> = ProjectiveSpace(QQ, 2) sage: C = Curve([359/12*x*y^2*z^2 + 2*y*z^4 + 187/12*y^3*z^2 + x*z^4\ + 67/3*x^2*y*z^2 + 117/4*y^5 + 9*x^5 + 6*x^3*z^2 + 393/4*x*y^4\ + 145*x^2*y^3 + 115*x^3*y^2 + 49*x^4*y], P) sage: C.singular_points(K) [(b^6 : -b^6 : 1), (-b^6 : b^6 : 1), (1/2*b^5 + 1/2*b^3 - 1/2*b - 1 : 1 : 0), (-1/2*b^5 - 1/2*b^3 + 1/2*b - 1 : 1 : 0), (2/3*b^4 - 1/3 : 0 : 1), (-2/3*b^4 + 1/3 : 0 : 1)]
625941b16fece00bbac2d4a8
def contour(self, z=0):
    raise NotImplementedError
Contour of surface in the xy (or any parallel) plane. Parameters ---------- z : float The z-level of the plane the contour is taken at. Returns ------- A contour line or lines; type and format are dependent on the subclass.
625941b16aa9bd52df036b0f
def __init__(self, log_path=None, config_path=None, logger=None, printer=None):
    self.logger = logger or logging.getLogger(__name__)
    self.printer = printer or pprint.PrettyPrinter()
    self.log_path = log_path or (os.getcwd() + "/report.log")
    self._bar_fmt = "{l_bar}{bar}|{n_fmt}/{total_fmt}{postfix}"
    self._env_key = "LOG_CFG"
    self._default_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "standard": {
                "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            }
        },
        "handlers": {
            "default": {
                "level": "INFO",
                "class": "logging.StreamHandler",
                "formatter": "standard",
            },
            "file_default": {
                "level": "INFO",
                "formatter": "standard",
                "class": "logging.handlers.RotatingFileHandler",
                "filename": self.log_path,
                "encoding": "utf8",
                "maxBytes": 10485760,
                "backupCount": 20,
            },
        },
        "loggers": {
            "": {
                "handlers": ["default", "file_default"],
                "level": "INFO",
                "propagate": True,
            }
        },
    }
    self._setup_logger(config_path)
Initialize the reporter Attributes ---------- log_path : str (default is :code:`None`) Sets the default log path (overridden when :code:`path` is given to :code:`_setup_logger()`) config_path : str (default is :code:`None`) Sets the configuration path for custom loggers logger : logging.Logger (default is :code:`None`) The logger object. By default, it creates a new :code:`Logger` instance printer : pprint.PrettyPrinter (default is :code:`None`) A printer object. By default, it creates a :code:`PrettyPrinter` instance with default values
625941b18a349b6b435e7eea
def get_manifest_key(self):
    return self._generate_path("manifest_relpath")
Return the path to the manifest file.
625941b123849d37ff7b2e08
def lagder(c, m=1, scl=1, axis=0):
    c = np.array(c, ndmin=1, copy=1)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        c = c[:1] * 0
    else:
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            for j in range(n, 1, -1):
                der[j - 1] = -c[j]
                c[j - 1] += c[j]
            der[0] = -c[1]
            c = der
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
Differentiate a Laguerre series. Returns the Laguerre series coefficients `c` differentiated `m` times along `axis`. At each iteration the result is multiplied by `scl` (the scaling factor is for use in a linear change of variable). The argument `c` is an array of coefficients from low to high degree along each axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``. Parameters ---------- c : array_like Array of Laguerre series coefficients. If `c` is multidimensional the different axes correspond to different variables with the degree in each axis given by the corresponding index. m : int, optional Number of derivatives taken, must be non-negative. (Default: 1) scl : scalar, optional Each differentiation is multiplied by `scl`. The end result is multiplication by ``scl**m``. This is for use in a linear change of variable. (Default: 1) axis : int, optional Axis over which the derivative is taken. (Default: 0). .. versionadded:: 1.7.0 Returns ------- der : ndarray Laguerre series of the derivative. See Also -------- lagint Notes ----- In general, the result of differentiating a Laguerre series does not resemble the same operation on a power series. Thus the result of this function may be "unintuitive," albeit correct; see Examples section below. Examples -------- >>> from numpy.polynomial.laguerre import lagder >>> lagder([ 1., 1., 1., -3.]) array([ 1., 2., 3.]) >>> lagder([ 1., 0., 0., -4., 3.], m=2) array([ 1., 2., 3.])
625941b163b5f9789fde6e5a
def original_url(self, fieldname):
    url = self.context.absolute_url()
    return '{0}/images/{1}'.format(url, fieldname)
Returns the url to the unscaled image
625941b145492302aab5e036
def DeleteAllEntities(kind):
    if not kind:
        raise ValueError('"kind" cannot be empty')
    keys, _, more = ndb.Query(kind=kind).fetch_page(
        QUERY_PAGE_LIMIT, keys_only=True)
    logging.info('Fetched %d keys; more=%r', len(keys), more)
    ndb.delete_multi(keys)
    if more:
        deferred.defer(DeleteAllEntities, kind)
DELETES ALL ENTITIES OF KIND |kind|. Args: kind: Required string name of model.
625941b1bf627c535bc12f46
def get_associations(self, ontology=None):
    if ontology is not None and ontology not in ("P", "F", "C"):
        raise GeneOntologyError(f"Not a valid ontology: {ontology}")
    if not hasattr(self, "all_associations"):
        self.all_associations = read_gaf(self.associations_path)
    all_associations = copy.deepcopy(self.all_associations)
    wanted_genes = set(rec["DB_Object_ID"] for rec in self)
    associations = self.remove_unwanted_genes(wanted_genes, all_associations)
    if ontology is not None:
        d = self.ontology2term()
        accepted_terms = d[ontology]
        for gene, go_terms in associations.items():
            for go_id in go_terms.copy():
                if go_id in self.go_dag:
                    if go_id not in accepted_terms:
                        go_terms.remove(go_id)
    self.associations = associations
    return associations
Get associations of gene IDs to GO terms. Ontologies: P = biological process, F = molecular function, C = cellular component # Arguments ontology: str (optional), one of {"P", "F", "C"} # Returns dict: maps gene IDs to the GO terms they are annotated with # Raises GeneOntologyError: if `ontology` is not valid
625941b1293b9510aa2c3009
def calcmeplzsenpai(inputx):
    return (145 * (inputx ** 4)) + (349 * (inputx ** 3)) - (914 * (inputx ** 2)) - (112 * inputx) - 444
WOW Notice me Senpai!
625941b167a9b606de4a7c33
def get_load_balancer_pool_statistics_with_http_info(self, service_id, pool_id, **kwargs):
    all_params = ['service_id', 'pool_id', 'source']
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_load_balancer_pool_statistics" % key
            )
        params[key] = val
    del params['kwargs']
    if ('service_id' not in params or
            params['service_id'] is None):
        raise ValueError("Missing the required parameter `service_id` when calling `get_load_balancer_pool_statistics`")
    if ('pool_id' not in params or
            params['pool_id'] is None):
        raise ValueError("Missing the required parameter `pool_id` when calling `get_load_balancer_pool_statistics`")
    collection_formats = {}
    path_params = {}
    if 'service_id' in params:
        path_params['service-id'] = params['service_id']
    if 'pool_id' in params:
        path_params['pool-id'] = params['pool_id']
    query_params = []
    if 'source' in params:
        query_params.append(('source', params['source']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    auth_settings = ['BasicAuth']
    return self.api_client.call_api(
        '/loadbalancer/services/{service-id}/pools/{pool-id}/statistics', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='LbPoolStatistics',
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
Get the statistics of load balancer pool # noqa: E501 Returns the statistics of the given load balancer pool by given load balancer service id and load balancer pool id. Currently, only realtime mode is supported. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_load_balancer_pool_statistics_with_http_info(service_id, pool_id, async_req=True) >>> result = thread.get() :param async_req bool :param str service_id: (required) :param str pool_id: (required) :param str source: Data source type. :return: LbPoolStatistics If the method is called asynchronously, returns the request thread.
625941b16fece00bbac2d4a9
def convertSessionsToJSON(session):
    if len(session) < 10:
        return errorMessage("passed wrong amount of values to convertSessionsToJSON, it needs all elements in session table")
    result = {
        'sessionID': session[0],
        'userID': session[1],
        'moduleID': session[2],
        'sessionDate': str(session[3]),
        'playerScore': session[4],
        'startTime': str(session[5]),
        'endTime': str(session[6]),
        'platform': session[7],
        'mode': session[8]
    }
    if len(session) >= 11:
        result['moduleName'] = session[10]
    return result
Converts the session object into a JSON object and returns the latter. A session record has 10 fields. If the passed in object has exactly 10 fields, it is assumed to be just a regular session object, and so it is converted into a JSON object whose keys are the respective field names. But if the passed in object's size is greater than 10, it is assumed to have the module name as the 11th element, so that is appended as well. Keyword arguments: session -- the session record object (list/array)
625941b1e64d504609d745b6
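A usage sketch for convertSessionsToJSON above with a made-up 11-element session row; the 11th field carries the module name:

row = (7, 42, 3, '2021-05-01', 880, '10:00:00', '10:25:00', 'web', 'quiz', None, 'Colors')
result = convertSessionsToJSON(row)
print(result['sessionID'], result['moduleName'])  # 7 Colors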
def load_configs(cfile):
    try:
        with open(cfile, "r") as conf_file:
            conf = yaml.load(conf_file)
    except:
        e = sys.exc_info()[0]
        print("Could not load: {}".format(e))
        conf = dict()
    return conf
Load the config from the given absolute path file name
625941b121a7993f00bc7a5d
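A usage sketch for load_configs above. It assumes an older PyYAML where yaml.load accepts a single argument; newer versions require an explicit Loader, in which case the bare except above would swallow the error and return an empty dict. The file contents are illustrative:

with open("example.yml", "w") as fh:
    fh.write("host: localhost\nport: 5432\n")

conf = load_configs("example.yml")
print(conf["host"], conf["port"])  # localhost 5432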
def create_ws_lex(*lex_list):
    f_lex = _tempfile.NamedTemporaryFile(mode='w')
    lex_file = f_lex.name
    for lex in lex_list:
        print('\t'.join(lex), file=f_lex)
    f_lex.flush()
    return lex_file, f_lex
Generate CKIP word segmentation lexicon file. Parameters ---------- *lex_list : Tuple[str, str] the lexicon word and its POS-tag. Returns ------- lex_file : str the name of the lexicon file. f_lex : TextIO the file object. .. attention:: Remember to close **f_lex** manually.
625941b1d164cc6175782abd
def import_txt_files(self, path):
    print("Importing text files...")
    txt_files = glob.glob(os.path.join(os.getcwd(), path, "*.txt"))
    corpus = []
    try:
        for individual_file in txt_files:
            with open(individual_file) as f_input:
                corpus.append(f_input.read())
    except:
        print("ERROR: Could not import text files. Please ensure that you entered the correct directory path " +
              "and that all of the files you want to analyze have a .txt extension.")
    print("Number of documents in corpus: ", len(corpus))
    return corpus
Loads a series of text files into Python. Input ----------------------------------------- path : string This is the directory path to your folder of text files. Note that the path must be in single ('') or double ("") quotations. To avoid confusion during processing, this directory should only contain the text files that you want to analyze for entities. Example - '/Users/myusername/Desktop/soul-of-reason-as-data/data' (where /data is the folder containing all text files) Returns ----------------------------------------- corpus : list Returns a Python list of the contents of all text files; the total number of documents in the corpus (the number of text files in your folder) is printed rather than returned.
625941b1e5267d203edcda18
def compute_ncc_fast(gray_left, gray_right, mask_halfwidth):
    m_height, m_width = gray_left.shape
    patches_left = np.zeros((m_height, m_width, (2 * mask_halfwidth + 1) ** 2))
    patches_right = np.zeros((m_height, m_width, (2 * mask_halfwidth + 1) ** 2))
    corr = np.zeros((m_height - 2 * mask_halfwidth,
                     m_width - 2 * mask_halfwidth,
                     m_width - 2 * mask_halfwidth))
    return corr
Faster version of compute_ncc(). Args: gray_left (np.array of shape (num_rows, num_cols)): left grayscale image gray_right (np.array of shape (num_rows, num_cols)): right grayscale image mask_halfwidth (int): Half-size of the square neighbourhood used for computing NCC. Thus a patch of size (2*mask_halfwidth+1, 2*mask_halfwidth+1) will be used. Returns: corr (np.array of shape (num_rows - 2*mask_halfwidth, num_cols - 2*mask_halfwidth, num_cols - 2*mask_halfwidth)): Array containing the normalized cross-correlation (NCC) between patches in the two images. The value corr[r, c_l, c_r] denotes the NCC between the patch centered at (r + mask_halfwidth, c_l + mask_halfwidth) in the left image and the patch centered at (r + mask_halfwidth, c_r + mask_halfwidth) at the right image.
625941b124f1403a926008e2
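The stub above allocates its buffers but never fills them; a minimal self-contained sketch of the NCC it is meant to compute, for one pair of patches:

import numpy as np

def ncc(patch_a, patch_b):
    # Normalized cross-correlation of two equally sized patches:
    # subtract each patch's mean, then take the cosine between the vectors.
    a = patch_a.astype(float).ravel()
    a -= a.mean()
    b = patch_b.astype(float).ravel()
    b -= b.mean()
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

rng = np.random.default_rng(0)
p = rng.random((5, 5))
print(ncc(p, p))          # ~1.0: a patch matches itself perfectly
print(ncc(p, 2 * p + 3))  # ~1.0: NCC is invariant to affine intensity changes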
def text2num(
    s: str,
    search_fraction: bool = True,
) -> Optional[Decimal]:
    n: Decimal = Decimal(0)
    prefix: Decimal = Decimal(0)
    s: str = cleanup(s)
    if s in ('k', 'm', 'b'):
        return None
    if NON_WRIT_RE.fullmatch(s):
        return Decimal(s)
    if not NUM_FRACTION_RE.fullmatch(s):
        p, s = MIXED_WRIT_RE.search(s).groups()
        if p:
            prefix = Decimal(p)
    if ONLY_BIG_WRIT_RE.search(s) and not prefix:
        s: str = f'one {s}'
    d: Decimal = Decimal(0)
    dnd = NUM_FRACTION_RE.search(s)
    fs = FRACTION_PTN_RE.search(s)
    q = QUARTER_RE.search(s)
    if q:
        s: str = QUARTER_RE.sub('', s)
        nu = q.groups()[0]
        d = text2num(nu) / 4
    elif dnd:
        dn, dd = dnd.groups()
        if dn.isdigit():
            d: Decimal = Decimal(dn) / Decimal(dd)
        s: str = NUM_FRACTION_SUB_RE.sub('', s)
    elif fs and search_fraction:
        try:
            s: str = FRACTION_PTN_RE.sub('', s)
            fe = fs.group(0)
            fn, fd = FRACTION_EXTRACT_PTN_RE.search(fe).groups()
            fn = text2num(fn, search_fraction=False)
            fd = text2num(fd, search_fraction=False)
            d = fn / fd
        except (ValueError, TypeError, ZeroDivisionError):
            pass
    s_split: List[str] = s.split()
    x1: int = 0
    for token in s_split:
        if token in ('a', 'and'):
            continue
        x: int = SMALL_NUMBERS_MAP.get(token, None)
        if x is not None:
            prefix += x
        elif 'hundred' in token and prefix != 0:
            prefix *= 100
        elif token == 'dozen' and prefix != 0:
            prefix *= 12
        elif token == 'half':
            if x1:
                prefix += (x1 * Decimal(0.5))
            else:
                prefix += Decimal(0.5)
        else:
            x = x1 = MAGNITUDE_MAP.get(token, None)
            if x is not None:
                n += prefix * x
                prefix = Decimal(0)
            else:
                raise RuntimeError(f'Unknown number: {token}')
    return Decimal(n + prefix + d)
Convert written amount into Decimal. :param s: written number :param search_fraction: extract fraction :return: Decimal or None
625941b191af0d3eaac9b789
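The function above leans on module-level regexes and lookup tables (SMALL_NUMBERS_MAP, MAGNITUDE_MAP) that this record does not include; a minimal self-contained sketch of its accumulate-then-scale technique, with illustrative stand-in tables:

SMALL = {'one': 1, 'two': 2, 'three': 3, 'seven': 7, 'twenty': 20, 'forty': 40}
MAGNITUDE = {'hundred': 100, 'thousand': 1000, 'million': 10 ** 6}

def simple_text2num(text):
    total, prefix = 0, 0
    for token in text.lower().split():
        if token in ('a', 'and'):
            continue
        if token in SMALL:
            prefix += SMALL[token]          # accumulate small numbers
        elif token == 'hundred':
            prefix *= 100                   # 'hundred' scales the running prefix
        elif token in MAGNITUDE:
            total += prefix * MAGNITUDE[token]  # commit the scaled prefix
            prefix = 0
        else:
            raise ValueError(f'Unknown number: {token}')
    return total + prefix

print(simple_text2num('seven hundred and twenty three thousand forty'))  # 723040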
def test_initial(self):
    while True:
        try:
            conn['test']['test'].remove(safe=True)
            break
        except:
            continue
    s.delete(q='*:*')
    self.assertEqual(conn['test']['test'].find().count(), 0)
    self.assertEqual(len(s.search('*:*')), 0)
    print("PASSED TEST INITIAL")
Tests search and assures that the databases are clear.
625941b1460517430c393f09
def merge(self, other_distro):
    if TRACE:
        logger.debug(f'merge: {self!r} with: {other_distro!r}')
    existing = self.to_dict()
    if other_distro:
        other_non_empty = {
            k: v for k, v in other_distro.to_dict().items() if v
        }
        existing.update(other_non_empty)
        if TRACE:
            logger.debug(f'merge: updated data: {existing!r}')
    if TRACE:
        logger.debug(f'merge: merged data: {existing!r}')
    return type(self)(**existing)
Return a new distro based on this Distro data updated with non-empty values from the ``other_distro`` Distro object.
625941b182261d6c526ab213
def on_new(self, evt):
    dlg = wx.MessageDialog(None, "Unsaved data will be lost",
                           style=wx.OK | wx.CENTRE | wx.CANCEL | wx.ICON_WARNING)
    dlg.SetOKCancelLabels("OK", "Cancel")
    result = dlg.ShowModal()
    if result == wx.ID_CANCEL:
        return
    try:
        self.unbind_events()
        self.destroy_windows()
        self.generate_windows()
        self.bind_events()
        self.sound_board_panel.bpm_numCtrl = self.play_menu.bpm
        self.instrumentsPanel.update_instruments()
    except IOError:
        wx.LogError("Something went wrong")
Warns that unsaved data will be lost, then rebuilds the program windows to a fresh state.
625941b13539df3088e2e0bb
def test_list(self):
    view = SnippetViewSet.as_view({'get': 'list'})
    request = self.factory.get('/snippets')
    response = view(request)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.data['count'], 1)
Snippet list may be retrieved without logging in
625941b10a50d4780f666c05
def var_handle_op(dtype, shape, container="", shared_name="", name=None):
    _ctx = _context._context or _context.context()
    if _ctx is not None and _ctx._thread_local_data.is_eager:
        try:
            _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
                _ctx._context_handle, _ctx._thread_local_data.device_name,
                "VarHandleOp", name, _ctx._post_execution_callbacks,
                "container", container, "shared_name", shared_name,
                "dtype", dtype, "shape", shape)
            return _result
        except _core._FallbackException:
            try:
                return var_handle_op_eager_fallback(
                    container=container, shared_name=shared_name, dtype=dtype,
                    shape=shape, name=name, ctx=_ctx)
            except _core._SymbolicException:
                pass
        except _core._NotOkStatusException as e:
            if name is not None:
                message = e.message + " name: " + name
            else:
                message = e.message
            _six.raise_from(_core._status_to_exception(e.code, message), None)
    dtype = _execute.make_type(dtype, "dtype")
    shape = _execute.make_shape(shape, "shape")
    if container is None:
        container = ""
    container = _execute.make_str(container, "container")
    if shared_name is None:
        shared_name = ""
    shared_name = _execute.make_str(shared_name, "shared_name")
    _, _, _op = _op_def_lib._apply_op_helper(
        "VarHandleOp", dtype=dtype, shape=shape, container=container,
        shared_name=shared_name, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("container", _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"), "dtype", _op.get_attr("dtype"),
              "shape", _op.get_attr("shape"))
    _execute.record_gradient(
        "VarHandleOp", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
Creates a handle to a Variable resource. Args: dtype: A `tf.DType`. the type of this variable. Must agree with the dtypes of all ops using this variable. shape: A `tf.TensorShape` or list of `ints`. The (possibly partially specified) shape of this variable. container: An optional `string`. Defaults to `""`. the container this variable is placed in. shared_name: An optional `string`. Defaults to `""`. the name by which this variable is referred to. name: A name for the operation (optional). Returns: A `Tensor` of type `resource`.
625941b1d7e4931a7ee9dc93
def run(self):
    while (self.percepts['current_position'] != self.percepts['target']).any() and self.frontier:
        self.act()
    print(self.percepts['current_position'])
    print(self.path)
Keeps the agent acting until it finds the target
625941b1046cf37aa974cac2
def __init__(__self__, *,
             ip: str,
             description: Optional[str] = None,
             ports: Optional[Sequence[int]] = None,
             services: Optional[Sequence[str]] = None):
    pulumi.set(__self__, "ip", ip)
    if description is not None:
        pulumi.set(__self__, "description", description)
    if ports is not None:
        pulumi.set(__self__, "ports", ports)
    if services is not None:
        pulumi.set(__self__, "services", services)
:param str ip: Source ip and netmask for the rule. (e.g. 10.56.72.0/24) :param str description: Description name of the rule. e.g. Default. :param Sequence[int] ports: Custom ports to be opened :param Sequence[str] services: Pre-defined service ports, see table below
625941b17b180e01f3dc4580
def cost(self, coefs):
    A = self.feed_forward(self.X, coefs)
    J = (1 / (2 * len(self.y))) * np.sum((A - self.y_hot) ** 2)
    regularization = np.dot(coefs ** 2, self.regularization_mask)
    return J + (self.alpha / 2) * regularization
Calculates the cost of the weights. :param coefs: :return:
625941b16aa9bd52df036b12
def get_shuffle_mode(self):
    try:
        return self.__read_property('Shuffle')
    except dbus.exceptions.DBusException as err:
        raise
Check if shuffle mode is enabled or disabled. :returns: 1 if shuffle is enabled, 0 if not
625941b1adb09d7d5db6c50c
def sections(self):
    try:
        sections = self.read(self.file_spec)
    except IOError:
        raise ConfigError("No initiation file named: %s" % (self.file_spec))
    return [section for section in sections]
Return a list of section names for the given file.
625941b18a43f66fc4b53de3
def tracked_kickstart_elements_get_refs_kickstart_test(self):
    appended_elements = [self._element1, self._element2, self._element3,
                         self._element4, self._element5, self._element6,
                         self._element7]
    elements = TrackedKickstartElements()
    for element in appended_elements:
        elements.append(element)
    element_refs = elements.get_references_from_elements(elements.all_elements)
    self.assertEqual(element_refs, self._expected_element_refs)
Test getting of element references.
625941b1b57a9660fec335ef
def _choose_split_index(self, X, y):
    y_entropy = self.impurity_criterion(y)
    lst_splits = []
    for coln in xrange(1, len(X[0])):
        for value in xrange(len(X)):
            X1, y1, X2, y2 = self._make_split(X, y, coln, X[value, coln])
            rst = self._information_gain(y, y1, y2)
            lst_splits.append((coln, X[value, coln], rst))
    srt_lst = sorted(lst_splits, key=itemgetter(2), reverse=True)
    if srt_lst[0][2] > y_entropy:
        return None, None, None
    else:
        ix = srt_lst[0][0]
        val = srt_lst[0][1]
        return ix, val, self._make_split(X, y, ix, val)
INPUT: - X: 2d numpy array - y: 1d numpy array OUTPUT: - index: int (index of feature) - value: int/float/bool/str (value of feature) - splits: (2d array, 1d array, 2d array, 1d array) Determine which feature and value to split on. Return the index and value of the optimal split along with the split of the dataset. Return None, None, None if there is no split which improves information gain. Call the method like this: >>> index, value, splits = self._choose_split_index(X, y) >>> X1, y1, X2, y2 = splits
625941b173bcbd0ca4b2bded
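The impurity_criterion and _information_gain helpers used above are not shown in this record; a self-contained sketch of the entropy-based information gain they presumably compute:

import numpy as np

def entropy(y):
    # Shannon entropy of a label array.
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return -np.sum(p * np.log2(p))

def information_gain(y, y1, y2):
    # Entropy reduction achieved by splitting y into y1 and y2.
    w1, w2 = len(y1) / len(y), len(y2) / len(y)
    return entropy(y) - (w1 * entropy(y1) + w2 * entropy(y2))

y = np.array([0, 0, 1, 1])
print(information_gain(y, y[:2], y[2:]))  # 1.0: a perfect split removes all entropy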
def testIncidentViewModel(self):
    pass
Test IncidentViewModel
625941b163b5f9789fde6e5c
def __getitem__(self, key: Union[slice, int]) -> Observable[_T]:
    if isinstance(key, slice):
        start, stop, step = key.start, key.stop, key.step
    else:
        start, stop, step = key, key + 1, 1
    from ..operators._slice import slice_
    return slice_(start, stop, step)(self)
Pythonic version of :func:`slice <reactivex.operators.slice>`. Slices the given observable using Python slice notation. The arguments to slice are `start`, `stop` and `step` given within brackets `[]` and separated by the colons `:`. It is basically a wrapper around the operators :func:`skip <reactivex.operators.skip>`, :func:`skip_last <reactivex.operators.skip_last>`, :func:`take <reactivex.operators.take>`, :func:`take_last <reactivex.operators.take_last>` and :func:`filter <reactivex.operators.filter>`. The following diagram helps you remember how slices works with streams. Positive numbers are relative to the start of the events, while negative numbers are relative to the end (close) of the stream. .. code:: r---e---a---c---t---i---v---e---! 0 1 2 3 4 5 6 7 8 -8 -7 -6 -5 -4 -3 -2 -1 0 Examples: >>> result = source[1:10] >>> result = source[1:-2] >>> result = source[1:-1:2] Args: key: Slice object Returns: Sliced observable sequence. Raises: TypeError: If key is not of type :code:`int` or :code:`slice`
625941b13c8af77a43ae3518
def swipe_to_right(self):
    window_size = self.get_size()
    width = window_size.get("width")
    height = window_size.get("height")
    self.driver.swipe(width * 4 / 5, height / 2, width / 5, height / 2, 500)
Swipe to the right. :return:
625941b1e1aae11d1e749a2b
def new_entity(self, attributes):
    entity = self.model.lookup(attributes['email'])
    if entity:
        raise BadValueError('user already exists')
    attributes['email'] = [attributes['email']]
    entity = self.model.from_dict(attributes)
    return entity
Creates a new entity with given attributes. :param attributes: (dictionary) default values loaded on object instantiation :return: entity with loaded attributes
625941b1187af65679ca4e97
def _update(self, start, end):
    means = self._moving_mean[start:end]
    tail_runner = self._moving_mean.tail_runner
    tail_size = tail_runner.tail_size
    n = tail_size
    if self._with_unbiased_correction:
        n -= 1
    variance = empty((end - start, 1), dtype=float)
    for idx, chunk, incomplete in tail_runner.tail_iterate(start, end, self._moving_mean.parent):
        mean = means[idx - start]
        if incomplete:
            variance[idx - start] = NaN
        else:
            variance[idx - start] = ((chunk - mean) ** 2).sum() / n
    stderr = None
    if self._use_stderr:
        stderr = variance ** 0.5
    if self._use_var and self._use_stderr:
        self._data[start:end] = hstack([variance, stderr])
    elif self._use_var:
        self._data[start:end] = variance
    elif self._use_stderr:
        self._data[start:end] = stderr
Adds calculation of the variance and/or standard error. :param start: The start index to update. :param end: The end index to update. :return:
625941b1e64d504609d745b8
def on_child_end(self, child_state):
    logger.warning('Probably missing implementation for '
                   '%r.on_child_end(%r): ', self, child_state)
B{May} be implemented in every subclass. If implemented, should contain the code which is called whenever any child transaction (i.e. a transaction executed from inside the C{self} one) is completed. @note: if overriding in a subclass, you may ignore calling the C{super} method (i.e. this one). Actually, it is suggested not to call C{super(...).on_child_end()}, because the existing implementation is made specifically to spot the transactions which have forgotten to implement their real C{on_child_end()}-s. That is, if you see the warning log like below, you should visit the violating transaction and create a proper C{.on_child_end} implementation. @param child_state: the state of the child transaction. @type child_state: AbstractTransaction.State @returns: either nothing, or a C{Deferred} which runs its callback when the actions really completed. @rtype: NoneType, Deferred
625941b1507cdc57c6306a42
def delete_ref(refname, oldvalue=None):
    assert refname.startswith(b'refs/')
    oldvalue = [] if not oldvalue else [oldvalue]
    p = subprocess.Popen([b'git', b'update-ref', b'-d', refname] + oldvalue,
                         env=_gitenv())
    _git_wait('git update-ref', p)
Delete a repository reference (see git update-ref(1)).
625941b10a366e3fb873e589
def get(self, request, **kwargs):
    slug = kwargs.get("article_slug")
    comment_id = kwargs.get("comment_pk")
    response = UpdateDestroyCommentsAPIView.check_exists(slug, comment_id)
    if not isinstance(response, list):
        return response
    comment = response[1]
    edits = CommentEditHistory.objects.filter(comment=comment).order_by('comment__created_at')
    serialized_comment = CommentSerializer(comment)
    serialized = CommentEditHistorySerializer(edits, many=True)
    final = dict()
    final.update({"comment": serialized_comment.data})
    if serialized.data:
        final.update({"previous_versions": serialized.data})
    return Response(final, status=status.HTTP_200_OK)
Get a single comment
625941b11d351010ab855897
def test_pharmaceutical_list(self):
    test_response = self.client.get('/parameter/pharmaceutical')
    self.assertEqual(test_response.status_code, 200)
    self.assertTrue('pharmaceutical_list' in test_response.context)
    self.assertTemplateUsed(test_response, 'pharmaceutical_list.html')
    self.assertEqual(test_response.context['pharmaceutical_list'][0].pk, 1)
    self.assertEqual(test_response.context['pharmaceutical_list'][0].__unicode__(), u'Test Drug at 1 mg/kg, daily')
This tests the pharmaceutical-list view, ensuring that templates are loaded correctly. This view uses a user with superuser permissions so does not test the permission levels for this view.
625941b132920d7e50b27f45
def apply_variable_template_tags(line):
    return re.sub(r'\${\s*(\w+)\s*}',
                  TEMPLATE_VARIABLE_OPENING_TAG + r"\1" + TEMPLATE_VARIABLE_CLOSING_TAG,
                  line,
                  flags=re.UNICODE)
Replaces variable indicators ${ and } with tags, so subsequent formatting is easier.
625941b18e05c05ec3eea0e9
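An illustrative run of the substitution above as a self-contained snippet; the tag values '{{' and '}}' are assumptions for the demo, not the module's actual constants:

import re

OPEN, CLOSE = '{{', '}}'  # stand-ins for the template tag constants

def demo(line):
    return re.sub(r'\${\s*(\w+)\s*}', OPEN + r'\1' + CLOSE, line, flags=re.UNICODE)

print(demo('Hello ${ name }, id=${id}'))  # Hello {{name}}, id={{id}}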
def test_find_mssm_decay_groups(self):
    mssm = import_ufo.import_model('mssm')
    decay_mssm = decay_objects.DecayModel(mssm, True)
    decay_mssm.find_decay_groups()
    goal_groups = [[25, 35, 36, 37],
                   [1000001, 1000002, 1000003, 1000004, 1000005, 1000006,
                    1000011, 1000012, 1000013, 1000014, 1000015, 1000016,
                    1000021, 1000022, 1000023, 1000024, 1000025, 1000035,
                    1000037, 2000001, 2000002, 2000003, 2000004, 2000005,
                    2000006, 2000011, 2000013, 2000015]]
    for i, group in enumerate(decay_mssm['decay_groups']):
        self.assertEqual(sorted([p.get('pdg_code') for p in group]),
                         goal_groups[i])
Test finding the decay groups of the MSSM
625941b1d486a94d0b98dec2
def generate_filename_and_macs(items):
    hw_items = list(items)
    sysvars = {}
    sysvars['sysname'] = ''
    match_spec(('system', 'product', 'vendor', '$sysprodvendor'),
               hw_items, sysvars)
    if 'sysprodvendor' in sysvars:
        sysvars['sysname'] += (re.sub(r'\W+', '', sysvars['sysprodvendor']) + '-')
    match_spec(('system', 'product', 'name', '$sysprodname'),
               hw_items, sysvars)
    if 'sysprodname' in sysvars:
        sysvars['sysname'] = re.sub(r'\W+', '', sysvars['sysprodname']) + '-'
    match_spec(('system', 'product', 'serial', '$sysserial'),
               hw_items, sysvars)
    if 'sysserial' in sysvars:
        sysvars['sysname'] += re.sub(r'\W+', '', sysvars['sysserial'])
    if match_multiple(hw_items,
                      ('network', '$eth', 'serial', '$serial'),
                      sysvars):
        if 'sysserial' not in sysvars:
            sysvars['sysname'] += sysvars['serial'][0].replace(':', '-')
    else:
        LOG.warning('unable to detect network macs')
    return sysvars
Generate a file name for a piece of hardware using DMI information (product name and version); then, if the DMI serial number is available, use it, otherwise fall back to the first MAC address. As a result, we have a filename like: <dmi_product_name>-<dmi_product_version>-{dmi_serial_num|mac_address}
625941b1596a89723608983d
def _check_event_type(self, et):
    if et not in EVENT_TYPE_CONFIG.keys():
        raise MetadataPostException('{0} is not a valid event type'.format(et))
Validate user supplied event type strings.
625941b16fece00bbac2d4ac
def __init__(self, token):
    self.session = requests.Session()
    self.token = token
    self.session.headers.update({"Authorization": f"bearer {token}"})
    self.session.headers.update({"User-Agent": f"thoth_sesheta_topic_checker/{sesheta.__version__}"})
Init with some sane defaults.
625941b13317a56b869399dd
def __after(self, location: str) -> tuple:
    result_dict = WeatherSearcher().naver_search(location)
    result, josa = WeatherEditor().edit_after(result_dict)
    return WeatherAnswerer().morning_afternoon_form(location, "모레", result, josa), result_dict
Searches the weather for the day after tomorrow and composes the answer. :param location: region :return: weather for the day after tomorrow
625941b176d4e153a657e8a5
def create_from_data(self, repository, diff_file_name, diff_file_contents,
                     parent_diff_file_name, parent_diff_file_contents,
                     diffset_history, basedir, request, base_commit_id=None,
                     save=True):
    from reviewboard.diffviewer.diffutils import convert_to_unicode
    from reviewboard.diffviewer.models import FileDiff
    tool = repository.get_scmtool()
    parser = tool.get_parser(diff_file_contents)
    files = list(self._process_files(
        parser, basedir, repository, base_commit_id, request,
        check_existence=(not parent_diff_file_contents)))
    if len(files) == 0:
        raise EmptyDiffError(_("The diff file is empty"))
    files.sort(cmp=self._compare_files, key=lambda f: f.origFile)
    parent_files = {}
    parent_commit_id = None
    if parent_diff_file_contents:
        diff_filenames = set([f.origFile for f in files])
        parent_parser = tool.get_parser(parent_diff_file_contents)
        for f in self._process_files(parent_parser, basedir, repository,
                                     base_commit_id, request,
                                     check_existence=True,
                                     limit_to=diff_filenames):
            parent_files[f.origFile] = f
        parent_commit_id = parent_parser.get_orig_commit_id()
    diffset = self.model(
        name=diff_file_name, revision=0, basedir=basedir,
        history=diffset_history, repository=repository,
        diffcompat=DiffCompatVersion.DEFAULT,
        base_commit_id=base_commit_id)
    if save:
        diffset.save()
    encoding_list = repository.get_encoding_list()
    for f in files:
        if f.origFile in parent_files:
            parent_file = parent_files[f.origFile]
            parent_content = parent_file.data
            orig_rev = parent_file.origInfo
        else:
            parent_content = b''
            if parent_commit_id and f.origInfo != PRE_CREATION:
                orig_rev = parent_commit_id
            else:
                orig_rev = f.origInfo
        enc, orig_file = convert_to_unicode(f.origFile, encoding_list)
        enc, new_file = convert_to_unicode(f.newFile, encoding_list)
        dest_file = os.path.join(basedir, new_file).replace("\\", "/")
        if f.deleted:
            status = FileDiff.DELETED
        elif f.moved:
            status = FileDiff.MOVED
        elif f.copied:
            status = FileDiff.COPIED
        else:
            status = FileDiff.MODIFIED
        filediff = FileDiff(
            diffset=diffset,
            source_file=parser.normalize_diff_filename(orig_file),
            dest_file=parser.normalize_diff_filename(dest_file),
            source_revision=smart_unicode(orig_rev),
            dest_detail=f.newInfo,
            diff=f.data,
            parent_diff=parent_content,
            binary=f.binary,
            status=status)
        filediff.set_line_counts(raw_insert_count=f.insert_count,
                                 raw_delete_count=f.delete_count)
        if save:
            filediff.save()
    return diffset
Create a DiffSet from raw diff data. The diff_file_contents and parent_diff_file_contents parameters are strings with the actual diff contents.
625941b1287bf620b61d37e5
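A minimal usage sketch for create_from_data above; it assumes the surrounding Review Board code already provides a Repository, a DiffSetHistory, and the current HttpRequest, and the file name and basedir are illustrative only:

# Hypothetical call site; `repository`, `diffset_history` and `request`
# are assumed to exist in scope. Not a verbatim Review Board example.
with open('changes.diff', 'rb') as f:
    diff_contents = f.read()

diffset = DiffSet.objects.create_from_data(
    repository=repository,
    diff_file_name='changes.diff',
    diff_file_contents=diff_contents,
    parent_diff_file_name=None,
    parent_diff_file_contents=None,  # no parent diff in this sketch
    diffset_history=diffset_history,
    basedir='/trunk',
    request=request)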
def store(self, guid): <NEW_LINE> <INDENT> if guid == 'public': <NEW_LINE> <INDENT> if not self.public_store: <NEW_LINE> <INDENT> raise NotFoundError( "no public store for company '%s'" % self.name ) <NEW_LINE> <DEDENT> return self.public_store <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.server.store(guid)
Store for the given GUID :param guid: store guid :return: :class:`store <Store>` with given GUID.
625941b1bf627c535bc12f4a
def copy(self, cr, uid, id, default=None, context=None): <NEW_LINE> <INDENT> if default is None: <NEW_LINE> <INDENT> default = {} <NEW_LINE> <DEDENT> default.update({ 'name': self.pool.get('ir.sequence').get(cr, uid, 'credit.sj.guarantee.application'), 'submit_time': False, 'guarantee_user': False, 'guarantee_result': False, 'guarantee_result_note': False, 'route_line': False }) <NEW_LINE> return super(credit_sj_guarantee_application, self).copy(cr, uid, id, default=default, context=context)
Override of the copy method.
625941b16fece00bbac2d4ad
def gateway(): <NEW_LINE> <INDENT> if settings.CAS_GATEWAY == False: <NEW_LINE> <INDENT> raise ImproperlyConfigured('CAS_GATEWAY must be set to True') <NEW_LINE> <DEDENT> def wrap(func): <NEW_LINE> <INDENT> def wrapped_f(*args): <NEW_LINE> <INDENT> from django_cas.views import login <NEW_LINE> request = args[0] <NEW_LINE> if request.user.is_authenticated(): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> path_with_params = request.path + '?' + urlencode(request.GET.copy()) <NEW_LINE> if request.GET.get('ticket'): <NEW_LINE> <INDENT> return login(request, path_with_params, False, True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> gatewayed = request.GET.get('gatewayed') <NEW_LINE> if gatewayed == 'true': <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return login(request, path_with_params, False, True) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return func(*args) <NEW_LINE> <DEDENT> return wrapped_f <NEW_LINE> <DEDENT> return wrap
Authenticates a single sign-on session if a ticket is available, but doesn't redirect to the sign-in URL otherwise.
625941b13c8af77a43ae351a
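A sketch of applying the gateway() decorator above to a Django view; the view itself is hypothetical, and request.user.is_authenticated() is called as a method to match the old Django API the decorator already uses:

from django.http import HttpResponse

@gateway()
def landing_page(request):
    # Runs after an optional one-shot CAS gateway round trip.
    if request.user.is_authenticated():
        return HttpResponse('Hello, %s' % request.user.username)
    return HttpResponse('Hello, guest')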
def write_contents(lang_fd_map, lang_limit_map, category): <NEW_LINE> <INDENT> catQ = Queue() <NEW_LINE> catQ.put(category) <NEW_LINE> while not catQ.empty(): <NEW_LINE> <INDENT> cat = catQ.get() <NEW_LINE> printTitle(cat, prefix="category: ") <NEW_LINE> pages, sub_cats = retrieve_contents(cat) <NEW_LINE> [catQ.put(cat) for cat in sub_cats] <NEW_LINE> for lang in lang_fd_map: <NEW_LINE> <INDENT> if lang not in lang_limit_map: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> wiki.set_lang(lang) <NEW_LINE> print("\tlanguage: ", lang) <NEW_LINE> for wiki_page in pages: <NEW_LINE> <INDENT> printTitle(wiki_page, prefix="\t\tpage: ") <NEW_LINE> try: <NEW_LINE> <INDENT> wpage = wiki.page(wiki_page) <NEW_LINE> <DEDENT> except wiki.exceptions.DisambiguationError: <NEW_LINE> <INDENT> wpage = handleDisambiguation(wiki_page, lang) <NEW_LINE> if wpage is None: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> n_sentences = dump_content(lang_fd_map[lang], wpage) <NEW_LINE> lang_limit_map[lang] -= n_sentences <NEW_LINE> if lang_limit_map[lang] <= 0: <NEW_LINE> <INDENT> lang_fd_map[lang].close() <NEW_LINE> del lang_limit_map[lang] <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if len(lang_limit_map) == 0: <NEW_LINE> <INDENT> break
Starts from the given category and, in a top-down way using BFS, parses the content of all pages under the given category and its sub-categories at all levels, writing the sentences to the related files up to the given limit. :param: lang_fd_map - dictionary mapping each language prefix to the fd which stores the sentences of the given top category in that language :param: lang_limit_map - dictionary mapping each language prefix to the sentence limit for the file of that language. :param: category - the top category from which to start digging for pages and more sub-categories
625941b1187af65679ca4e99
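A hedged driver sketch for write_contents above; the category name, file paths, and sentence budgets are all made up:

# Illustrative setup: one output file and a sentence budget per language.
langs = ['en', 'de']
lang_fd_map = {lang: open('physics_%s.txt' % lang, 'w') for lang in langs}
lang_limit_map = {'en': 10000, 'de': 5000}

write_contents(lang_fd_map, lang_limit_map, 'Physics')

# Files whose budget ran out are closed inside write_contents;
# close whatever is still open.
for fd in lang_fd_map.values():
    if not fd.closed:
        fd.close()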
def test_import_record_with_invalid_backup(self): <NEW_LINE> <INDENT> export = self._create_exported_record_entry() <NEW_LINE> backup_driver = self.backup_mgr.service.get_backup_driver(self.ctxt) <NEW_LINE> _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) <NEW_LINE> imported_record = self._create_export_record_db_entry() <NEW_LINE> backup_hosts = [] <NEW_LINE> with mock.patch(_mock_record_import_class) as _mock_record_import: <NEW_LINE> <INDENT> _mock_record_import.side_effect = FakeBackupException('fake') <NEW_LINE> self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) <NEW_LINE> self.assertTrue(_mock_record_import.called) <NEW_LINE> <DEDENT> backup = db.backup_get(self.ctxt, imported_record) <NEW_LINE> self.assertEqual(backup['status'], 'error')
Test error handling when attempting an import of a backup record where the backup driver returns an exception.
625941b130bbd722463cbb3d
def notationDecl(self, QString, QString_1, QString_2): <NEW_LINE> <INDENT> return False
QXmlDefaultHandler.notationDecl(QString, QString, QString) -> bool
625941b166673b3332b91e09
def childEq(self, node, other): <NEW_LINE> <INDENT> raise NotImplementedError()
Returns equality of `node` and an `other` node as children. ``True`` if the child features of the two nodes are equal without considering the root. Subclasses must override this.
625941b191af0d3eaac9b78d
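Because childEq above must be overridden, here is a toy subclass sketch; the base class name and the `label` attribute are assumptions for illustration only:

class LabelTreeComparer(TreeComparer):  # TreeComparer stands in for the real base class
    def childEq(self, node, other):
        # Two nodes are equal as children when their (hypothetical) labels match.
        return node.label == other.label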
def stest_03(self): <NEW_LINE> <INDENT> self.new.lineEdit(self.new.news_time_loc, '2017--12-12') <NEW_LINE> self.new.buttonClick(self.new.news_author_loc) <NEW_LINE> sleep(1) <NEW_LINE> self.assertEqual(self.new.switch_to_alert().text, '不合法的日期格式或者日期超出限定范围,需要撤销吗?') <NEW_LINE> self.new.switch_to_alert().accept() <NEW_LINE> self.assertNotEqual(self.new.find_element(self.new.news_time_loc).get_attribute('value'), '2017--12-12') <NEW_LINE> self.new.lineEdit(self.new.news_time_loc, '21425d3') <NEW_LINE> self.new.buttonClick(self.new.news_author_loc) <NEW_LINE> self.assertEqual(self.new.switch_to_alert().text, '不合法的日期格式或者日期超出限定范围,需要撤销吗?') <NEW_LINE> self.new.switch_to_alert().dismiss() <NEW_LINE> self.assertEqual(self.new.find_element(self.new.news_time_loc).get_attribute('value'), '21425d3') <NEW_LINE> self.new.lineEdit(self.new.news_time_loc, '21425d3') <NEW_LINE> self.new.buttonClick(self.new.news_author_loc) <NEW_LINE> self.assertEqual(self.new.switch_to_alert().text, '不合法的日期格式或者日期超出限定范围,需要撤销吗?') <NEW_LINE> self.new.switch_to_alert().accept() <NEW_LINE> self.assertNotEqual(self.new.find_element(self.new.news_time_loc).get_attribute('value'), '21425d3')
Publish news - time entered in an invalid format
625941b14a966d76dd550d85
def deserialize(self, str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.joint is None: <NEW_LINE> <INDENT> self.joint = None <NEW_LINE> <DEDENT> end = 0 <NEW_LINE> start = end <NEW_LINE> end += 4 <NEW_LINE> (length,) = _struct_I.unpack(str[start:end]) <NEW_LINE> self.joint = [] <NEW_LINE> for i in range(0, length): <NEW_LINE> <INDENT> val1 = youbot_motion_control.msg.joint() <NEW_LINE> _x = val1 <NEW_LINE> start = end <NEW_LINE> end += 16 <NEW_LINE> (_x.position, _x.velocity, _x.acceleration, _x.jerk,) = _struct_4f.unpack(str[start:end]) <NEW_LINE> self.joint.append(val1) <NEW_LINE> <DEDENT> return self <NEW_LINE> <DEDENT> except struct.error as e: <NEW_LINE> <INDENT> raise genpy.DeserializationError(e)
unpack serialized message in str into this message instance :param str: byte array of serialized message, ``str``
625941b182261d6c526ab217
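A sketch of round-tripping the message above through bytes; it assumes `msg` is an instance of the generated message class, whose serialize() is the genpy counterpart of the deserialize() shown above:

from io import BytesIO

buf = BytesIO()
msg.serialize(buf)                  # `msg` is an existing message instance
clone = type(msg)()
clone.deserialize(buf.getvalue())   # repopulates `joint` from the bytes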
def project_redundancies_and_constraints(self, fq, H): <NEW_LINE> <INDENT> logger = logging.getLogger(__name__) <NEW_LINE> Nint = self.num_intcos <NEW_LINE> G = self.Gmat() <NEW_LINE> G_inv = symm_mat_inv(G, redundant=True) <NEW_LINE> Pprime = np.dot(G, G_inv) <NEW_LINE> C = self.constraint_matrix(fq) <NEW_LINE> if C is not None: <NEW_LINE> <INDENT> logger.debug("Adding constraints for projection.\n" + print_mat_string(C)) <NEW_LINE> CPC = np.zeros((Nint, Nint)) <NEW_LINE> CPC[:, :] = np.dot(C, np.dot(Pprime, C)) <NEW_LINE> CPCInv = symm_mat_inv(CPC, redundant=True) <NEW_LINE> P = np.zeros((Nint, Nint)) <NEW_LINE> P[:, :] = Pprime - np.dot(Pprime, np.dot(C, np.dot(CPCInv, np.dot(C, Pprime)))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> P = Pprime <NEW_LINE> <DEDENT> fq[:] = np.dot(P, fq.T) <NEW_LINE> logger.debug( "\n\tInternal forces in au, after projection of redundancies" + " and constraints.\n" + print_array_string(fq) ) <NEW_LINE> tempMat = np.dot(H, P) <NEW_LINE> H[:, :] = np.dot(P, tempMat) <NEW_LINE> if op.Params.print_lvl >= 3: <NEW_LINE> <INDENT> logger.debug("Projected (PHP) Hessian matrix\n" + print_mat_string(H))
Project redundancies and constraints out of forces and Hessian
625941b13346ee7daa2b2adb
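The projector algebra above, restated as a standalone NumPy sketch with random stand-in matrices (not real optking data): P = G G^- confines the forces to the non-redundant space, and P' = P - P C (C P C)^- C P also removes the constrained directions.

import numpy as np

rng = np.random.default_rng(0)
B = rng.normal(size=(9, 5))             # 9 redundant internals over 5 Cartesian dofs
G = B @ B.T                             # 9x9, rank 5
P = G @ np.linalg.pinv(G)               # np.linalg.pinv stands in for symm_mat_inv(..., redundant=True)

C = np.zeros((9, 9)); C[0, 0] = 1.0     # freeze the first coordinate
Pc = P - P @ C @ np.linalg.pinv(C @ P @ C) @ C @ P

fq = rng.normal(size=9)
print((Pc @ fq)[0])                     # ~0: no force along the frozen coordinate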
def test_empty(self): <NEW_LINE> <INDENT> self.assertEqual(1, solution([]))
Empty array
625941b1b57a9660fec335f2
def testPrepaymentRechargeResponse(self): <NEW_LINE> <INDENT> model = kinow_client.models.prepayment_recharge_response.PrepaymentRechargeResponse()
Test PrepaymentRechargeResponse
625941b10a50d4780f666c09
def create_superuser(self, email, name, password): <NEW_LINE> <INDENT> user = self.create_user(email, name, password) <NEW_LINE> user.is_superuser = True <NEW_LINE> user.is_staff = True <NEW_LINE> user.save(using=self._db) <NEW_LINE> return user
Creates and saves a new superuser with the given details.
625941b17b180e01f3dc4584
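Typical use of the manager method above; the credentials are placeholders:

from django.contrib.auth import get_user_model

User = get_user_model()
admin = User.objects.create_superuser(
    email='admin@example.com', name='Admin', password='change-me')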
def clDummy(*args, **kw): <NEW_LINE> <INDENT> pass
Dummy do-nothing function
625941b1711fe17d825420f1
def get(self, name_or_klass): <NEW_LINE> <INDENT> if not isinstance(name_or_klass, str): <NEW_LINE> <INDENT> name_or_klass = name_or_klass.__name__ <NEW_LINE> <DEDENT> for zone in range(4): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> panel = self._panels[zone][name_or_klass] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return panel <NEW_LINE> <DEDENT> <DEDENT> raise KeyError(name_or_klass)
Gets a specific panel instance. :param name_or_klass: Name or class of the panel to retrieve. :return: The specified panel instance.
625941b13317a56b869399e0
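A usage sketch for the panel lookup above; `manager` and `FoldingPanel` are hypothetical names:

try:
    panel = manager.get('FoldingPanel')   # by name or by class
except KeyError:
    panel = None                          # absent from every zone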
def beginPage(self, nextpos): <NEW_LINE> <INDENT> self.pagecount += 1 <NEW_LINE> mediatypelabel = "Plain" <NEW_LINE> mediasourcelabel = "Main" <NEW_LINE> mediasizelabel = "Default" <NEW_LINE> orientationlabel = "Portrait" <NEW_LINE> duplexmode = None <NEW_LINE> minfile = self.minfile <NEW_LINE> pos = nextpos - 2 <NEW_LINE> while pos > 0: <NEW_LINE> <INDENT> val = minfile[pos] <NEW_LINE> if val in (0x44, 0x48, 0x41): <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if val == 0x26: <NEW_LINE> <INDENT> mediasource = minfile[pos - 2] <NEW_LINE> mediasourcelabel = self.mediasources.get(mediasource, str(mediasource)) <NEW_LINE> pos -= 4 <NEW_LINE> <DEDENT> elif val == 0x25: <NEW_LINE> <INDENT> while (pos > 0) and (minfile[pos] != 0xc0): <NEW_LINE> <INDENT> pos -= 1 <NEW_LINE> <DEDENT> if pos > 0: <NEW_LINE> <INDENT> if minfile[pos-1] == 0xc8: <NEW_LINE> <INDENT> arraylength = minfile[pos+1] <NEW_LINE> mediasizelabel = minfile[pos+2:pos+2+arraylength].title() <NEW_LINE> pos -= 1 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> mediasize = minfile[pos+1] <NEW_LINE> mediasizelabel = self.mediasizes.get(mediasize, str(mediasize)) <NEW_LINE> <DEDENT> pos -= 1 <NEW_LINE> <DEDENT> <DEDENT> elif val == 0x28: <NEW_LINE> <INDENT> orientation = minfile[pos - 2] <NEW_LINE> orientationlabel = self.orientations.get(orientation, str(orientation)) <NEW_LINE> pos -= 4 <NEW_LINE> <DEDENT> elif val == 0x27: <NEW_LINE> <INDENT> savepos = pos <NEW_LINE> pos -= 1 <NEW_LINE> startpos = size = None <NEW_LINE> while pos > 0: <NEW_LINE> <INDENT> val = minfile[pos] <NEW_LINE> pos -= 1 <NEW_LINE> if val == 0xc8: <NEW_LINE> <INDENT> length = self.tags[minfile[pos+2]] <NEW_LINE> if length == 1: <NEW_LINE> <INDENT> startpos = pos + 4 <NEW_LINE> size = unpack("B", self.minfile[pos+3:startpos])[0] <NEW_LINE> <DEDENT> elif length == 2: <NEW_LINE> <INDENT> startpos = pos + 5 <NEW_LINE> size = unpack(self.unpackShort, self.minfile[pos+3:startpos])[0] <NEW_LINE> <DEDENT> elif length == 4: <NEW_LINE> <INDENT> startpos = pos + 7 <NEW_LINE> size = unpack(self.unpackLong, self.minfile[pos+3:startpos])[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise pdlparser.PDLParserError("Error on size at %s: %s" % (pos+2, length)) <NEW_LINE> <DEDENT> break <NEW_LINE> <DEDENT> <DEDENT> try: <NEW_LINE> <INDENT> mediatypelabel = minfile[startpos:startpos+size] <NEW_LINE> <DEDENT> except TypeError: <NEW_LINE> <INDENT> self.logdebug("PCL/XL parser problem at %i" % savepos) <NEW_LINE> <DEDENT> <DEDENT> elif val == 0x34: <NEW_LINE> <INDENT> duplexmode = "Simplex" <NEW_LINE> pos -= 2 <NEW_LINE> <DEDENT> elif val in (0x35, 0x36): <NEW_LINE> <INDENT> duplexmode = "Duplex" <NEW_LINE> pos -= 2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pos -= 1 <NEW_LINE> <DEDENT> <DEDENT> self.pages[self.pagecount] = { "copies": 1, "orientation": orientationlabel, "mediatype": mediatypelabel, "mediasize": mediasizelabel, "mediasource": mediasourcelabel, "duplex": duplexmode, } <NEW_LINE> return 0
Indicates the beginning of a new page, and extracts media information.
625941b15166f23b2e1a4ed4
def DCT(mspec, nceps): <NEW_LINE> <INDENT> ceps = scipy.fftpack.realtransforms.dct(mspec, type=2, norm="ortho", axis=-1) <NEW_LINE> return ceps[:nceps]
Discrete cosine transform; returns the first nceps cepstral coefficients.
625941b15e10d32532c5eca5
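A self-contained sketch of where the DCT step above sits in an MFCC-style pipeline; the 20-channel log mel spectrum is random stand-in data:

import numpy as np

rng = np.random.default_rng(0)
mspec = np.log(rng.uniform(0.1, 1.0, size=20))  # fake log mel filterbank output
ceps = DCT(mspec, nceps=12)                     # keep the 12 lowest coefficients
print(ceps.shape)                               # (12,)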
def to_dict(self): <NEW_LINE> <INDENT> return self._instance_document
Render this object as a dictionary.
625941b18a349b6b435e7ef1
def test_annotate(self): <NEW_LINE> <INDENT> sentences, mentions = annotate_document(self._doc, self._client) <NEW_LINE> self.assertEqual(3, len(sentences)) <NEW_LINE> self.assertEqual(21, len(mentions)) <NEW_LINE> m_he = mentions[0] <NEW_LINE> m_barack = mentions[6] <NEW_LINE> self.assertEqual(sentences[0], m_barack.sentence) <NEW_LINE> self.assertEqual(sentences[1], m_he.sentence)
Verify that, after processing the annotated document, the expected numbers of sentences and mentions are produced and that each checked mention is attached to the correct sentence.
625941b18a43f66fc4b53de7
def ueber3morgen_request(bot, update): <NEW_LINE> <INDENT> wanted_date = plusdays_date(4) <NEW_LINE> essens,status=get_food(wanted_date) <NEW_LINE> if status: <NEW_LINE> <INDENT> food_string= make_pretty_string(essens,wanted_date,update.message.from_user.first_name) <NEW_LINE> bot.send_message(chat_id=update.message.chat_id,text= food_string) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bot.send_message(chat_id=update.message.chat_id, text=emojize(Context.strings['mensa_false'], use_aliases=True))
Handler for the menu four days from today (wanted_date = plusdays(4)).
625941b1bf627c535bc12f4c
def assign_streaming_edge_service(self, cdn_region, pop=None): <NEW_LINE> <INDENT> assert type(cdn_region) is str <NEW_LINE> assert pop is None or type(pop) is str <NEW_LINE> cdn_region = self.cdn_region_map[cdn_region] <NEW_LINE> if pop is not None: <NEW_LINE> <INDENT> pop = pop.encode('utf-8') <NEW_LINE> <DEDENT> assert self.raw_ptr is not None <NEW_LINE> svc = lib.srl__router__assign_streaming_edge_service(self.raw_ptr, cdn_region, pop) <NEW_LINE> if not svc: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return StreamingEdgeService(svc)
Assign streaming edge service from a given region and POP (if given).
625941b1d8ef3951e32432b2
def is_now(self) -> bool: <NEW_LINE> <INDENT> return type(self) == Now
Returns: bool: True if `self` is exactly of type `Now`, False otherwise
625941b16fece00bbac2d4af
def p_spdx_version_1(self, p): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if six.PY2: <NEW_LINE> <INDENT> value = p[2].decode(encoding='utf-8') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> value = p[2] <NEW_LINE> <DEDENT> self.builder.set_doc_version(self.document, value) <NEW_LINE> <DEDENT> except CardinalityError: <NEW_LINE> <INDENT> self.more_than_one_error('SPDXVersion', p.lineno(1)) <NEW_LINE> <DEDENT> except SPDXValueError: <NEW_LINE> <INDENT> self.error = True <NEW_LINE> msg = ERROR_MESSAGES['DOC_VERSION_VALUE'].format(p[2], p.lineno(1)) <NEW_LINE> self.logger.log(msg)
spdx_version : DOC_VERSION LINE
625941b1d268445f265b4be9
def id(self): <NEW_LINE> <INDENT> return self.data.id
Returns the id of the proc @rtype: str @return: Proc uuid
625941b1de87d2750b85fb04
def _echelon_matrix_richcmp(self, other, op): <NEW_LINE> <INDENT> if self is other: <NEW_LINE> <INDENT> return rich_to_bool(op, 0) <NEW_LINE> <DEDENT> if not isinstance(other, FreeModule_generic): <NEW_LINE> <INDENT> return NotImplemented <NEW_LINE> <DEDENT> lx = self.ambient_vector_space() <NEW_LINE> rx = other.ambient_vector_space() <NEW_LINE> if lx != rx: <NEW_LINE> <INDENT> return lx._echelon_matrix_richcmp( rx, op) <NEW_LINE> <DEDENT> lx = self.dimension() <NEW_LINE> rx = other.dimension() <NEW_LINE> if lx != rx: <NEW_LINE> <INDENT> return richcmp_not_equal(lx, rx, op) <NEW_LINE> <DEDENT> lx = self.base_ring() <NEW_LINE> rx = other.base_ring() <NEW_LINE> if lx != rx: <NEW_LINE> <INDENT> return richcmp_not_equal(lx, rx, op) <NEW_LINE> <DEDENT> return richcmp(self.echelonized_basis_matrix(), other.echelonized_basis_matrix(), op)
Compare the free module ``self`` with other. Modules are ordered by their ambient spaces, then by dimension, then in order by their echelon matrices. .. NOTE:: Use :meth:`is_submodule` to determine if one module is a submodule of another. EXAMPLES: First we compare two equal vector spaces. :: sage: from sage.structure.richcmp import op_LT,op_LE,op_EQ,op_NE,op_GT,op_GE sage: V = span([[1,2,3], [5,6,7], [8,9,10]], QQ) sage: W = span([[5,6,7], [8,9,10]], QQ) sage: V._echelon_matrix_richcmp(W,op_EQ) True Next we compare a one dimensional space to the two dimensional space defined above. :: sage: from sage.structure.richcmp import op_LT,op_LE,op_EQ,op_NE,op_GT,op_GE sage: M = span([[5,6,7]], QQ) sage: V._echelon_matrix_richcmp(M,op_EQ) False sage: M._echelon_matrix_richcmp(V, op_LT) True sage: V._echelon_matrix_richcmp(M, op_LT) False We compare a `\ZZ`-module to the one-dimensional space above.:: sage: from sage.structure.richcmp import op_LT,op_LE,op_EQ,op_NE,op_GT,op_GE sage: V = span([[5,6,7]], ZZ).scale(1/11); V Free module of degree 3 and rank 1 over Integer Ring Echelon basis matrix: [5/11 6/11 7/11] sage: V._echelon_matrix_richcmp(M, op_LT) True sage: M._echelon_matrix_richcmp(V, op_LT) False
625941b1462c4b4f79d1d44b
def __call__(self, uid, fname, value): <NEW_LINE> <INDENT> res = {'errmsg': ''} <NEW_LINE> rc = getToolByName(aq_inner(self.context), 'reference_catalog') <NEW_LINE> instance = rc.lookupObject(uid) <NEW_LINE> if instance is None: <NEW_LINE> <INDENT> instance = self.context <NEW_LINE> <DEDENT> field = instance.getField(fname) <NEW_LINE> if field and field.type not in SKIP_VALIDATION_FIELDTYPES: <NEW_LINE> <INDENT> return super(InlineValidationView, self).__call__(uid, fname, value) <NEW_LINE> <DEDENT> self.request.response.setHeader('Content-Type', 'application/json') <NEW_LINE> return json.dumps(res)
Validate a given field. Return any error messages.
625941b13539df3088e2e0c0
def test_unicode_sockopts(self): <NEW_LINE> <INDENT> topic = "tést" <NEW_LINE> if str is not unicode: <NEW_LINE> <INDENT> topic = topic.decode('utf8') <NEW_LINE> <DEDENT> p,s = self.create_bound_pair(zmq.PUB, zmq.SUB) <NEW_LINE> self.assertEqual(s.send_unicode, s.send_unicode) <NEW_LINE> self.assertEqual(p.recv_unicode, p.recv_unicode) <NEW_LINE> self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic) <NEW_LINE> self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic) <NEW_LINE> s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16') <NEW_LINE> self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic) <NEW_LINE> s.setsockopt_unicode(zmq.SUBSCRIBE, topic) <NEW_LINE> self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY) <NEW_LINE> self.assertRaisesErrno(zmq.EINVAL, s.getsockopt_unicode, zmq.SUBSCRIBE) <NEW_LINE> st = s.getsockopt(zmq.IDENTITY) <NEW_LINE> self.assertEqual(st.decode('utf16'), s.getsockopt_unicode(zmq.IDENTITY, 'utf16')) <NEW_LINE> time.sleep(0.1) <NEW_LINE> p.send_unicode(topic,zmq.SNDMORE) <NEW_LINE> p.send_unicode(topic*2, encoding='latin-1') <NEW_LINE> self.assertEqual(topic, s.recv_unicode()) <NEW_LINE> self.assertEqual(topic*2, s.recv_unicode(encoding='latin-1'))
test setting/getting sockopts with unicode strings
625941b11d351010ab85589b
def test_password_too_short(self): <NEW_LINE> <INDENT> payload = { 'email': 'test@londonappdev.com', 'password': 'pw', 'name': 'Test', } <NEW_LINE> res = self.client.post(CREATE_USER_URL, payload) <NEW_LINE> self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) <NEW_LINE> user_exists = get_user_model().objects.filter( email=payload['email'] ).exists() <NEW_LINE> self.assertFalse(user_exists)
Test that the password must be more than 5 characters
625941b132920d7e50b27f49
def get_word(html): <NEW_LINE> <INDENT> Chinese_list = [] <NEW_LINE> Chinese_list_plus = [] <NEW_LINE> soup = BeautifulSoup(html, 'lxml') <NEW_LINE> [s.extract() for s in soup(['style', 'script', '[document]', 'head', 'title'])] <NEW_LINE> visible_text = soup.getText() <NEW_LINE> visible_text_001 = visible_text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ') <NEW_LINE> visible_text_01 = ' '.join(visible_text_001.split()) <NEW_LINE> words = re.findall("{(.+?)}", visible_text_01) <NEW_LINE> for word in words: <NEW_LINE> <INDENT> pre = re.compile(u'[\u4e00-\u9fa5-\,\。]') <NEW_LINE> res = re.findall(pre, word) <NEW_LINE> res1 = ''.join(res) <NEW_LINE> Chinese_list.append(res1) <NEW_LINE> <DEDENT> for i in Chinese_list: <NEW_LINE> <INDENT> if len(i) != 0: <NEW_LINE> <INDENT> Chinese_list_plus.append(i) <NEW_LINE> <DEDENT> <DEDENT> China = ''.join(Chinese_list_plus) <NEW_LINE> abc = re.sub(r"{(.*)}", '', visible_text_01) <NEW_LINE> Ultimate_text = abc + China <NEW_LINE> Chinese_list.clear() <NEW_LINE> Chinese_list_plus.clear() <NEW_LINE> return Ultimate_text
Args: html: HTML crawled via requests Returns: the plain text of the page
625941b1d8ef3951e32432b3
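A usage sketch for get_word above; the URL is a placeholder:

import requests

html = requests.get('https://example.com/article').text
text = get_word(html)    # visible text plus the Chinese pulled out of {...}
print(text[:200])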
def GoToUserTwitterProfile(self): <NEW_LINE> <INDENT> self.driver.find_element_by_xpath(XPATH_USER_PROFILE).click() <NEW_LINE> time.sleep(LOAD_TIME * 2)
Navigates to the user's Twitter profile.
625941b1627d3e7fe0d68bc4
@api_view(['GET','PUT']) <NEW_LINE> def object_upload(request, container, format=None): <NEW_LINE> <INDENT> url = 'http://10.129.103.86:5000/v3/auth/tokens' <NEW_LINE> headers = {'content-type': 'application/json'} <NEW_LINE> data = '\n{ "auth": {\n "identity": {\n "methods": ["password"],\n "password": {\n "user": {\n "name": "swift2",\n "domain": { "name": "default" },\n "password": "swift2"\n }\n }\n },\n "scope": {\n "project": {\n "name": "service2",\n "domain": { "name": "default" }\n }\n }\n }\n}' <NEW_LINE> r = requests.post(url, headers=headers, data=data) <NEW_LINE> token = r.headers.get('X-Subject-Token') <NEW_LINE> if request.method == 'GET': <NEW_LINE> <INDENT> r = requests.get('http://10.129.103.86:8080/v1/AUTH_0d979205409846a7a2950fe8279d1988/' + container, headers={'X-Auth-Token': token}).text <NEW_LINE> obj_arr = r.split("\n") <NEW_LINE> obj_arr.pop() <NEW_LINE> return Response(obj_arr) <NEW_LINE> <DEDENT> if request.method == 'PUT': <NEW_LINE> <INDENT> url = request.data <NEW_LINE> obj = os.path.basename(url) <NEW_LINE> r = requests.put('http://10.129.103.86:8080/v1/AUTH_0d979205409846a7a2950fe8279d1988/' + container + '/' + obj, headers={'X-Auth-Token': token}, data=open(url, "rb")).text <NEW_LINE> return Response(r)
Generates a fresh Keystone token on each request; GET lists the objects in the given Swift container, PUT uploads a local file into it.
625941b10383005118ecf35b
def lex_prefixed_str(self, prefix): <NEW_LINE> <INDENT> s = self.match(re.compile('[a-z]+[\'"]')) <NEW_LINE> if s.endswith("'"): <NEW_LINE> <INDENT> re1 = self.str_exp_single <NEW_LINE> re2 = self.str_exp_single_multi <NEW_LINE> if 'r' in prefix: <NEW_LINE> <INDENT> re1 = self.str_exp_raw_single <NEW_LINE> re2 = self.str_exp_raw_single_multi <NEW_LINE> <DEDENT> self.lex_str(re1, re2, self.str_exp_single3, self.str_exp_single3end, prefix) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> re1 = self.str_exp_double <NEW_LINE> re2 = self.str_exp_double_multi <NEW_LINE> if 'r' in prefix: <NEW_LINE> <INDENT> re1 = self.str_exp_raw_double <NEW_LINE> re2 = self.str_exp_raw_double_multi <NEW_LINE> <DEDENT> self.lex_str(re1, re2, self.str_exp_double3, self.str_exp_double3end, prefix)
Analyse a string literal with a prefix, such as r'...'.
625941b13317a56b869399e2
def sort(self, tags): <NEW_LINE> <INDENT> if not isinstance(tags, tuple): <NEW_LINE> <INDENT> tags = (tags,) <NEW_LINE> <DEDENT> ascend_descend = [SSort(abs(tag), TABLE_SORT_DESCEND if tag < 0 else TABLE_SORT_ASCEND) for tag in tags] <NEW_LINE> self.mapitable.SortTable(SSortOrderSet(ascend_descend, 0, 0), 0)
Sort table. :param tags: Tag(s) on which to sort.
625941b18a43f66fc4b53de8
def set_device(self, gpus, device): <NEW_LINE> <INDENT> self.device = device <NEW_LINE> if len(gpus) > 1: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.model_with_loss = self.model_with_loss.to(self.device) <NEW_LINE> <DEDENT> for state in self.optimizer.state.values(): <NEW_LINE> <INDENT> for key, value in state.items(): <NEW_LINE> <INDENT> if isinstance(value, torch.Tensor): <NEW_LINE> <INDENT> state[key] = value.to(device=device, non_blocking=True)
Place model_with_loss and the optimizer on the appropriate device (CPU or GPUs). :param gpus: list of usable GPU indices, e.g. [0, 1, 2] when using 3 GPUs :param device: the device to use: 'cpu'/'cuda' :return: None
625941b14f6381625f1147bd
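A hedged sketch of calling set_device above; `trainer` stands for whatever object defines the method, with model_with_loss and optimizer already attached:

import torch

gpus = [0] if torch.cuda.is_available() else []
device = torch.device('cuda' if gpus else 'cpu')
trainer.set_device(gpus, device)   # moves the model and the optimizer's state tensors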