| text (string) | source (string) |
|---|---|
n = int(input())
a = ['0'] * n
print(a)
def gray(i):
    # Base case: last bit; print the current code, flip the bit, print again.
    if i == n - 1:
        print("".join(a))
        a[i] = '1' if a[i] == '0' else '0'
        print("".join(a))
        return
    # Recurse twice, flipping this bit between the two recursive calls.
    c = 1
    while c <= 2:
        gray(i + 1)
        if c != 2:
            a[i] = '1' if a[i] == '0' else '0'
        c += 1
gray(0)
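# Example (illustration): for n = 2 the recursion prints the reflected Gray
# code sequence 00, 01, 11, 10 (after the initial ['0', '0'] list is echoed).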
|
code
|
from car import Car
import math
import numpy as np
import random
import shared as g
class BufferBuilder(Car):
def __init__(self, sim, lane, speed, maxspeed, id, carAhead, carUpAhead, carDownAhead, laneidx, size, canvasheight,
lanes, slowdown):
super(BufferBuilder, self).__init__(sim, lane, speed, maxspeed, id, carAhead, carUpAhead, carDownAhead, laneidx, size,
canvasheight, lanes, slowdown)
self.aheadbufmin = 2
self.aheadbufmax = 8
self.accel = 0
self.delay = 1500
self.reaction = 0
self.color = g.buffercolor
self.max_accel = 0.002
self.name = "bufferbuilder"
def general_behavior(self):
        # Use different acceleration functions depending on the difference between v0 and vF.
        # Currently this just linearly accelerates (or decelerates) to the speed of the car ahead.
if (self.ahead != None):
if(self.ahead.posx - self.posx - (self.length * 1.2) < 0):
self.accel = -self.max_accel * 10
if(self.ahead.posx - self.posx - (self.length) - self.speedx < 0.05):
self.accel = -self.speedx
elif (self.aheadbufmax * self.length >= self.ahead.posx - self.posx): # if at max buffer
prevAccel = self.accel # save previous accel
# find distance between the two cars
dist = self.ahead.posx - self.posx - (self.aheadbufmin * self.length)
if (self.reaction == 0): #if not reacting
# find accel so that after dist, this car will be matching the speed of front car
safespeed = 0.000001
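                    # Constant-acceleration kinematics: v_f^2 = v_0^2 + 2*a*d  =>  a = (v_f^2 - v_0^2) / (2*d)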
self.accel = (math.pow(self.ahead.speedx + safespeed, 2) - math.pow(self.speedx + safespeed, 2)) / (dist * 2)
#check accel changed from + to - or vice versa
if ((prevAccel < 0 and self.accel > 0) or (prevAccel > 0 and self.accel < 0)):
self.accel = 0 #dont move car
#self.reaction = 100 #set delay
else:
pass #self.reaction -= 1
if (prevAccel <= 0 and self.accel > 0 and self.speedx < 1 and self.aheadbufmin * self.length >= self.ahead.posx - self.posx):
self.accel = 0
else:
if (self.speedx < self.maxspeed):
self.accel = self.speedx * .005
        else:  # this is the front car (no car ahead)
# for delay amount of time make car come to a stop
if (self.SLOWDOWN and self.delay > g.TICKS):
self.accel = -self.speedx * .003
# after delay accel to maxspeed
else:
if (self.speedx < self.maxspeed):
#self.accel = self.speedx * .003
self.accel = self.max_accel
# limit accel to max
self.accel = min(self.accel, self.max_accel)
# update speed to reflect accel change
self.speedx = self.speedx + self.accel
self.speedx = min(self.inst_max, self.speedx)
|
code
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
class Search(object):
def __init__(self, vocab_size, pad, unk, eos):
self.pad = pad
self.unk = unk
self.eos = eos
self.vocab_size = vocab_size
self.scores_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
def step(self, step, lprobs, scores):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
class BeamSearch(Search):
def __init__(self, vocab_size, pad, unk, eos):
super().__init__(vocab_size, pad, unk, eos)
def step(self, step, lprobs, scores, output_beam_size):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
output_beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
class BeamSearchNMT(Search):
"""Google's Neural Machine Translation beam search implementation:
https://arxiv.org/pdf/1609.08144.pdf
"""
def __init__(self, vocab_size, pad, unk, eos, alpha=0.6):
super().__init__(vocab_size, pad, unk, eos)
self.alpha = alpha
def step(self, step, lprobs, scores, output_beam_size):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
        # Compute cumulative scores, then apply the length penalty
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
# Update by length penalty
length_penalty = self._length_penalty(step)
scores = lprobs / length_penalty
torch.topk(
scores.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
output_beam_size * 2,
scores.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
def _length_penalty(self, step):
"""Length penalty:
lp(Y) = ((5+|Y|)^alpha)/(5+1)^alpha
Args:
step: the current search step, starting at 0
Returns:
length_penalty: float
length penalty of current step.
"""
        # |Y| = step + 1 since `step` starts at 0, so lp = ((5 + |Y|) / 6) ** alpha
        return ((5.0 + step + 1) / 6.0) ** self.alpha
def _coverage_penalty(self, attn):
"""Coverage penalty:
cp(X;Y) =beta * sum(log(min(attn_i_j, 1.0)))
Args:
attn: (bsz x beam_size x src_seqlen)
the total attention prob of i-th source word.
Return:
coverage_penalty: (bsz x beam_size) or 0.0
the coverage penalty for each beam.
        TODO (@xiaolan): finish implementation
"""
return 0.0
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
Recommended setting in original paper:
num_groups: "Setting G=beam_size allows for the maximum exploration
of the space,"
diversity_strength: "We find a wide range of λ values (0.2 to 0.8) work well
for most tasks and datasets."
"""
def __init__(self, vocab_size, pad, unk, eos, num_groups, diversity_strength=0.5):
super().__init__(vocab_size, pad, unk, eos)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(vocab_size, pad, unk, eos)
def step(self, step, lprobs, scores, output_beam_size):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
num_groups = self.num_groups if beam_size > 1 else 1
if beam_size % num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(num_groups):
lprobs_g = lprobs[:, g::num_groups, :]
scores_g = scores[:, g::num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1))
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g, output_beam_size)
beams_buf.mul_(num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.indices_buf, self.beams_buf
class NgramBlocking:
def __init__(self, no_repeat_ngram_size):
""" Ngram blocking: avoid generating sequences with repetitive n-grams.
"""
self.no_repeat_ngram_size = no_repeat_ngram_size
def update(self, step, sequence, lprobs):
""" Update lprobs: set token probability as -math.inf for repetitive n-grams
"""
blocked_ngrams = self._gen_blocked_ngram(sequence[:step+1])
banned_tokens = self._gen_banned_tokens(step, sequence, blocked_ngrams)
lprobs[banned_tokens] = -math.inf
return lprobs
def _gen_blocked_ngram(self, sequence):
""" Generate a dict of ngrams that already exist in previous sequence.
e.g.,
Given a sequence of: [0, 1, 2, 3, 4]
And no_repeat_ngram_size = 3,
The blocked ngrams are:
{
(0, 1): [2]
(1, 2): [3]
(2, 3): [4]
}
Modified from https://github.com/pytorch/fairseq/sequence_generator.py#L338-L450
"""
blocked_ngrams = {}
for ngram in zip(*[sequence[i:].tolist() for i in range(self.no_repeat_ngram_size)]):
blocked_ngrams[tuple(ngram[:-1])] = blocked_ngrams.get(tuple(ngram[:-1]), []) + [ngram[-1]]
return blocked_ngrams
def _gen_banned_tokens(self, step, sequence, blocked_ngrams):
""" Generate tokens that should be banned for (step+1).
"""
banned_tokens = []
if step+2-self.no_repeat_ngram_size < 0:
return banned_tokens
ngram_index = tuple(sequence[step+2-self.no_repeat_ngram_size:step+1].tolist())
return blocked_ngrams.get(ngram_index, [])
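# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original module).
# The toy sequence and sizes below are assumptions made for the example:
# token 2 previously completed the 3-gram (0, 1, 2), so it is banned when the
# prefix (0, 1) comes up again.
if __name__ == "__main__":
    blocker = NgramBlocking(no_repeat_ngram_size=3)
    sequence = torch.tensor([0, 1, 2, 0, 1])  # tokens generated so far
    lprobs = torch.zeros(5)                   # log-probs over a 5-token vocabulary
    lprobs = blocker.update(step=4, sequence=sequence, lprobs=lprobs)
    print(lprobs)  # lprobs[2] is now -inf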
|
code
|
#!/usr/bin/env python
# coding=utf-8
"""
@desc:
@author: Luo.lu
@date: 2019-07-10
"""
class Solution(object):
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: List[str]
"""
        # Reuse the idea from problem 139 to first check whether s can be
        # segmented at all; otherwise some edge cases may time out.
        if not s:
            return []
        res = [0] * (len(s) + 1)
        # Mark reachable split positions, starting from the beginning:
for word in wordDict:
if s.startswith(word):
res[len(word)] = 1
for i in range(len(s)):
if res[i]:
for word in wordDict:
if s[i:].startswith(word):
res[i + len(word)] = 1
new_res = []
if bool(res[-1]):
self.DFS(s, wordDict, new_res, "")
return new_res
    # Enumerate all segmentations via DFS
def DFS(self, s, wordDict, res, tmp):
if not s:
res.append(tmp.strip())
for word in wordDict:
if s.startswith(word):
self.DFS(s[len(word):], wordDict, res, tmp + word + " ")
else:
continue
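# Hedged usage example (illustration only; the classic LeetCode 140 test case):
if __name__ == "__main__":
    sol = Solution()
    print(sol.wordBreak("catsanddog", ["cat", "cats", "and", "sand", "dog"]))
    # expected (order may vary): ['cat sand dog', 'cats and dog']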
|
code
|
import os
import sys
import json
import matplotlib.pyplot as plt
import pandas as pd
def test_plot():
test_reses = []
for i in range(10):
res_path = './RESULTS/zoom20_rr%d/' % i
test_file = os.path.join(res_path, 'test.json')
with open(test_file, 'r') as f:
test_res = json.load(f)
test_res['rank_ratio'] = i / 10
test_reses.append(test_res)
test_reses = pd.DataFrame(test_reses)
test_reses = test_reses.set_index('rank_ratio')
test_reses['CIndexForSlide'].plot()
plt.show()
def train_plot():
train_reses = []
for i in range(10):
res_path = './RESULTS/zoom20_rr%d/' % i
train_file = os.path.join(res_path, 'train.csv')
train_res = pd.read_csv(train_file, index_col=0)
train_res.index.name = 'epoch'
        # Turn the columns into a MultiIndex of (metric, phase)
multiindex = [tuple(c.split('_')) for c in train_res.columns]
multiindex = pd.MultiIndex.from_tuples(
multiindex, names=['metric', 'phase'])
train_res.columns = multiindex
train_res = train_res.stack(level=[0, 1]).reset_index()
train_res['rank_ratio'] = i / 10
train_reses.append(train_res)
train_reses = pd.concat(train_reses)
train_reses.rename({0: 'value'}, axis=1, inplace=True)
all_metrics = train_reses['metric'].unique()
fig, axes = plt.subplots(nrows=2, ncols=2)
for i, p in enumerate(['train', 'valid']):
for j, m in enumerate(all_metrics):
ax = axes[i, j]
for rr in range(10):
rr /= 10
subdf = train_reses[
(train_reses['phase'] == p) &
(train_reses['metric'] == m) &
(train_reses['rank_ratio'] == rr)
]
xvalue = subdf['epoch'].values
yvalue = subdf['value'].values
ax.plot(xvalue, yvalue, label=('rr=%.1f' % rr))
ax.set_title('Phase: %s, Metric: %s' % (p, m))
ax.set_xlabel('epoch')
ax.set_ylabel(m)
plt.legend()
plt.show()
if __name__ == '__main__':
arg = sys.argv[1]
if arg == 'train':
train_plot()
elif arg == 'test':
test_plot()
else:
raise ValueError()
|
code
|
"""
Description
-----------
This module defines the :obj:`ParaMol.Force_field.force_field.ForceField` class, the ParaMol representation of a force field. It contains all the information about the force field terms and the corresponding parameters (including those that will not enter the optimization).
"""
import os, copy
import numpy as np
import logging
# ParaMol imports
from .force_field_term import *
class ForceField:
"""
ParaMol representation of a force field.
Parameters
----------
openmm_engine : :obj:`ParaMol.MM_engines.openmm.OpenMMEngine`
ParaMol representation of the OpenMMEngine
Attributes
----------
force_field : dict
        Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances. This mapping is constructed as given by OpenMM.
force_field_optimizable : dict
Same as before but only containing optimizable force field terms. Force groups that do not have optimizable force field terms will not be part of this dictionary.
force_groups : dict
        Dictionary that maps force group names to force group numbers, defined according to the information obtained from the OpenMM System.
optimizable_parameters : list
List that contains instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` that are optimizable.
optimizable_parameters_values : list of float/int
List that contains the values of the optimizable force field parameters. This is usually fed into the optimization itself.
"""
symmetry_group_default = "X"
def __init__(self, openmm_engine):
self._openmm = openmm_engine
self.force_field = None
self.force_field_optimizable = None
self.force_groups = None
self.optimizable_parameters = None
self.optimizable_parameters_values = None
# ------------------------------------------------------------ #
# #
# PUBLIC METHODS #
# #
# ------------------------------------------------------------ #
def create_force_field(self, opt_bonds=False, opt_angles=False, opt_torsions=False, opt_charges=False, opt_lj=False, opt_sc=False, ff_file=None):
"""
Method that wraps the methods create_force_field_from_openmm/read_ff_file and create_force_field_optimizable in order to ease the procedure of creating a ParaMol representation of a force field.
Notes
-----
If `ff_file` is not `None` the force field will be created from the provided file. The system stored in :obj:`ParaMol.MM_engines.openmm.OpenMMEngine` should contain exactly the same forces and force field terms as the ones in this file.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
opt_torsions : bool
Flag that signals whether or not the dihedral parameters will be optimized.
opt_charges : bool
            Flag that signals whether or not the charges will be optimized.
        opt_lj : bool
            Flag that signals whether or not the Lennard-Jones parameters will be optimized.
        opt_sc : bool
            Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors will be optimized.
ff_file : str
Name of the ParaMol force field file to be read.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
# Set empty Force Field and Force Group dictionaries
self.force_field = {}
self.force_groups = {}
if ff_file is None:
# No .ff file was provided - create parameter list from force field
logging.info("Creating force field directly from OpenMM.")
assert self._openmm is not None, "OpenMM was not set."
self.create_force_field_from_openmm(opt_bonds, opt_angles, opt_torsions, opt_charges, opt_lj, opt_sc)
else:
logging.info("Creating force from .ff file named '{}'.".format(ff_file))
# A .param file was provided - create parameter list from the information contained in this file.
assert os.path.exists(ff_file), "\t * ERROR: .param file provided - {} - does not exist."
self.read_ff_file(ff_file)
self.create_force_field_optimizable()
return self.force_field
def update_force_field(self, optimizable_parameters_values, symmetry_constrained=True):
"""
Method that updates the value of each Parameter object instance.
Parameters
----------
optimizable_parameters_values : list of float/int
List that contains the values of the optimizable force field parameters.
symmetry_constrained : bool
Whether or not the optimization is constrained by symmetries.
Returns
-------
optimizable_parameters : list of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter`
List that contains instances of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter` that are optimizable.
"""
if symmetry_constrained:
# Update the parameter list taking into account the symmetry constraints
symm_groups = {}
            # Iterate over all optimizable parameters; update all the parameters that belong to the default
            # symmetry group and save the new parameter values of the others
for i in range(len(self.optimizable_parameters)):
parameter = self.optimizable_parameters[i]
if parameter.symmetry_group == self.symmetry_group_default:
# If symmetry group of optimizable parameter is default just update it
parameter.value = optimizable_parameters_values[i]
else:
# Optimizable parameter does not belong to default symmetry group
if parameter.symmetry_group in symm_groups.keys():
# If symmetry group is already in symm_groups
if parameter.param_key not in symm_groups[parameter.symmetry_group].keys():
# If the param_key is not in symm_groups
symm_groups[parameter.symmetry_group][parameter.param_key] = optimizable_parameters_values[i]
else:
symm_groups[parameter.symmetry_group] = {}
symm_groups[parameter.symmetry_group][parameter.param_key] = optimizable_parameters_values[i]
# The only parameters that were not updated yet were the ones that do not belong to the default
# symmetry group. We have to iterate over force_field_optimizable and update them.
for force in self.force_field_optimizable:
# For a given force, iterate over all force field terms
for sub_force in self.force_field_optimizable[force]:
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize and parameter.symmetry_group != self.symmetry_group_default:
parameter.value = symm_groups[parameter.symmetry_group][parameter.param_key]
else:
for i in range(len(self.optimizable_parameters)):
self.optimizable_parameters[i].value = optimizable_parameters_values[i]
        # TODO: check if there's a better way to do this
# Make all scee, scnb positive and eps and sigma positive
if "Scaling14" in self.force_field_optimizable:
for sub_force in self.force_field_optimizable["Scaling14"]:
for ff_term in sub_force:
ff_term.parameters["scee"].value = abs(ff_term.parameters["scee"].value)
ff_term.parameters["scnb"].value = abs(ff_term.parameters["scnb"].value)
if "NonbondedForce" in self.force_field_optimizable:
for sub_force in self.force_field_optimizable["NonbondedForce"]:
for ff_term in sub_force:
ff_term.parameters["lj_eps"].value = abs(ff_term.parameters["lj_eps"].value)
ff_term.parameters["lj_sigma"].value = abs(ff_term.parameters["lj_sigma"].value)
return self.optimizable_parameters
def create_force_field_from_openmm(self, opt_bonds, opt_angles, opt_torsions, opt_charges, opt_lj, opt_sc):
"""
Method that creates the force field dictionary that contains all the FFTerms of the force field as given by OpenMM.
The FFTerms are grouped in lists that can be accessed by the key of the correspondent force group.
Notes
-----
        This method constructs the force_groups dictionary and calls the methods create_harmonic_bond_force_field, create_harmonic_angle_force_field, create_periodic_torsion_force_field and create_nonbonded_force_field in order to construct the force_field dictionary.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
opt_torsions : bool
Flag that signals whether or not the dihedral parameters will be optimized.
opt_charges : bool
            Flag that signals whether or not the charges will be optimized.
        opt_lj : bool
            Flag that signals whether or not the Lennard-Jones parameters will be optimized.
        opt_sc : bool
            Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors will be optimized.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
# Iterate over all forces present in the system and determine the force groups
assert self._openmm.system is not None, "System was not set"
"""
# Left here only if needed in the future
forces = self._openmm.system.getForces()
for i in range(len(forces)):
force = forces[i]
# Get force group name
# Alternatively,force_key = force.__str__().split(".")[3].split(";")[0]
force_key = force.__class__.__name__
# Set force group number
force.setForceGroup(i)
assert force_key not in self.force_groups, "\t * ERROR: Force {} already in the dictionary.".format(force_key)
self.force_groups[force_key] = i
"""
self.force_groups = copy.deepcopy(self._openmm.forces_indexes)
# Add extra force group for 1-4 scaling factors
force_key = "Scaling14"
assert force_key not in self.force_groups, "\t * ERROR: Force {} already in the dictionary.".format(force_key)
self.force_groups[force_key] = self.force_groups["NonbondedForce"]
# Create the force field from OpenMM
self.create_harmonic_bond_force_field(opt_bonds)
self.create_harmonic_angle_force_field(opt_angles)
self.create_periodic_torsion_force_field(opt_torsions)
self.create_nonbonded_force_field(opt_charges, opt_lj, opt_sc)
return self.force_field
def create_force_field_optimizable(self):
"""
Method that creates the optimizable force field dictionary that contains all the optimizable FFTerms.
The FFTerms are grouped in lists that can be accessed by the key of the correspondent force group.
Returns
-------
force_field_optimizable : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
assert self.force_field is not None, "\t * force_field dictionary was not created yet. Run create_force_field " \
"method before"
self.force_field_optimizable = {}
# Structure:
# force_field["HarmonicBondForce"] = [first_occurrence, second_occurrence]
# where
# first_occurrence = [ff_term_1_1, ff_term_1_2, ...]
# second_occurrence = [ff_term_2_1, ff_term_2_2, ...]
# Iterate over all existent forces
for force in self.force_field:
# For a given force, iterate over all occurrence of that force
for sub_force in self.force_field[force]:
sub_force_field_optimizable = []
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
if force not in self.force_field_optimizable:
self.force_field_optimizable[force] = []
sub_force_field_optimizable.append(force_field_term)
break
if force in self.force_field_optimizable:
self.force_field_optimizable[force].append(sub_force_field_optimizable)
return self.force_field_optimizable
def get_optimizable_parameters(self, symmetry_constrained=True):
"""
Method that gets the lists containing all optimizable Parameter instances and parameter values.
Parameters
----------
symmetry_constrained : bool
Whether or not the optimization is constrained by symmetries.
Returns
-------
optimizable_parameters, optimizable_parameters_values : list of :obj:`ParaMol.Force_field.force_field_term_parameter.Parameter`, list of int/float
Attributes of self.
"""
assert self.force_field_optimizable is not None, "\t * force_field_optimizable dictionary was not created yet." \
" First run create_force_field_optimizable method."
self.optimizable_parameters = []
self.optimizable_parameters_values = []
# Multiplicity of the parameters
ref_parameters = {}
if symmetry_constrained:
# Keep track of symmetry groups already included
symm_groups = {}
# Iterate over all existent forces
for force in self.force_field_optimizable:
for sub_force in self.force_field_optimizable[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
if parameter.symmetry_group == self.symmetry_group_default:
# If symmetry group is the default ("X")
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
elif parameter.symmetry_group in symm_groups.keys():
# If group is not the default one ("X")
# but that symmetry_group is already in symm_groups
if parameter.param_key not in symm_groups[parameter.symmetry_group]:
# Add missing param_key
symm_groups[parameter.symmetry_group].append(parameter.param_key)
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
# Parameter multiplicity
ref_parameters[parameter.symmetry_group].update({parameter.param_key : parameter})
parameter.multiplicity = 1
else:
# Increase multiplicity of the reference parameter
ref_parameters[parameter.symmetry_group][parameter.param_key].multiplicity += 1
else:
# If group is not the default one ("X") and not in symm_groups
symm_groups[parameter.symmetry_group] = []
symm_groups[parameter.symmetry_group].append(parameter.param_key)
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
# Parameter multiplicity
ref_parameters[parameter.symmetry_group] = {parameter.param_key : parameter}
parameter.multiplicity = 1
else:
# Iterate over all existent forces
for force in self.force_field_optimizable:
for sub_force in self.force_field_optimizable[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
# For each term, iterate over all its Parameter instances
for parameter in force_field_term.parameters.values():
if parameter.optimize:
self.optimizable_parameters.append(parameter)
self.optimizable_parameters_values.append(parameter.value)
return self.optimizable_parameters, self.optimizable_parameters_values
def create_harmonic_bond_force_field(self, opt_bonds):
"""
Method that creates the part of the force field regarding OpenMM's force 'HarmonicBondForce'.
Parameters
----------
opt_bonds : bool
Flag that signals whether or not the bond parameters will be optimized.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
force_key = "HarmonicBondForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
bond_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(bond_force.getNumBonds()):
# Create the FFTerm for this bond term
at1, at2, length, k = bond_force.getBondParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_bonds), "bond_eq", length._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_bonds), "bond_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_harmonic_angle_force_field(self, opt_angles):
"""
Method that creates the part of the force field regarding OpenMM's force 'HarmonicAngleForce'.
Parameters
----------
opt_angles : bool
Flag that signals whether or not the angle parameters will be optimized.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
force_key = "HarmonicAngleForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
angle_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(angle_force.getNumAngles()):
# Create the FFTerm for this bond term
at1, at2, at3, angle, k = angle_force.getAngleParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2, at3])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_angles), "angle_eq", angle._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_angles), "angle_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_periodic_torsion_force_field(self, opt_torsions):
"""
Method that creates the part of the force field regarding OpenMM's force 'PeriodicTorsionForce'.
Parameters
----------
opt_torsions : bool
Flag that signals whether or not the torsion parameters will be optimized.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
force_key = "PeriodicTorsionForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
dihedral_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(dihedral_force.getNumTorsions()):
# Create the FFTerm for this bond term
at1, at2, at3, at4, per, phase, k = dihedral_force.getTorsionParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2, at3, at4])
# Add parameters to this FFTerm
# OBS: currently not possible to optimize the periodicity
force_field_term.add_parameter(self.symmetry_group_default, 0, "torsion_periodicity", int(per))
force_field_term.add_parameter(self.symmetry_group_default, int(opt_torsions), "torsion_phase", phase._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_torsions), "torsion_k", k._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def create_nonbonded_force_field(self, opt_charges, opt_lj, opt_sc):
"""
Method that creates the part of the force field regarding OpenMM's force 'NonbondedForce'.
Parameters
----------
opt_charges : bool
Flag that signals whether or not the charge parameters will be optimized.
opt_lj : bool
Flag that signals whether or not the Lennard-Jones 12-6 parameters will be optimized.
opt_sc : bool
Flag that signals whether or not the 1-4 Lennard-Jones and electrostatic scaling factors's parameters will be optimized.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
force_key = "NonbondedForce"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for
self.force_field[force_key] = []
for force_idx in self.force_groups[force_key]:
nonbonded_force = self._openmm.system.getForce(force_idx)
sub_force_field = []
for i in range(nonbonded_force.getNumParticles()):
# Create the FFTerm for this bond term
charge, sigma, eps = nonbonded_force.getParticleParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [i])
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_charges), "charge", charge._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_lj), "lj_sigma", sigma._value)
force_field_term.add_parameter(self.symmetry_group_default, int(opt_lj), "lj_eps", eps._value)
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
# Create empty list for 1-4 scaling
force_key = "Scaling14"
assert force_key not in self.force_field, "\t * ERROR: " \
"Force group {} already exists.".format(force_key)
# Create empty list for
self.force_field[force_key] = []
sub_force_field = []
for i in range(nonbonded_force.getNumExceptions()):
at1, at2, charge_prod, sigma, eps, = nonbonded_force.getExceptionParameters(i)
force_field_term = FFTerm(self.force_groups[force_key], i, [at1, at2])
if abs(charge_prod._value) < 1e-8 and abs(eps._value) < 1e-8:
# No scaling
scee = 0.0
scnb = 0.0
force_field_term.add_parameter(self.symmetry_group_default, 0, "scee", float(scee))
force_field_term.add_parameter(self.symmetry_group_default, 0, "scnb", float(scnb))
continue
else:
# Determine default scaling
charge_at1, sigma_at1, eps_at1 = nonbonded_force.getParticleParameters(at1)
charge_at2, sigma_at2, eps_at2 = nonbonded_force.getParticleParameters(at2)
try:
scee = charge_prod / (charge_at1 * charge_at2)
except:
scee = 1 / 1.2
try:
scnb = eps / np.sqrt(eps_at1 * eps_at2)
except:
scnb = 1 / 2.0
# Add parameters to this FFTerm
force_field_term.add_parameter(self.symmetry_group_default, int(opt_sc), "scee", float(scee))
force_field_term.add_parameter(self.symmetry_group_default, int(opt_sc), "scnb", float(scnb))
# Append FFTerm to sub_force_field
sub_force_field.append(force_field_term)
# Append sub_force_field to force_field[force_key]
self.force_field[force_key].append(sub_force_field)
return self.force_field
def write_ff_file(self, file_name):
"""
Method that writes the force field parameters in the standard format used by ParaMol (usually .ff extension).
Parameters
----------
file_name : str
Name of the file to be written.
Returns
-------
        None. The file handle is closed before the method returns.
"""
logging.info("Writing force field to .ff file named '{}'.".format(file_name))
# Open file for writing
ff_file = open(file_name, 'w')
# Iterate over all existent forces
for force in self.force_field:
# Iterate over all force field term
for k, sub_force in enumerate(self.force_field[force]):
# For a given force occurrence, iterate over all force field terms
ff_file.write("{} {:3d} \n".format(force, self.force_groups[force][k]))
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
ff_term_line = ("{:3d} " + "{:3d} " * len(force_field_term.atoms)).format(force_field_term.idx, *force_field_term.atoms)
# For each term, iterate over all its Parameter instances
optimization_flags = ""
for parameter in force_field_term.parameters.values():
ff_term_line += "{:16.8f} ".format(parameter.value)
optimization_flags += "{:3d} ".format(int(parameter.optimize))
ff_term_line += optimization_flags
ff_term_line += " " + str(parameter.symmetry_group) + " \n"
ff_file.write(ff_term_line)
ff_file.write("END \n")
return ff_file.close()
def read_ff_file(self, file_name):
"""
Method that reads the force field parameters in the standard format used by ParaMol (usually .ff extension) and creates its ParaMol representation.
Parameters
----------
file_name : str
Name of the file to be read.
Returns
-------
        None. The file handle is closed before the method returns.
"""
        # Open file for reading
ff_file = open(file_name, 'r')
# Iterate over all existent forces
for line in ff_file:
line_split = line.split()
if 'END' in line_split:
break
elif len(line_split) == 2:
# A new force was found; set the force key and force group
force_key = line_split[0]
force_index = int(line_split[1])
if force_key not in self.force_groups:
self.force_groups[force_key] = []
self.force_groups[force_key].append(force_index)
if force_key not in self.force_field:
# Create empty list for the force_key
self.force_field[force_key] = []
self.force_field[force_key].append([])
# current_occurrence of this force_key
current_occurrence = len(self.force_field[force_key])-1
continue
else:
if force_key == 'HarmonicBondForce':
idx, at1, at2, bond_eq, bond_k, bond_eq_opt, bond_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(bond_eq_opt), "bond_eq", float(bond_eq))
force_field_term.add_parameter(symm_group, int(bond_k_opt), "bond_k", float(bond_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'HarmonicAngleForce':
idx, at1, at2, at3, angle_eq, angle_k, angle_eq_opt, angle_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2), int(at3)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(angle_eq_opt), "angle_eq", float(angle_eq))
force_field_term.add_parameter(symm_group, int(angle_k_opt), "angle_k", float(angle_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'PeriodicTorsionForce':
idx, at1, at2, at3, at4, torsion_periodicity, torsion_phase,\
torsion_k, torsion_periodicity_opt, torsion_phase_opt, torsion_k_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2), int(at3), int(at4)])
# Add parameters to this FFTerm
# OBS: currently not possible to optimize the periodicity
assert int(torsion_periodicity_opt) == 0, \
"Flag to parameterize torsions was set to {} but this is not possible.".format(torsion_periodicity_opt)
force_field_term.add_parameter(symm_group, int(0), "torsion_periodicity", int(float(torsion_periodicity)))
force_field_term.add_parameter(symm_group, int(torsion_phase_opt), "torsion_phase", float(torsion_phase))
force_field_term.add_parameter(symm_group, int(torsion_k_opt), "torsion_k", float(torsion_k))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'NonbondedForce':
idx, at, charge, sigma, eps, charge_opt, sigma_opt, eps_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(charge_opt), "charge", float(charge))
force_field_term.add_parameter(symm_group, int(sigma_opt), "lj_sigma", float(sigma))
force_field_term.add_parameter(symm_group, int(eps_opt), "lj_eps", float(eps))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
elif force_key == 'Scaling14':
idx, at1, at2, scee, scnb, scee_opt, scnb_opt, symm_group = line_split
force_field_term = FFTerm(self.force_groups[force_key], int(idx), [int(at1), int(at2)])
# Add parameters to this FFTerm
force_field_term.add_parameter(symm_group, int(scee_opt), "scee", float(scee))
force_field_term.add_parameter(symm_group, int(scnb_opt), "scnb", float(scnb))
# Append FFTerm to ForceField
self.force_field[force_key][current_occurrence].append(force_field_term)
return ff_file.close()
def optimize_selection(self, lower_idx, upper_idx, change_other=False):
"""
        Method that sets a parameter as optimizable if it belongs to a force field term for which at least one of the atom indices is greater than lower_idx and lower than upper_idx.
Notes
-----
        If [10,20] is given as the lower_idx list and [15,25] as the upper_idx list, the selection will comprise the atoms between 10-15 and 20-25.
Parameters
----------
lower_idx : list of int
Lower index limits.
upper_idx : list of int
Upper index limits.
change_other : bool
            Whether or not the remaining parameters' optimization state is to be set to False (default is False, i.e., their optimization state is not changed).
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
assert len(lower_idx) == len(upper_idx)
# Iterate over all forces
for force in self.force_field:
# Iterate over all force field term
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
# Iterate over all atoms of a given force field term
for at in force_field_term.atoms:
for i in range(len(lower_idx)):
low_limit = lower_idx[i]
upper_limit = upper_idx[i]
if (at >= low_limit) and (at <= upper_limit):
for parameter in force_field_term.parameters.values():
parameter.optimize = 1
                            elif ((at < low_limit) or (at > upper_limit)) and change_other:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
# If outside range but change other is False
pass
return self.force_field
def optimize_torsions(self, torsions, change_other_torsions=False, change_other_parameters=False):
"""
        Method that sets as optimizable all parameters of the torsions contained in the list passed as an argument.
Parameters
----------
torsions : list of lists
List of list, wherein the inner lists contain indices of the quartets of atoms that define the torsion to be optimized.
change_other_torsions : bool
Whether or not the remaining torsions's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given dihedrals
# ----------------------------------------------------------------------------------------------
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
                            # Skip torsion_periodicity, since it is not handled by ParaMol
if parameter.param_key != "torsion_periodicity":
if force_field_term.atoms in torsions:
parameter.optimize = 1
elif change_other_torsions:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
def optimize_scaling_constants(self, atom_pairs, change_other_sc=False, change_other_parameters=False):
"""
        Method that sets as optimizable all parameters of the scaling factors contained in the list passed as an argument.
Parameters
----------
atom_pairs : list of lists
List of list, wherein the inner lists contain indices of the pair of atoms for which the scaling factors are to be optimized.
change_other_sc : bool
Whether or not the remaining scaling constants's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given dihedrals
# ----------------------------------------------------------------------------------------------
for force in self.force_field:
if force == 'Scaling14':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
if force_field_term.atoms in atom_pairs:
parameter.optimize = 1
elif change_other_sc:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
def optimize_torsions_by_symmetry(self, torsions, change_other_torsions=False, change_other_parameters=False, set_zero=False):
"""
        Method that sets as optimizable all parameters of the torsions with the same symmetry groups as the ones contained in the list passed as an argument.
Parameters
----------
torsions : list of lists
List of list, wherein the inner lists contain indices of the quartets of atoms that define the torsion to be optimized.
change_other_torsions : bool
Whether or not the remaining torsions's optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
change_other_parameters : bool
Whether or not the remaining parameters' optimization state is to be set to False. (default is False, i.e., their optimization state is not changed)
set_zero : bool
Whether or not to set the force constant of the optimizable torsions to 0.
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
# ----------------------------------------------------------------------------------------------
# Set optimization flag in ParaMol Force Field representation for given dihedrals
# ----------------------------------------------------------------------------------------------
# Get symmetry groups of given dihedrals
dihedral_types = []
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
                            if parameter.param_key != "torsion_periodicity":
if force_field_term.atoms in torsions:
dihedral_types.append(parameter.symmetry_group)
# Change the necessary optimization states
for force in self.force_field:
if force == 'PeriodicTorsionForce':
for sub_force in self.force_field[force]:
# For a given force occurrence, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
                            # Skip torsion_periodicity, since it is not handled by ParaMol
if parameter.param_key != "torsion_periodicity":
if parameter.symmetry_group in dihedral_types:
parameter.optimize = 1
if parameter.param_key == "torsion_k" and set_zero:
parameter.value = 0.0
elif change_other_torsions:
parameter.optimize = 0
elif change_other_parameters:
for sub_force in self.force_field[force]:
# For a given force, iterate over all force field terms
for force_field_term in sub_force:
for parameter in force_field_term.parameters.values():
parameter.optimize = 0
else:
pass
return self.force_field
def set_parameter_optimization(self, force_key, sub_force, idx, param_key, optimize):
"""
Method that for the force field term with index `idx` of the force `force_key` set the parameter with name `param_key` to the optimization state in `optimize`.
Parameters
----------
force_key : str
Name of the force.
sub_force : int
            Occurrence of the force.
idx : int
Index of the force field term.
param_key : str
Name of the parameter.
optimize : bool
Optimization state (0 or 1).
Returns
-------
force_field : dict
            Dictionary whose keys are force group names and whose values are the corresponding :obj:`ParaMol.Force_field.force_field_term.FFTerm` instances.
"""
self.force_field[force_key][sub_force][idx].parameters[param_key].optimize = optimize
return self.force_field
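# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; `engine` stands for an already
# constructed :obj:`ParaMol.MM_engines.openmm.OpenMMEngine` instance):
#
#   ff = ForceField(engine)
#   ff.create_force_field(opt_torsions=True)
#   params, values = ff.get_optimizable_parameters()
#   # ... optimize `values` against reference data ...
#   ff.update_force_field(values)
#   ff.write_ff_file("system.ff")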
|
code
|
# search any element in a html page
from selenium import webdriver
browser = webdriver.Firefox()
type(browser)
browser.get('https://gabrielecirulli.github.io/2048/')
try:
elem = browser.find_element_by_class_name('game-explanation')
print('found <%s> element with this class name!' %(elem.tag_name))
except:
print('no such element')
|
code
|
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
        # Method 2
        # If x is a power of two, its binary representation contains exactly one 1, so x & (-x) == x;
        # if x is not a power of two, its binary representation has more than one 1, so x & (-x) != x.
        # Time complexity: O(1)
        # Space complexity: O(1)
        # if n == 0:
        #     return False
        # return n & (-n) == n
        # Method 1
        # Clear the rightmost 1 in the binary representation.
        # A power of two has exactly one 1 in binary, so x & (x - 1) turns a
        # power of two into 0; the check is therefore x & (x - 1) == 0.
if n==0:
return False
return n&(n-1)==0
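# Quick sanity check of the bit trick described above (illustration only):
if __name__ == "__main__":
    s = Solution()
    print([n for n in range(1, 20) if s.isPowerOfTwo(n)])  # [1, 2, 4, 8, 16]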
|
code
|
"""
Metaprogramming
"""
from collections import namedtuple
class ParamsRegistry(type):
def __init__(cls, name, bases, namespace):
super(ParamsRegistry, cls).__init__(name, bases, namespace)
if not hasattr(cls, 'registry'):
cls.registry = set()
cls.registry.add(cls)
cls.registry -= set(bases)
def __iter__(cls):
return iter(cls.registry)
def __str__(cls):
if cls in cls.registry:
return ':param: {}'.format(cls.__name__)
return cls.__name__ + ": " + ", ".join([sc.__name__ for sc in cls])
class Params(object, metaclass=ParamsRegistry):
pass
kls = type('HasOptimizer', (Params,), dict(
to_str=lambda self: 'str repr {}'.format(type(self))
))
print(kls)
print(type(kls))
obj = kls()
print(obj.to_str())
|
code
|
# test cases:
# cookie("Ryan") --> "Who ate the last cookie? It was Zach!"
# cookie(26) --> "Who ate the last cookie? It was Monica!"
# cookie(2.3) --> "Who ate the last cookie? It was Monica!"
# cookie(true) --> "Who ate the last cookie? It was the dog!"
def cookie(x):
if type(x) is str:
return "Who ate the last cookie? It was Zach!"
elif type(x) is float:
return "Who ate the last cookie? It was Monica!"
elif type(x) is int:
return "Who ate the last cookie? It was Monica!"
else:
return "Who ate the last cookie? It was the dog!"
|
code
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 9 10:01:24 2019
@author: ADMIN
"""
from sklearn.cluster import KMeans
#from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv("km1.csv")
df1=pd.DataFrame(data)
print(df1)
f1=df1['Distance_Feature'].values
f2=df1['Speeding_Feature'].values
x=np.matrix(list(zip(f1,f2)))
plt.plot()
plt.xlim([0,100])
plt.ylim([0,50])
plt.title('dataset')
plt.ylabel('Speeding_Feature')
plt.xlabel('Distance_Feature')
plt.scatter(f1,f2)
plt.show()
#create new plot and data
plt.plot()
colors=['b','g','r']
markers=['o','v','s']
#kmeans Algorithm
#k=3
KMeans_model=KMeans(n_clusters=3).fit(x)
plt.plot()
for i,l in enumerate(KMeans_model.labels_):
plt.plot(f1[i],f2[i],color=colors[l],marker=markers[l],ls='None')
plt.xlim([0,100])
plt.ylim([0,50])
plt.show()
|
code
|
#!/usr/bin/env python
# coding=utf-8
import traceback
class func(object):
def __enter__(self):
# raise Exception("haha")
pass
def __exit__(self, type, value, trace):
        print(type)
        print(value)
        print(trace)
        print(traceback.format_exc())
        # return True  # A truthy return value swallows the exception raised inside the with block,
        #              # so it does not propagate out of the context manager.
        return 1  # same as returning True
        # return 0  # A falsy return value (False / 0 / None) lets the exception propagate out of the with block.
if __name__ == "__main__":
a = None
with func() as f:
raise Exception("bbb")
a = 1
    print(a)
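# Expected behavior: __exit__ prints the exception type, value and traceback;
# because it returns a truthy value the exception is suppressed, the rest of
# the with block (a = 1) is skipped, and `None` is printed.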
|
code
|
'''
@Author: Shuai Wang
@Github: https://github.com/wsustcid
@Version: 1.0.0
@Date: 2020-03-26 11:45:38
@LastEditTime: 2020-04-02 11:26:00
'''
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Read in the image
image = mpimg.imread('lane.jpg')
print('This image is: ',type(image),
'with dimensions:', image.shape)
# image.shape = (height, width, channels)
# Grab the x and y size and make a copy of the image
height = image.shape[0]
width = image.shape[1]
# Note: Always make a copy of arrays or other variables in Python.
# If instead, you use "a = b" then all changes you make to "a"
# will be reflected in "b" as well!
color_select = np.copy(image)
# Define color selection criteria
###### MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
######
# answer: 200 (recognize all 4 lane lines)
# If we set to 200, can extract two lines directly in front of the vehicle.
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Do a boolean or with the "|" character to identify
# pixels below the thresholds
thresholds = (image[:,:,0] < rgb_threshold[0]) \
| (image[:,:,1] < rgb_threshold[1]) \
| (image[:,:,2] < rgb_threshold[2])
# thresholds is a 2-D boolean matrix,
# The matrix elements are False only when the RGB values are all above the corresponding rgb_thresholds.
color_select[thresholds] = [0,0,0]
# The following line saves the result; comment it out if you do not wish to save the image
mpimg.imsave("lane_color_selection.png", color_select)
# Display the image
plt.imshow(color_select)
plt.show()
|
code
|
import numpy as np
import numpy.linalg as alg
l1 = []
rows = int(input("enter rows:"))
cols = int(input("enter cols:"))
for i in range(rows):
for j in range(cols):
l1.append(int(input()))
print(l1)
m = np.reshape(l1, (rows, cols))
print(m)
Values, Vectors = alg.eig(m)
print(Values)
print(Vectors[:, 0])
print(Vectors[:, 1])
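# Example (illustration): entering rows=2, cols=2 and the values 2, 0, 0, 3
# (one per prompt) builds [[2, 0], [0, 3]], whose eigenvalues are 2.0 and 3.0
# with eigenvectors [1, 0] and [0, 1].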
|
code
|
# -*- coding: utf-8 -*-
## Date: 2015-02-13
## Changes:
##   Added timing of URL requests
## Date: 2015-04-01
## Changes:
##   Moved the test file name into the configuration file and added a class for getting the current path
##
import urllib.request
import urllib.parse
import urllib.error
from pathlib import Path
import json
import io
import sys
import traceback
import os
import os.path
import time
import gc
import logging
import configparser
import xml.etree.ElementTree as ET
import random  # used to generate random digits and letters
# This class builds custom URL requests
class handleurl(object):
## for the future account
## URL_FILE_PATH = 'E:/Work/script/SimulatorAccountlist.xml'
## for the stock account
## StockAccountURLlistxml
#URL_FILE_PATH = 'StrategyURLlist.xml'
CONFIG_FILE=r'/conf/xmlFilename.conf'
    URLTypes = []       # stores the name attribute of <protocol name="https" brokername="微量期货仿真账户">
    urllinks = []
    URLSubNames = []    # login link
    brokernames = []    # stores the brokername attribute of <protocol name="https" brokername="微量期货仿真账户">
    httpstatus = ""
    httpreason = ""
    httpReturnValue = ""  # JSON object
    urltaketime = ""      # time taken by the URL request
    ISDEBUG = False       # True: debug mode, False: run mode
    global null  # JSON responses may contain null values
    null = 'null'
    def __init__(self):
        super().__init__()
def getConfigFile(self):
""" 测压文件已经放在config文件里"""
cf = configparser.ConfigParser()
if self.ISDEBUG: ## Use debug mode
cfFile= r'../conf/xmlFilename.conf'
else:
cfFile= os.path.normcase(os.getcwd() + os.sep +self.CONFIG_FILE)
print("[cofiguurefie]"+ cfFile)
cf.read(cfFile)
xmlfilename = cf['xmlfile']['FileName']
return xmlfilename
def getConfigFolder(self):
""" 返回配置文件xml所在的目录"""
cf = configparser.ConfigParser()
cf = configparser.ConfigParser()
if self.ISDEBUG: ## Use debug mode
cfFile= r'../conf/xmlFilename.conf'
else:
cfFile= os.path.normcase(os.getcwd() + os.sep +self.CONFIG_FILE)
print(cfFile)
cf.read(cfFile)
foldername = cf['xmlfile']['FoldName']
print(foldername)
return foldername
def getXMLFileFullPath(self):
if self.ISDEBUG: ## Use debug mode
xmlfilepath =os.path.normcase(r'..\\' +self.getConfigFolder() +os.sep +self.getConfigFile())
else:
xmlfilepath= os.path.normcase(os.getcwd() + os.sep +self.getConfigFolder() +os.sep +self.getConfigFile())
print(xmlfilepath)
return xmlfilepath
def getRootElement(self,xmlfilepath):
""" get Root element """
tree =ET.parse(xmlfilepath)
root =tree.getroot()
return root
def initTestArguments(self):
"Retyrb arry for link"
root = self.getRootElement(self.getXMLFileFullPath())
for child in root:
for item in child:
self.URLTypes.append(child.get('name'))
self.brokernames.append(child.get('Target'))
## rdmNum = random.randrange(1000,9999,4)
## rdmChar = random.choice('abcdefghijklmnopqrstuvwxyz')+ random.choice('abcdefghijklmnopqrstuvwxyz')
## itemurllink = item.get('url').replace('tang123','tang123'+str(rdmNum)+rdmChar)
itemurllink = item.get('url')
self.urllinks.append(itemurllink)
#self.urllinks.append(item.get('url'))
self.URLSubNames.append(item.text)
#print( self.URLTypes, self.brokernames,self.urllinks,self.URLSubNames)
def getURLType(self,urllink):
Re = urllib.parse.urlparse(urllink)
self.URLType = Re.scheme
return self.URLType
def getBaselink(self,urllink):
Re = urllib.parse.urlparse(urllink)
baseurl = Re.scheme + '://' + Re.netloc + Re.path + "?"
return baseurl
def getparams(self,urllink):
"""return interl parse mapping obj """
Re = urllib.parse.urlparse(urllink)
parm = urllib.parse.parse_qsl(Re.query)
return urllib.parse.urlencode(parm)
def PrpcesshttpRequest(self, baseurl,parms,encodemethod='utf-8',processmethod='GET'):
#print(baseurl)
#print(parms.encode(encodemethod))
#req = urllib.request.Request(url=baseurl,data=parms.encode(encodemethod),method=processmethod)
#print("[Handle URL]:",baseurl+str(parms.urldecode))
print('\n[URL]', baseurl + urllib.parse.unquote(parms))
try:
## strtime = time.process_time()
strtime = time.perf_counter()
req = urllib.request.Request(url=baseurl+str(parms))
httpresp = urllib.request.urlopen(req)
## endtime = time.process_time()
endtime = time.perf_counter()
## logging.info("URL tale time:", str((endtime-strtime)/1000000)) #计算http请求花费的时间
print("\n[URL 请求花费时间]:", str((endtime-strtime)/1000000),"秒") #计算http请求花费的时间
print("【URL 请求花费时间】:", str((endtime-strtime)/1000),"豪秒") #计算http请求花费的时间
self.urltaketime = str((endtime-strtime)/1000)#计算http请求花费的时间
self.httpstatus =httpresp.status
self.httpreason =httpresp.reason
#self.httpReturnValue = httpresp.read().decode('utf-8')
jstr = httpresp.read().decode('utf-8')
self.httpReturnValue = self.handleJsonString(jstr)
except urllib.error.HTTPError as httperr:
print("[Http Error]",httperr)
except urllib.error.URLError as urlerr:
print("[Error]",urlerr)
def handleJsonString(self, jstr):
"""
Process the http response string.
Return a formatted html table string.
# html table format:
===============================================================
|state (k)|value (v)|state (k)|value (v)| state (k)|value (v) |
===============================================================
| state | 0 |info |xxxxx |total |XXXX |
================================================================
|Data| XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|
================================================================
"""
tbstr = r'<table border="1">'\
+ r'<tr>'\
+ r'<td align="center">状态(k)</td>'\
+ r'<td align="center">值(v)</td>'\
+ r'<td align="center">状态(k)</td>'\
+ r'<td align="center">值(v)</td>'\
+ r'<td align="center">状态(k)</td>'\
+ r'<td align="center">值(v)</td>'\
+ r'</tr>'
tbend = r'</table>'
trstr = r'<tr>'
trend = r'</tr>'
tdstr = r'<td align="center">'
tdend = r'</td>'
tdfull = "" #完整的td标签 <td></td>
dictre = json.loads(jstr)#转为dict
for key,value in dictre.items():
if key != 'data':
tdfull = tdfull + tdstr+ key +tdend + tdstr+ str(value) +tdend # complete td tags
trfull = trstr + tdfull + trend # complete tr tag
datastr = dictre['data']
datacollect = ""
## print(type(datastr))
if type(datastr) == type (None):
datacollect = null
elif type(datastr) == type(dict()):
for k,v in datastr.items():
datacollect = datacollect + str(k) + r' : ' + str(v) + r'<hr />'
## print(k,"===.",v )
elif type(datastr) == type(list()):
if len(datastr) == 0:
datacollect = datacollect + '[]'
else:
for item in datastr:
datacollect = datacollect + str(item) + r'<hr />'
else:
datacollect = datacollect + str(datastr)
datatdfull = tdstr + "data" + tdend \
+ r'<td align="left" colspan ="5">' \
+ datacollect \
+ tdend
trfull = trfull + trstr + datatdfull + trend # append the data row
tbstr = tbstr + trfull + tbend
return tbstr
## def getBaseURL(self,urlstr):
## """ Return base url"""
## Re = parse.urlparse(urlstr)
## baseurl = Re.scheme + "://" +Re.netloc + Re.path
## return baseurl
##
## def getEncodeParameters(self,urlstr):
## """ Return Parameters """
## Re = parse.urlparse(urlstr)
## PsRe= urllib.parse.parse_qsl(Re.query)
## params = PsRe.urlencode(PsRe)
## return params
##
##
## def getRequest(self,urlstr):
## """ Return overwrite Request """
## baseurl = getBaseURL(urlstr)
## encodeparams = getEncodeParameters(urlstr)
## req = request.Request(url = baseurl,data = encodeparams,method = 'GET')
## return req
## def getStatus(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.status
##
## def getReason(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.reason
##
## def getReturnVaile(self,urlrequest):
## """Return http accwss status"""
## f = urllib.request.urlopen(urlrequest)
## return f.read().decode('utf-8')
##if __name__ =='__main__':
##
## handURL = handleurl()
## handURL.initTestArguments()
##
|
code
|
number = int(input(" Please Enter any Positive Integer : "))
if((number % 5 == 0) and (number % 11 == 0)):
print("Given Number is Divisible by 5 and 11",number)
else:
print("Given Number is Not Divisible by 5 and 11",number)
|
code
|
matriz = [[], [], []]
R = list(range(0, 3))
for c in R:
for i in R:
matriz[c].append(int(input(f'Digite um valor para[{c}, {i}]: ')))
print('-' * 30)
for d in R:
print('(', end=' ')
for j in R:
print(f'[{matriz[d][j]:^5}]', end=' ')
print(')')
|
code
|
"""Sources:
dataset: https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
"""
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from backend.util.static import PATHS, dataset, params
from backend.util.helper_functions import get_ct_feature_names
from backend.util.util import clean_folder
import plotly.express as px
import plotly.offline as py
german_credit_dataset = dataset[params["dataset"]]
def initialize():
"""This is for the first initialization (splitting dataset and setting paths / folders) """
# clean preprocessed and output folder
clean_folder(folder=PATHS["02_data_preprocessed"])
clean_folder(folder=PATHS["03_data_outputs"])
clean_folder(folder=PATHS["04_trained_models"])
# load the data
df_credit = load_data()
# Small preparation
df_credit = data_preparation(df_credit)
# Encode into lime format
df_credit, categorical_encoding, categorical_encoding_label = encoding(df_credit)
# Split the data into training and testing
create_training_split(df_credit)
def load_data(type="raw"):
"""Loads the data, type can be raw, training, testing."""
if type == "raw":
df_credit = pd.read_excel(PATHS["01_data_raw"] + "credit_risk.xls", index_col=0)
return df_credit
elif type == "training":
X_train = pd.read_csv(PATHS["02_data_preprocessed"] + "X_train.csv", index_col=0)
y_train = pd.read_csv(PATHS["02_data_preprocessed"] + "y_train.csv", index_col=0)
return X_train, y_train
elif type == "testing":
X_test = pd.read_csv(PATHS["02_data_preprocessed"] + "X_test.csv", index_col=0)
y_test = pd.read_csv(PATHS["02_data_preprocessed"] + "y_test.csv", index_col=0)
return X_test, y_test
else:
raise AssertionError("Error in if-else statement")
def explore_data(df_credit):
"""Explore the data."""
print("Shape of the data")
print(df_credit.info())
#print("Looking unique values")
#print(df_credit.nunique())
print("Header")
print(df_credit.head())
# Prints unique data values
# print("Checking account : ", df_credit['Checking account'].unique())
# print("Credit history : ", df_credit['Credit history'].unique())
# print("Saving accounts : ", df_credit['Saving accounts'].unique())
# print("Length of current employment : ", df_credit['Length of current employment'].unique())
# print("Purpose : ", df_credit.Purpose.unique())
# print("Sex : ", df_credit['Sex'].unique())
# print("Marital status : ", df_credit['Marital status'].unique())
# print("Other debtors / guarantors : ", df_credit['Other debtors / guarantors'].unique())
# print("Property ", df_credit['Property'].unique())
# print("Other installment plans : ", df_credit['Other installment plans'].unique())
# print("Housing : ", df_credit.Housing.unique())
# print("Job : ", df_credit.Job.unique())
# print("Telephone : ", df_credit.Telephone.unique())
# print("Foreign Worker : ", df_credit['Foreign Worker'].unique())
# print("Risk : ", df_credit['Risk'].unique())
def create_training_split(df_credit):
"""Creates the train, val, and test split."""
y = df_credit["Risk"] # .map({"bad": 0, "good": 1})
X = df_credit.drop("Risk", axis=1)
# Splitting X and y into train and test version
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42) # test size of 100
# Save train and test set
pd.DataFrame(X_train).to_csv(PATHS["02_data_preprocessed"] + "X_train.csv")
pd.DataFrame(X_test).to_csv(PATHS["02_data_preprocessed"] + "X_test.csv")
pd.DataFrame(y_train).to_csv(PATHS["02_data_preprocessed"] + "y_train.csv")
pd.DataFrame(y_test).to_csv(PATHS["02_data_preprocessed"] + "y_test.csv")
return X_train, X_test, y_train, y_test
def data_preparation(df_credit):
"""Small dataset fixes"""
# df_credit.loc[(df_credit.Job == 0), 'Job'] = "unskilled and non-resident"
# df_credit.loc[(df_credit.Job == 1), 'Job'] = "unskilled and resident"
# df_credit.loc[(df_credit.Job == 2), 'Job'] = "skilled"
# df_credit.loc[(df_credit.Job == 3), 'Job'] = "highly skilled"
# df_credit["Saving accounts"] = df_credit["Saving accounts"].astype(str)
# df_credit["Checking account"] = df_credit["Checking account"].astype(str)
# interval = (18, 25, 35, 60, 120)
# categories = ['Student', 'Young', 'Adult', 'Senior']
# df_credit["Age_cat"] = pd.cut(df_credit.Age, interval, labels=categories)
# df_credit['Credit amount'] = np.log(df_credit['Credit amount'])
return df_credit
def analyze_dataset():
"""Analyze the datasest and give back which columns contains which type of features"""
df_credit = load_data()
categorical_features_names = [col for col in df_credit.columns if df_credit[col].dtype == 'object']
categorical_features_indices = german_credit_dataset["categorical_features_indices"]
feature_names = df_credit.columns.tolist()
return categorical_features_names, categorical_features_indices, feature_names
def encoding(df_credit):
"""Preprocessing: Encodes the categorical labels into the LIME format. This format should not be used for
training since it is not one-hot-encoded (high multicollinearity)"""
categorical_encoding = {}
for col in german_credit_dataset["categorical_features_indices"]:
label_encoder = preprocessing.LabelEncoder()
df_credit.iloc[:, col] = label_encoder.fit_transform(df_credit.iloc[:, col])
categorical_encoding[col] = label_encoder.classes_
categorical_encoding_label = {}
for col in german_credit_dataset["label_indices"]:
label_encoder = preprocessing.LabelEncoder()
df_credit.iloc[:, col] = label_encoder.fit_transform(df_credit.iloc[:, col])
categorical_encoding_label = label_encoder.classes_
return df_credit, categorical_encoding, categorical_encoding_label
def load_encoded_data():
"""Load lime-encoded training and testing data and return one-hot-encoded data."""
X_train, y_train = load_data(type="training")
X_test, y_test = load_data(type="testing")
# one-hot-encode the data
X_train_encoded, encoder, columns = build_and_fit_one_hot_encoder(X_train)
X_test_encoded = pd.DataFrame(encoder.transform(X_test), columns=columns)
return X_train_encoded, y_train, X_test_encoded, y_test, encoder
def build_and_fit_one_hot_encoder(X_train):
"""Returns a one hot encoder and an encoded dataset."""
numeric_features = german_credit_dataset["num_features"]
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = german_credit_dataset["cat_features"]
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant')),
('onehot', OneHotEncoder(handle_unknown='error', drop="first"))])
encoder = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)],
remainder="passthrough")
encoder.fit(X_train)
X_train_encoded = encoder.transform(X_train)
columns = get_ct_feature_names(encoder)
X_train_encoded = pd.DataFrame(X_train_encoded, columns=columns)
return X_train_encoded, encoder, columns
def one_hot_encoder_old(df_credit, nan_as_category=False):
"""OLD """
original_columns = list(df_credit.columns)
categorical_columns = [col for col in df_credit.columns if df_credit[col].dtype == 'object']
df_credit = pd.get_dummies(df_credit, columns=categorical_columns, dummy_na=nan_as_category, drop_first=True)
new_columns = [c for c in df_credit.columns if c not in original_columns]
return df_credit, new_columns
def data_exploration(df_credit):
"""Data exploration with seaborn"""
ax = sns.countplot(x="Risk",hue="Risk", data=df_credit)
plt.show()
# ax = sns.countplot(data=df_credit, x="Sex", hue="Risk")
# plt.show()
# ax = sns.histplot(data=df_credit, x="Credit amount", hue="Risk", element="step")
# plt.show()
# ax = sns.histplot(data=df_credit, x="Age", hue="Risk", element="step")
# plt.show()
# ax = sns.histplot(data=df_credit, x="Duration", hue="Risk", element="step")
# plt.show()
# ax = sns.countplot(data=df_credit, x="Purpose", hue="Risk")
# plt.show()
# ax = sns.countplot(data=df_credit, x="Saving accounts", hue="Risk")
# plt.show()
#ax = sns.countplot(data=df_credit, x="Checking account", hue="Risk")
#plt.show()
# ax = sns.pairplot(data=df_credit, hue="Risk", kind="kde")
# ax.savefig("pairplot_all.png")
# plt.show() # Takes a while to plot
# plt.figure(figsize=(14, 12))
# sns.heatmap(df_credit.astype(float).corr(),linewidths=0.1,vmax=1.0,
# square=True, linecolor='white', annot=True)
# plt.show()
# sns.countplot(x="Housing", hue="Risk", data=df_credit)
#plt.show()
|
code
|
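# For each of the v test cases, read n and m and print the number of digits of n**m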
v = int(input())
i = 0
while i < v:
n, m = input().split(" ")
n = int(n)
m = int(m)
tam = n ** m
tam = str(tam)
tam = list(tam)
print(len(tam))
i = i + 1
|
code
|
import maya.cmds as cmds
import random
import math
import imp
v = imp.load_source('v', '/lhome/kristina/Documents/code/maya/boids/vector_class.py') # maya python 2.7 weirdness
width = 100
height = 100
depth = 100 # was 150
class Particle(v.vec3):
'''
Class defining a single particle.
'''
def __init__(self, name):
self.name = name
super(Particle, self).__init__(0.0, 0.0, 0.0) # Python 2.7 style super call for Maya.
# Python 3+ is super().__init__(0.0, 0.0, 0.0)
self.position = v.vec3(random.uniform(0, width), random.uniform(0, height), random.uniform(0, depth))
self.velocity = v.vec3(math.cos(random.uniform(0.4, 1)), math.sin(random.uniform(0.4, 1)), math.tan(random.uniform(0.4, 1)))
self.acceleration = v.vec3(0.0, 0.0, 0.0)
self.size = 1
self.max_steering_speed = 0.4 * 6
self.max_steering_force = 0.8 * 2 # was 6
self.desired_separation = math.pow(self.size, 2) + 6 # was + 3
self.neighbour_distance = width/2
self.add_geometry()
# self.point_boid()
self.add_shader()
def __repr__(self):
return self.name
def add_shader(self):
'''
Create a random coloured shader.
Apply shader to object geometry.
'''
name_shader = 'aiStandardSurface' + self.name
red = random.uniform(0.0, 0.1)
green = random.uniform(0.0, 1.0)
blue = random.uniform(0.3, 1.0)
cmds.sets(renderable=True, noSurfaceShader=True, empty=True, name=name_shader+'SG')
cmds.shadingNode('aiStandardSurface', asShader=True, name=name_shader)
cmds.setAttr(name_shader+'.baseColor', red, green, blue, type='double3')
cmds.setAttr(name_shader+'.base', 0.85)
cmds.setAttr(name_shader+'.specular', 1)
cmds.setAttr(name_shader+'.emission', 1)
cmds.setAttr(name_shader+'.emissionColor', red, green, blue, type='double3')
cmds.setAttr(name_shader+'.specularColor', red, green, blue, type='double3')
cmds.connectAttr(name_shader+'.outColor', name_shader+'SG.surfaceShader')
cmds.sets(self.name, edit=True, forceElement=name_shader+'SG')
def add_geometry(self):
'''
Create boid geometry.
'''
# cmds.polyPlatonicSolid(n=self.name, solidType=0) # dodecahedron
#cmds.polyCone(n=self.name, sx=24, sy=1, sz=0, ax=[0, -1, 0], rcp=0, cuv=3, ch=1, radius=self.size/2)
cmds.sphere(n=self.name, radius=self.size)
# cmds.polyCube(n=self.name)
def set_key(self, frame):
'''
Set keyframe for boid at frame number.
'''
cmds.select(self.name)
cmds.setKeyframe(self.name, t=frame)
def point_boid(self):
'''
Point boid in the direction it is travelling in Maya scene.
'''
cmds.select(self.name)
degrees_tuple = self.velocity.cosine_direction()
cmds.rotate(degrees_tuple[0], degrees_tuple[1], degrees_tuple[2], absolute=True, componentSpace=True)
def move_boid(self):
'''
Move boid in Maya scene.
'''
cmds.select(self.name)
cmds.move(self.position.x, self.position.y, self.position.z, relative=True)
def move_boid_absolute(self):
'''
Move boid in Maya scene.
'''
cmds.select(self.name)
cmds.move(self.position.x, self.position.y, self.position.z, absolute=True)
def update(self):
'''
Update velocity, position and lifespan for this particle.
'''
self.velocity = self.velocity + self.acceleration
self.position = self.position + self.velocity
self.acceleration = self.acceleration * 0
self.move_boid()
self.point_boid()
def apply_force(self, force):
'''
Add force vector to acceleration vector
@param {float} force
'''
self.acceleration = self.acceleration + force
def flock(self, others):
'''
Apply flocking behaviours.
'''
separation_force = self.separate(others)
separation_force = separation_force * 0.5
self.apply_force(separation_force)
alignment_force = self.align(others)
alignment_force = alignment_force * 0.5
self.apply_force(alignment_force)
cohesion_force = self.cohesion(others)
cohesion_force = cohesion_force * 0.5
self.apply_force(cohesion_force)
def seek(self, target):
'''
Steer particle towards target.
Called by cohesion().
'''
desired = target - self.position # point from position to target
desired = desired.unit_vector()
desired = desired * self.max_steering_speed
steer = desired - self.velocity
steer.limit(self.max_steering_force)
return steer
def separate(self, others):
'''
Separate self from others.
Separation is the average of all the vectors pointing away from any close others.
'''
sum = v.vec3(0, 0, 0)
count = 0
steer = self.velocity
for other in others:
d = self.position.distance(other.position)
if ((d > 0) and (d < self.desired_separation)):
# calculate vector pointing away from other
difference = self.position - other.position
difference = difference.unit_vector()
difference = difference / d # weight by distance. More flee from closer things.
sum = sum + difference # average of all vectors pointing away from close particles.
count += 1
if count > 0:
sum = sum / count
sum = sum.unit_vector()
sum = sum * self.max_steering_speed # go this way!
steer = sum - self.velocity # steering = desired - velocity
steer.limit(self.max_steering_force)
return steer
def align(self, others):
'''
Align self with others.
'''
sum = v.vec3(0, 0, 0)
count = 0
for other in others:
d = self.position.distance(other.position)
if ((d > 0) and (d < self.neighbour_distance)):
sum = sum + other.velocity
count += 1
if count > 0:
sum = sum / count
sum = sum.unit_vector()
sum = sum * self.max_steering_speed # go this way!
steer = sum - self.velocity # steering = desired - velocity
steer.limit(self.max_steering_force)
return steer
else:
return v.vec3(0, 0, 0) # if no close boids then steering force is zero
def cohesion(self, others):
'''
Cohesion of self with others.
'''
sum = v.vec3(0, 0, 0)
count = 0
for other in others:
d = self.position.distance(other.position)
if ((d > 0) and (d < self.neighbour_distance)):
sum = sum + other.position # sum location of others
count += 1
if count > 0:
sum = sum / count
return self.seek(sum)
else:
return v.vec3(0, 0, 0) # if no close boids then cohesion force is zero
def borders(self):
'''
Move particle to wrap around borders of drawing area.
'''
if self.position.x < -self.desired_separation:
self.position.x = width + self.desired_separation
if self.position.y < -self.desired_separation:
self.position.y = height + self.desired_separation
if self.position.z < -self.desired_separation:
self.position.z = depth + self.desired_separation
if self.position.x > width + self.desired_separation:
self.position.x = -self.desired_separation
if self.position.y > height + self.desired_separation:
self.position.y = -self.desired_separation
if self.position.z > depth + self.desired_separation:
self.position.z = -self.desired_separation
self.move_boid_absolute()
self.point_boid()
def borders1(self):
'''
Move particle stay within borders of drawing area.
Not used.
'''
if self.position.x > width or self.position.x < 0:
self.velocity.x = self.velocity.x * -1
if self.position.y > height or self.position.y < 0:
self.velocity.y = self.velocity.y * -1
if self.position.z > depth or self.position.z < 0:
self.velocity.z = self.velocity.z * -1
self.move_boid_absolute()
self.point_boid()
# initialise particle system
boids = []
for a in range(2400):
name = 'cube' + str(a)
obj = Particle(name)
boids.append(obj)
FRAMES = 420
frame = 1
while frame < FRAMES:
print('frame = ', frame)
for boid in boids:
boid.borders()
boid.flock(boids)
boid.update()
boid.set_key(frame)
frame += 1
|
code
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 27 11:18:36 2017
@author: roger
"""
import numpy as np
import itertools as itt
import matplotlib.pyplot as plt
from numpy import random as rand
def proj_basis(d,D): #Projects the basis of a D-dim space to a d-dim space
W=rand.normal(0,1/d,(d,D)) #Generate a random matrix to project D-dim vectors to d-dim space
basis=np.identity(D) #Generate the basis of a D-dim space
proj_vect=np.dot(W,basis) #Project the basis
proj_vect[0]=proj_vect[0]-np.mean(proj_vect[0]) #center first component
proj_vect[1]=proj_vect[1]-np.mean(proj_vect[1]) #center second component
std_dev=np.sqrt(np.var(proj_vect[0,])) #compute the std dev of the first component
proj_vect=proj_vect/std_dev #rescale by first component
return proj_vect
d=2
i=0
rng=[10,50,100,150,200,500,1000,10000]
for D in rng: #Plot the proj basis for a rng of dimensions D into a d-dim space
i=i+1
proj_vect=proj_basis(d,D)
rnd_vect_plane=rand.normal(0,1,(2,D)) #generate random normals
plt.subplot(4,2,i) #more than one plot
plt.scatter(proj_vect[0],proj_vect[1])
plt.scatter(rnd_vect_plane[0],rnd_vect_plane[1],color="red")
plt.title("N=%d"%D) #change title
#hypercube
def proj_hypercube(d,D):
vmax=[1]*D
vmin=[-1]*D
hypercube=np.transpose(np.asarray(list(itt.product(*zip(vmin,vmax))))) #generates the vertices
W=rand.normal(0,1/d,(d,D)) #Generates the projection matrix
proj_hyp_cube=np.dot(W,hypercube) #Projects
proj_hyp_cube[0]=proj_hyp_cube[0]-np.mean(proj_hyp_cube[0])
proj_hyp_cube[1]=proj_hyp_cube[1]-np.mean(proj_hyp_cube[1])
std_dev=np.sqrt(np.var(proj_hyp_cube[1,]))
proj_hyp_cube=proj_hyp_cube/std_dev
return proj_hyp_cube
d=2
rng=[2,3,4,5,6,10]
i=0
for D in rng: #projects the hypercubes from different dimensions to a 2-dim subspace
i=i+1
proj_hyp_cube=proj_hypercube(d,D)
plt.subplot(3,2,i) #more than one plot
plt.scatter(proj_hyp_cube[0],proj_hyp_cube[1])
plt.title("D=%d"%D) #change title
|
code
|
import sublime, sublime_plugin
class keymapperCommand(sublime_plugin.TextCommand,sublime_plugin.WindowCommand):
"""Key Mapper, sadly you still have to define all the keymaps in your
.sublime-keymap just point them all here that you want to be able to map around.
this subclasses both TextCommand and WindowCommand so that it can be used for anything!
in your project file have a new index called "keymapper", inside define keys as you would
in a .sublime-keymap. Note again that these keys that are available are only the ones
that you pointed to run keymapper in your master keymap file.
samples:::
** added to Default ($OS).sublime-keymap
{ "keys": ["ctrl+alt+r"], "command": "keymapper","args":{"key":"ctrl+alt+r"}}
** added to $PROJECT.sublime-project
"keymapper":[
{ "keys": ["ctrl+alt+r"], "command": "subprocess", "args": {"exe": "/home/admalledd/bin/pypy3"}},
]
Note that the .sublime-project sample is using one of my other plugins (sp.py/subprocess),
just because it is all I really use the keymapper for...
"""
def run(self,*args,key=None):
if ((not sublime.active_window().project_file_name()) or
'keymapper' not in sublime.active_window().project_data()):
print("keymapper: no project file found! aborting!")
return False
self.proj_keys = sublime.active_window().project_data()['keymapper']
for keymap in self.proj_keys:
if key in keymap['keys']:
print('keymapper: found keybinding!')
#here is where more complicated logics would go if more crazy is wanted
return sublime.active_window().active_view().run_command(
keymap['command'],keymap['args']
)
|
code
|
# adapted from https://www.kaggle.com/dan3dewey/santa-s-simple-scheduler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from santaspkg.cost_function import soft_cost_function as cost_function
from santaspkg.refinement import refinement_pass, refine_until_convergence
from santaspkg.dataset import data
def initialize():
NDAYS = 100
NFAMS = 5000
MAX_PPL = 300
MIN_PPL = 125
# The family preference cost parameters
PENALTY_CONST = [0, 50, 50, 100, 200, 200, 300, 300, 400, 500, 500]
PENALTY_PPL = [0, 0, 9, 9, 9, 18, 18, 36, 36, 199+36, 398+36]
# The seed is set.
RANDOM_SEED = 127
np.random.seed(RANDOM_SEED)
def family_cost(ichoice, nppl):
#global PENALTY_CONST, PENALTY_PPL
return PENALTY_CONST[ichoice] + nppl*PENALTY_PPL[ichoice]
# Show the cost-per-person in matrix form
# Note that higher choice values can give lower per-person cost.
# Also created a dictionary mapping the choice-nppl tuple to cost_pp.
cost_pp_dict = {}
print(" Cost per Person")
print("\n nppl= 2 3 4 5 6 7 8\nichoice")
# choices are 0 to 10
for ichoice in range(11):
# numbers of people in a family are 2 to 8:
choice_str = str(ichoice).rjust(5)+":"
for nppl in range(2,9):
cost_pp = family_cost(ichoice, nppl)/nppl
cost_pp_dict[(ichoice,nppl)] = cost_pp
choice_str = choice_str + str(int(cost_pp)).rjust(8)
print(choice_str)
# Can use the cost_pp_dict to go through the ichoice, nppl combinations,
# in order from least to greatest cost-per-person, if that's useful.
# (Didn't use this, put values in by hand below.)
if False:
sorted_cost_pp = sorted(cost_pp_dict.items(), key =
lambda kv:(kv[1], kv[0]))
for ich_nppl in sorted_cost_pp:
ichoice = ich_nppl[0][0]
nppl = ich_nppl[0][1]
print(ichoice,nppl)
# Define the accounting cost function
def accounting_cost(people_count):
# people_count[iday] is an array of the number of people each day,
# valid for iday=1 to NDAYS (iday=0 not used).
total_cost = 0.0
ppl_yester = people_count[NDAYS]
for iday in range(NDAYS,0,-1):
ppl_today = people_count[iday]
ppl_delta = np.abs(ppl_today - ppl_yester)
day_cost = (ppl_today - 125)*(ppl_today**(0.5+ppl_delta/50.0))/400.0
total_cost += day_cost
##print("Day {}: delta = {}, $ {}".format(iday, ppl_delta, int(day_cost)))
# save for tomorrow
ppl_yester = people_count[iday]
print("Total accounting cost: {:.2f}. Ave costs: {:.2f}/day, {:.2f}/family".format(
total_cost,total_cost/NDAYS,total_cost/NFAMS))
return total_cost
# Read in the data
df_family = data
# The "choice_" column headings use a lot of room, change to "ch_"
the_columns = df_family.columns.values
for ich in range(10):
the_columns[ich] = "ch_"+str(ich)
df_family.columns = the_columns
# Total number of people
total_people = sum(df_family['n_people'])
# and average per day:
ave_ppl_day = int(total_people / NDAYS)
print("Total number of people visiting is {}, about {} per day".format(total_people, ave_ppl_day))
# Add an assigned day column, initialize it to -1 (not assigned)
df_family['assigned_day'] = -1
# As the results of v1-v3 showed, there are certain days that are less subscribed than others.
# (v4) Fill using lowest to higher cost-per-person choices.
# Also fill the lower-demand days above day 60 first...
if True:
sched_method = 'LowHighCpp'
# Reset the assignments and the people_count_m1 array:
df_family['assigned_day'] = -1
people_count_m1 = np.zeros(NDAYS)
print("\nFilling the low-request days above day 60 ...\n")
# First, assign the lower-requested days.
# The low-people days are every 4 out of 7.
# The 6 low regions above day 60 are:
lower_days = [62,63,64,65, 69,70,71,72, 76,77,78,79, 83,84,85,86, 90,91,92,93, 97,98,99,100]
# include the 5 other low regions:
lower_days = lower_days + [20,21,22,23, 27,28,29,30, 34,35,36,37, 41,42,43,44, 48,49,50,51, 55,56,57,58]
# will fill these to the minimum needed, or a bit more:
max_ppl_day = 126+25
# Set the desired cost-per-person limit by specifying:
# i) specific choice to use, and ii) a minimum number of people (inclusive)
ichs = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8,8,9,9]
nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3,0,7,0]
for icost in range(len(ichs)):
ich = ichs[icost]
ich_str = 'ch_'+str(ich)
nppl_min = nppl_mins[icost]
print("Doing ",ich_str," nppl >=",nppl_min)
#
# Go through the families and assign ones that meet the criteria
for ifam in df_family.index:
day_ich = df_family.loc[ifam,ich_str]
nppl = df_family.loc[ifam,'n_people']
if ((df_family.loc[ifam,'assigned_day'] < 0) and
(day_ich in lower_days) and (nppl >= nppl_min) and
(people_count_m1[day_ich-1] < max_ppl_day)):
##print(ifam,day_ich,nppl,sum(people_count_m1))
# OK, got one. Assign it:
df_family.loc[ifam,'assigned_day'] = day_ich
# and keep track of the people count:
people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
" and people =",sum(people_count_m1))
print("\nFilling all the rest of the days ...\n")
# will fill the other days to a maximum amount, with a break above
max_ppl_day = 220
max_ppl_above = 170
lower_days = [62,63,64,65, 69,70,71,72, 76,77,78,79, 83,84,85,86, 90,91,92,93, 97,98,99,100]
# Set the desired cost-per-person limit by specifying:
# i) specific choice to use, and ii) a minimum number of people (inclusive)
# These look like enough to get 125 in each of the low
ichs = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8] #,8,9,9]
nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3] #,0,7,0]
for icost in range(len(ichs)):
ich = ichs[icost]
ich_str = 'ch_'+str(ich)
nppl_min = nppl_mins[icost]
print("Doing ",ich_str," nppl >=",nppl_min)
#
# Go through the families and assign ones that meet the criteria
for ifam in df_family.index:
day_ich = df_family.loc[ifam,ich_str]
nppl = df_family.loc[ifam,'n_people']
if day_ich < 59:
ppl_limit = max_ppl_day
else:
ppl_limit = max_ppl_above
if ((df_family.loc[ifam,'assigned_day'] < 0) and
not(day_ich in lower_days) and (nppl >= nppl_min) and
(people_count_m1[day_ich-1] < ppl_limit)):
##print(ifam,day_ich,nppl,sum(people_count_m1))
# OK, got one. Assign it:
df_family.loc[ifam,'assigned_day'] = day_ich
# and keep track of the people count:
people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
" and people =",sum(people_count_m1))
# Finally, the remaining families don't have any of their choices still available,
# increase the people limits to get them in
print("\nPut these last few anywhere ...\n")
max_ppl_day = 260
max_ppl_above = 210
# Set the desired cost-per-person limit by specifying:
# i) specific choice to use, and ii) a minimum number of people (inclusive)
# These look like enough to get 125 in each of the low
ichs = [0,1,2,1,2,3,2,3,3,1,2,4,3,4,5,4,3,6,7,6,7,4,5,6,8,7,4,8,5,6,7,8,6,7,8,8,9,9]
nppl_mins = [0,4,7,3,4,7,3,6,4,0,0,7,3,6,5,4,0,6,7,5,7,3,3,4,7,5,0,6,0,3,3,4,0,0,3,0,7,0]
for icost in range(len(ichs)):
ich = ichs[icost]
ich_str = 'ch_'+str(ich)
nppl_min = nppl_mins[icost]
print("Doing ",ich_str," nppl >=",nppl_min)
#
# Go through the families and assign ones that meet the criteria
for ifam in df_family.index:
day_ich = df_family.loc[ifam,ich_str]
nppl = df_family.loc[ifam,'n_people']
if day_ich < 59:
ppl_limit = max_ppl_day
else:
ppl_limit = max_ppl_above
if ((df_family.loc[ifam,'assigned_day'] < 0) and
(nppl >= nppl_min) and
(people_count_m1[day_ich-1] < ppl_limit)):
##print(ifam,day_ich,nppl,sum(people_count_m1))
# OK, got one. Assign it:
df_family.loc[ifam,'assigned_day'] = day_ich
# and keep track of the people count:
people_count_m1[day_ich-1] += df_family.loc[ifam,'n_people']
print("\nTotal assigned families = ",sum(df_family['assigned_day'] > 0),
" and people =",sum(people_count_m1))
# Done?
if (sum(df_family['assigned_day'] > 0) >= 5000):
break
# Check for any not-assigned families
if df_family['assigned_day'].min() < 0:
print("Ooops! Some families did not get days assigned!")
print("Number assigned = {}".format(sum(df_family['assigned_day'] > 0)))
raise RuntimeError("Some families did not get days assigned!")
new = df_family['assigned_day'].tolist()
return new, df_family
if __name__ == "__main__":
new, df_family = initialize()
# Score it
score = cost_function(new)
print(f'Score: {score}')
# Write out the submission file:
df_family['family_id'] = df_family.index
df_family[['family_id','assigned_day']].to_csv(f"./santa-workshop-tour-2019/submission_{score}.csv", index=False)
|
code
|
#! python3
# _*_ coding: utf-8 _*_
from colorama import init, Fore
init(autoreset=False)
class Colored:
# Foreground: red; background: default
def red(self, s):
return Fore.LIGHTRED_EX + s + Fore.RESET
# Foreground: green; background: default
def green(self, s):
return Fore.LIGHTGREEN_EX + s + Fore.RESET
# Foreground: yellow; background: default
def yellow(self, s):
return Fore.LIGHTYELLOW_EX + s + Fore.RESET
# Foreground: white; background: default
def white(self,s):
return Fore.LIGHTWHITE_EX + s + Fore.RESET
# Foreground: blue; background: default
def blue(self,s):
return Fore.LIGHTBLUE_EX + s + Fore.RESET
# Foreground: cyan; background: default
def cyan(self, s):
return Fore.LIGHTCYAN_EX + s + Fore.RESET
# Foreground: magenta; background: default
def magenta(self, s):
return Fore.LIGHTMAGENTA_EX + s + Fore.RESET
|
code
|
from textblob import TextBlob
lst=[]
with open('Adverb.txt','r') as f:
for i in f.readlines():
word=i.strip('\n')
text=TextBlob(word)
print(word,text.sentiment.polarity)
|
code
|
#!/usr/bin/env python
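# Hadoop-streaming style mapper (Python 2): read lines from stdin, collect them, and print them back out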
import sys
list1=[]
for line in sys.stdin:
line=line.strip()
words=line.split("\n")
list1.append(words[0])
for x in xrange(len(list1)):
print list1[x]
|
code
|
# Lucas de Jesus Silva - 20731356 - atividade 2 PLP - Estruturado
#=================================================================================================
# method to determine the winner of the weightlifting event
def levantamentoPeso (x,y):
vencedor = ""
if x["peso"] > y["peso"]:
vencedor = x["nome"]
else:
vencedor = y["nome"]
print("Vencedor do levantamento de pesos: "+vencedor)
#=================================================================================================
# Method to determine the winner of the judo event:
def judo (x,y):
vencedor = ""
if x["ippon"] == True :
return "Vencedor do judo: " + x["nome"]
if y["ippon"] == True :
return "Vencedor do judo: " + y["nome"]
if x["wazari"] == y["wazari"] :
if x["yuko"] > y["yuko"] :
vencedor = x["nome"]
else:
vencedor = y["nome"]
if x["wazari"] > y["wazari"]:
vencedor = judocaX["nome"]
else:
vencedor = y["nome"]
return "Vencedor do judô: " + vencedor
#=================================================================================================
# The winner of the shot put event is computed by the next 3 methods
# finds the longest throw
def maiorArremesso(a,b,c):
maior = a;
if b > maior :
maior = b
if c > maior :
maior = c
return maior
# finds the second longest throw
def segundoMaior(a,b,c):
if a > b :
if c > a :
return a
if b > c :
return b
else :
if c > b :
return b
if a > c :
return a
return c
# determines the winner of the shot put
def arremessoPesos (x, y):
vencedor = ""
xMaior = maiorArremesso(x["arr1"],x["arr2"],x["arr3"])
yMaior = maiorArremesso(y["arr1"],y["arr2"],y["arr3"])
xSegundo = segundoMaior(x["arr1"],x["arr2"],x["arr3"])
ySegundo = segundoMaior(y["arr1"],y["arr2"],y["arr3"])
if xMaior == yMaior:
if xSegundo > ySegundo:
vencedor = x["nome"]
else:
vencedor = y["nome"]
else:
if xMaior > yMaior:
vencedor = x["nome"]
else:
vencedor = y["nome"]
return "Vencedor do arremesso de pesos: "+vencedor
#=================================================================================================
# The winner of the artistic gymnastics event is computed by the next 4 methods
# finds the gymnast's lowest score, which is discarded
def menorNota(a,b,c,d,e):
menor = a
if b < menor :
menor = b
if c < menor :
menor = c
if d < menor :
menor = d
if e < menor :
menor = e
return menor;
# sums the gymnast's scores to compute the average
def somaNotas(a,b,c,d,e):
return a + b + c + d + e
# computes the gymnast's average, discarding the lowest score
def mediaGinasta(a,b,c,d,e):
return (somaNotas(a,b,c,d,e) - menorNota(a,b,c,d,e))/4
# determines the winner of the artistic gymnastics
def ginasticaArtistica(x,y):
vencedora = ""
mediaX = mediaGinasta(x["n1"],x["n2"],x["n3"],x["n4"],x["n5"])
mediaY = mediaGinasta(y["n1"],y["n2"],y["n3"],y["n4"],y["n5"])
if mediaX > mediaY :
vencedora = x["nome"]
else:
vencedora = y["nome"]
return "Vencedora da ginástica artística: "+vencedora
#=================================================================================================
# Weightlifting
levantadorX = {"nome":"João","peso":310}
levantadorY = {"nome":"Carlos","peso":320}
levantamentoPeso(levantadorX,levantadorY)
#=================================================================================================
# Judo
judocaX = {"nome":"Thiago","ippon":False,"wazari":6,"yuko":10}
judocaY = {"nome":"Lucas","ippon":False,"wazari":5,"yuko":14}
print(judo(judocaX,judocaY))
#=================================================================================================
# Shot put
arremessadorX = {"nome":"José","arr1":20.53,"arr2":21.9,"arr3":21.5}
arremessadorY = {"nome":"Luiz","arr1":20.78,"arr2":22.6,"arr3":22.7}
print(arremessoPesos(arremessadorX,arremessadorY))
#=================================================================================================
# Artistic gymnastics
ginastaX = {"nome":"Beatriz","n1":9.5,"n2":9.0,"n3":9.1,"n4":8.75,"n5":8.8}
ginastaY = {"nome":"Lilian","n1":9.3,"n2":8.5,"n3":9.2,"n4":8.9,"n5":9.4}
print(ginasticaArtistica(ginastaX,ginastaY))
|
code
|
# Import
import pygame
# Initialize game engine
pygame.init()
# Open window
window_size = (640, 480)
screen = pygame.display.set_mode(window_size)
pygame.display.set_caption("The Quest")
WHITE = (255, 255, 255)
RED = (255, 0, 0)
done = False
clock = pygame.time.Clock()
# MAIN GAME LOOP
while not done:
# EVENTS
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
# GAME LOGIC
# WIPE SCREEN
screen.fill(WHITE)
# DRAWING
offset = 0
for x_offset in range(30,300,30):
pygame.draw.line(screen,RED,[x_offset,100],[x_offset-10,90],2)
pygame.draw.line(screen,RED,[x_offset,90],[x_offset-10,100],2)
font = pygame.font.SysFont('Calibri',25,True,False)
text = font.render("Anal Seepage",True,RED)
screen.blit(text,[250,250])
# UPDATE SCREEN
pygame.display.flip()
clock.tick(60)
pygame.quit()
|
code
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/7/29 18:56
# @Site :
# @File : qianxu_144.py
# @Software: PyCharm
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Preorder traversal (iterative)
class Solution(object):
def preorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
lst = []
s = []
s.append(root)
if not root:
return lst
while s:
temp = s.pop()
lst.append(temp.val)
if temp.right:
s.append(temp.right)
if temp.left:
s.append(temp.left)
return lst
class Solution1:
def preorderTraversal(self, root):
# 0 means the node's value should be output now, 1 means the node still has to be expanded
# it starts with 1, not 0
stack = [(1,root)]
s = []
while stack:
command, node = stack.pop()
if node is None:
# do not return here; this is not recursion, just skip the empty node
continue
if command == 0:
s.append(node.val)
else:
# command == 1 means the node was reached during traversal
# note: push the children in reverse order
# note: the push order differs from postorder traversal
stack.append((1, node.right))
stack.append((1, node.left))
stack.append((0, node))
return s
# Recursive preorder
class Solution2:
def preorderTraversal(self, root):
res = []
if root:
res.append(root.val)
res += self.preorderTraversal(root.left)
res += self.preorderTraversal(root.right)
return res
|
code
|
import pandas as pd
import spacy
from spacy.kb import KnowledgeBase
def entities_info(path):
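# Build a dict mapping entity id -> {'name': ..., 'description': ...} from a tab-separated file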
entity_info = dict()
with open(path, 'r', encoding='utf8') as infile:
for line in infile:
row = line.split('\t')
entity_info[row[0]] = dict()
entity_info[row[0]]['name'] = row[1]
entity_info[row[0]]['description'] = row[2]
return entity_info
def error_analysis():
nlp = spacy.load('../resources/nen_nlp')
kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=96)
kb.load_bulk('../resources/kb_probs')
predictions = pd.read_csv("../data/model_data/predictions.tsv", sep='\t')
entity_info = entities_info("../data/model_data/entities.tsv")
i = 0
for prediction, label, org, sent in zip(predictions['el_system'], predictions['label'], predictions['org'], predictions['sentence']):
label = str(label)
if prediction != label and prediction != 'NIL':
i += 1
print()
print(i, org)
print([c.entity_ for c in kb.get_candidates(org)])
print("Prediction:", entity_info[prediction]['name'], prediction)
print(entity_info[prediction]['description'])
print("Label:", entity_info[label]['name'], label)
print(entity_info[label]['description'])
print()
print("Sentence: ", sent)
print()
print(i, "errors.")
def main():
error_analysis()
if __name__ == "__main__":
main()
|
code
|
def process_text(filename):
"""Makes histogram of text"""
d = dict()
fp = open(filename, 'r')
for line in fp:
for word in line.split():
while not (word == '' or word[0].isalpha() or word[0].isdigit()):
word = word[1:]
while not (word == '' or word[-1].isalpha() or word[-1].isdigit()):
word = word[0:-1]
word = word.lower()
if word != '':
d[word] = d.get(word, 0) + 1
fp.close()
return d
def inverse_dict(d):
"""Reverse keys and values of dictionary"""
inverse = dict()
for key in d:
val = d[key]
if val not in inverse:
inverse[val] = [key]
else:
inverse[val].append(key)
return inverse
def subtract_common(freq, freq_word):
"""subtrace most common 100 words from inversed dictionary"""
common_freq = ['the', 'be', 'to', 'of', 'and', 'a', 'in', 'that', 'have', 'i',
'it', 'for', 'not', 'on', 'with', 'he', 'as', 'you', 'do', 'at',
'this', 'but', 'his', 'by', 'from', 'they', 'we', 'say', 'her', 'she',
'or', 'an', 'will', 'my', 'one', 'all', 'would', 'there', 'their', 'what',
'so', 'up', 'out', 'if', 'about', 'who', 'get', 'which', 'go', 'me',
'when', 'make', 'can', 'like', 'time', 'no', 'just', 'him', 'know', 'take',
'people', 'into', 'year', 'your', 'good', 'some', 'could', 'them', 'see', 'other',
'than', 'now', 'look', 'only', 'come', 'its', 'over', 'think', 'also',
'back', 'after', 'use', 'two', 'how', 'our', 'work', 'first', 'well', 'way',
'even', 'new', 'want', 'because', 'any', 'these', 'give', 'day', 'most', 'us',
'are', 'is', 'have', 'has', 'were', 'was', 'been', 'had']
top10_freq = []
for number in freq:
if freq_word[number][0] not in common_freq:
top10_freq.append(number)
if len(top10_freq) == 10:
break
top10_freq.sort()
top10_freq.reverse()
return top10_freq
stat = process_text('alice_in_wonderland.txt')
freq_word = inverse_dict(stat)
freq = sorted(freq_word.keys(), reverse=True)
top10_freq = subtract_common(freq, freq_word)
for number in top10_freq:
print (freq_word[number][0], number)
|
code
|
from math import ceil,floor,factorial,gcd,sqrt,log2,cos,sin,tan,acos,asin,atan,degrees,radians,pi,inf
from itertools import accumulate,groupby,permutations,combinations,product,combinations_with_replacement
from collections import deque,defaultdict,Counter
from bisect import bisect_left,bisect_right
from operator import itemgetter
from heapq import heapify,heappop,heappush
from queue import Queue,LifoQueue,PriorityQueue
from copy import deepcopy
from time import time
from functools import reduce
import string
import sys
sys.setrecursionlimit(10 ** 7)
def input() : return sys.stdin.readline().strip()
def INT() : return int(input())
def MAP() : return map(int,input().split())
def MAP1() : return map(lambda x:int(x)-1,input().split())
def LIST() : return list(MAP())
def solve():
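# Brute force: enumerate every non-decreasing sequence x of length N over 1..M and keep the best total score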
N, M, Q = MAP()
A = [LIST() for _ in range(Q)]
ans = 0
for x in combinations_with_replacement(range(1, M+1), N):
score = 0
for a, b, c, d in A:
if x[b-1] - x[a-1] == c:
score += d
ans = max(ans, score)
print(ans)
if __name__ == '__main__':
solve()
|
code
|
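# Read a list of integers and report the pair with the maximum product (and any later pair that ties it)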
size = int(input("Enter no. of items you want to add: "))
array = []
for n in range(size):
m = n + 1
array.insert(n, int(input("Enter item no. %d: " % m)))
max_val = 0
m1 = m2 = None
pair = []
sec_pair = []
for n in range(size-1):
for m in range(n+1, size):
val = array[n] * array[m]
if val > max_val:
max_val = val
m1 = array[n]
m2 = array[m]
elif val == max_val:
sec_pair = []
sec_pair.extend((array[n], array[m]))
pair.extend((m1, m2))
print(pair)
print(sec_pair)
|
code
|
import md5
import base64
import string
b64_str='./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
final_str=""
def b64_from_24bit(a, b ,c ,d):
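# pack three bytes into a 24-bit word and append d characters of the crypt-style base64 alphabet, 6 bits at a time (least significant first)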
global final_str
w = (ord(a)<<16)|(ord(b)<<8)|ord(c)
for i in range(0, d):
final_str+=b64_str[w & 0x3f]
w = w >> 6
m=md5.new('chfT7jp2qc')
m_tem=m.digest()
m=md5.new('c$1$hfT7jp2q'+m_tem[0])
key='c'
length=len(key)
while(length>0):
if(length &1 !=0):
m.update('\0')
print 1
else:
m.update(key[0])
print 2
length>>=1
m_alt=m.digest()
print base64.encodestring(m_alt)
for i in range(0, 1000):
if( i&1 != 0):
m=md5.new('c')
else:
m=md5.new(m_alt)
if(i % 3 != 0):
m.update('hfT7jp2q')
if(i % 7 != 0):
m.update('c')
if(i & 1 !=0):
m.update(m_alt)
else:
m.update('c')
m_alt=m.digest()
print base64.encodestring(m.digest())
b64_from_24bit(m_alt[0],m_alt[6],m_alt[12],4)
b64_from_24bit(m_alt[1],m_alt[7],m_alt[13],4)
b64_from_24bit(m_alt[2],m_alt[8],m_alt[14],4)
b64_from_24bit(m_alt[3],m_alt[9],m_alt[15],4)
b64_from_24bit(m_alt[4],m_alt[10],m_alt[5],4)
b64_from_24bit('0','0',m_alt[11],2)
|
code
|
import os,magic
import re
from genericpath import isdir, isfile
from flask import Flask,send_file,render_template,request
def show_dir(pwd):
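# render a directory listing template for the given path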
files = os.listdir(pwd)
return render_template("index.html",files = files,pwd = pwd)
def send_to_client(pwd):
path = pwd[:-1]
return send_file(path,as_attachment=True)
def file_or_dir(pwd):
if(os.path.isdir(pwd)):
return show_dir(pwd)
else:
return send_to_client(pwd)
app = Flask(__name__)
@app.route('/edit/<path:dummy>')
def editor(dummy):
file_path = '/'+str(dummy)
with open(file_path,'r',encoding='utf-8') as f:
content = f.read()
return render_template('editor.html',path = file_path,content = content)
@app.route('/save',methods=['POST'])
def save():
content = request.form['content']
path = request.form['path']
with open(path,'w') as f:
f.write(content)
return "saved!"
@app.route('/<path:dummy>')
def fallback(dummy):
if str(dummy).startswith('edit'):
return editor(str(dummy))
else:
return file_or_dir('/'+str(dummy)+'/')
@app.route('/')
def index():
html = file_or_dir("/")
return html
if __name__ == '__main__':
app.run(debug=True)
|
code
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.functions import collect_set, array_contains, col, max, mean, desc, sum
from pyspark.sql.types import ArrayType
import os
os.environ["PYSPARK_PYTHON"] = "/home/pawel/PycharmProjects/HPC/venv/bin/python3.5"
os.environ["PYSPARK_DRIVER_PYTHON"] = "/home/pawel/PycharmProjects/HPC/venv/bin/python3.5"
"""
Process data and build user profile vector with the following characteristics:
1) count of comments, posts (all), original posts, reposts and likes made by user
2) count of friends, groups, followers
3) count of videos, audios, photos, gifts
4) count of "incoming" (made by other users) comments, max and mean "incoming" comments per post
5) count of "incoming" likes, max and mean "incoming" likes per post
6) count of geo tagged posts
7) count of open / closed (e.g. private) groups a user participates in
Medium:
1) count of reposts from subscribed and not-subscribed groups
2) count of deleted users in friends and followers
3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separately) made by (a) friends
and (b) followers per post
4) aggregate (e.g. count, max, mean) characteristics for comments and likes (separately) made by (a) friends
and (b) followers per user
5) find emoji (separately, count of: all, negative, positive, others) in (a) user's posts (b) user's comments
"""
class SparkTask:
def __init__(self):
"""
Configuration for Spark:
master: address to Master node or local
path: path to folder with *.parquet files
"""
self.path = "/home/pawel/bd_parquets/"
self.master = "local"
self.app_name = "Spark Task"
self.spark = SparkSession.builder \
.master(self.master) \
.appName(self.app_name) \
.getOrCreate()
def read_parquet_file(self, filename):
return self.spark.read.parquet(self.path + filename)
def task1a(self):
"""1) count of comments, posts (all), original posts, reposts and likes made by user"""
user_wall_likes = self.read_parquet_file("userWallLikes.parquet")
user_wall_posts = self.read_parquet_file("userWallPosts.parquet")
user_wall_comments = self.read_parquet_file("userWallComments.parquet")
likes_count = user_wall_likes \
.groupBy('likerId') \
.count() \
.withColumnRenamed('likerId', 'UserId') \
.withColumnRenamed('count', 'likes')
posts_count = user_wall_posts \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'posts(all)')
original_posts_count = user_wall_posts \
.filter(user_wall_posts['is_reposted'] == 'false') \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'original_posts')
reposts_count = user_wall_posts \
.filter(user_wall_posts['is_reposted'] == 'true') \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'reposts')
comments_count = user_wall_comments \
.groupBy('from_id') \
.count() \
.withColumnRenamed('from_id', 'UserId') \
.withColumnRenamed('count', 'comments')
final_table = comments_count \
.join(posts_count, 'UserId') \
.join(original_posts_count, 'UserId') \
.join(reposts_count, 'UserId') \
.join(likes_count, 'UserId')
return final_table
def task2a(self):
"""2) count of friends, groups, followers"""
followers = self.read_parquet_file("followers.parquet")
friends = self.read_parquet_file("friends.parquet")
groupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
friends_count = friends \
.groupBy('profile') \
.count() \
.withColumnRenamed('profile', 'UserId') \
.withColumnRenamed('count', 'friends')
groups_count = groupsSubs \
.groupBy('user') \
.count() \
.withColumnRenamed('user', 'UserId') \
.withColumnRenamed('count', 'groups')
followers_count = followers \
.groupBy('profile') \
.count() \
.withColumnRenamed('profile', 'UserId') \
.withColumnRenamed('count', 'followers')
result_table = friends_count.join(groups_count, 'UserId').join(followers_count, 'UserId')
return result_table
def task3a(self):
"""3) count of videos, audios, photos, gifts"""
friends_profiles = self.read_parquet_file("followerProfiles.parquet")
result_table = friends_profiles \
.filter(friends_profiles.counters.isNotNull()) \
.select(friends_profiles.id.alias("UserId"),
friends_profiles.counters.videos.alias("videos"),
friends_profiles.counters.audios.alias("audios"),
friends_profiles.counters.photos.alias("photos"),
friends_profiles.counters.gifts.alias("gifts"))
return result_table
def task4a(self):
"""4) count of "incoming" (made by other users) comments, max and mean "incoming" comments per post"""
user_wall_comments = self.read_parquet_file("userWallComments.parquet")
comments_by_post_count = user_wall_comments \
.filter(user_wall_comments['from_id'] != user_wall_comments['post_owner_id']) \
.select('post_id', 'post_owner_id') \
.groupBy('post_id') \
.count()
comment_to_user = user_wall_comments \
.filter(user_wall_comments['from_id'] != user_wall_comments['post_owner_id']) \
.select('post_id', 'post_owner_id') \
.dropDuplicates()
result_table = comment_to_user\
.join(comments_by_post_count, "post_id")\
.groupBy("post_owner_id").agg(sum("count"), max("count"), mean("count"))
return result_table
def task5a(self):
"""5) count of "incoming" likes, max and mean "incoming" likes per post"""
userWallLikes = self.read_parquet_file("userWallLikes.parquet")
likes_per_post = userWallLikes \
.filter(userWallLikes['ownerId'] != userWallLikes['likerId']) \
.groupBy('itemId') \
.count()
post_to_user = userWallLikes \
.filter(userWallLikes['ownerId'] != userWallLikes['likerId']) \
.select('itemId', 'ownerId') \
.dropDuplicates()
result_table = post_to_user\
.join(likes_per_post, 'itemId')\
.groupBy('ownerId')\
.agg(sum('count'), max('count'), mean('count'))
return result_table
def task6a(self):
"""6) count of geo tagged posts"""
userWallPosts = self.read_parquet_file("userWallPosts.parquet")
geo_tagged_posts_count = userWallPosts \
.filter(userWallPosts['geo.coordinates'] != 'null') \
.groupBy('owner_id') \
.count() \
.withColumnRenamed('owner_id', 'UserId') \
.withColumnRenamed('count', 'geo_tagged_posts')
result_table = geo_tagged_posts_count
return result_table
def task7a(self):
"""7) count of open / closed (e.g. private) groups a user participates in"""
groupsProfiles = self.read_parquet_file("groupsProfiles.parquet")
userGroupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
invert_id = UserDefinedFunction(lambda x: -int(x))
user_to_group = userGroupsSubs \
.select("user", invert_id("group")) \
.withColumnRenamed("<lambda>(group)", "group")\
.dropDuplicates()
group_type = groupsProfiles\
.select("id", "is_closed")\
.withColumnRenamed("id", "group")\
.dropDuplicates()
user_to_group_type = user_to_group\
.join(group_type, "group")
opened_groups = user_to_group_type\
.filter(user_to_group_type['is_closed'] == 0)\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "opened")
closed_groups = user_to_group_type\
.filter(user_to_group_type['is_closed'] > 0)\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "closed")
result_table = opened_groups\
.join(closed_groups, "user", how="full_outer")\
.fillna(0)
return result_table
def task1b(self):
"""1) count of reposts from subscribed and not-subscribed groups"""
userWallPosts = self.read_parquet_file("userWallPosts.parquet")
userGroupsSubs = self.read_parquet_file("userGroupsSubs.parquet")
reposts_t = userWallPosts \
.filter(userWallPosts.is_reposted) \
.select('owner_id', 'repost_info.orig_owner_id')\
.withColumnRenamed("owner_id", "user")
reposts = reposts_t.filter(reposts_t["orig_owner_id"] < 0)
user_to_group_sub = userGroupsSubs\
.select("user", "group")\
.groupBy("user")\
.agg(collect_set("group"))\
.withColumnRenamed("collect_set(group)", "groups")
def contains(id, groups):
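# True if the (stringified) id is in the collected set of group ids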
if not groups:
return False
if str(id) in groups:
return True
else:
return False
contains_udf = UserDefinedFunction(contains)
temp = reposts.join(user_to_group_sub, "user", how="left_outer")
reposts_from = temp\
.withColumn("from_subscribed", contains_udf(temp.orig_owner_id, temp.groups))
reposts_from_subscribed = reposts_from\
.filter(reposts_from.from_subscribed == 'true')\
.select('user')\
.groupBy('user')\
.count()\
.withColumnRenamed("count", "from_subscribed")
reposts_not_from_subscribed = reposts_from \
.filter(reposts_from['from_subscribed'] == 'false') \
.select('user')\
.groupBy("user")\
.count()\
.withColumnRenamed("count", "not_from_subscribed")
result_table = reposts_from_subscribed\
.join(reposts_not_from_subscribed, 'user', how="full_outer")\
.fillna(0)
return result_table
def task2b(self):
"""2) count of deleted users in friends and followers"""
friends = self.read_parquet_file("friends.parquet")
followers = self.read_parquet_file("followers.parquet")
friendsProfiles = self.read_parquet_file("friendsProfiles.parquet")
followersProfiles = self.read_parquet_file("followerProfiles.parquet")
deleted_friends_profiles = friendsProfiles\
.filter(friendsProfiles.deactivated == "deleted")\
.select("id", "deactivated")\
.withColumnRenamed("id", "follower")
deleted_follower_profiles = followersProfiles\
.filter(followersProfiles.deactivated == "deleted")\
.select("id", "deactivated")\
.withColumnRenamed("id", "follower")
deleted_friends = friends\
.join(deleted_friends_profiles, "follower", how="inner")\
.select('profile', 'deactivated')\
.dropDuplicates()\
.groupBy('profile')\
.count()\
.withColumnRenamed('count', 'deleted_friends_acc')
deleted_followers = followers\
.join(deleted_follower_profiles, "follower", how="inner")\
.select("profile", "deactivated")\
.dropDuplicates()\
.groupBy("profile")\
.count()\
.withColumnRenamed("count", "deleted_followers_acc")
result_table = deleted_friends\
.join(deleted_followers, "profile", how="full_outer")\
.fillna(0)
return result_table
def task3b(self):
"""3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separtely) made by (a) friends
and (b) followers per post"""
pass
def task4b_friends(self):
"""3) aggregate (e.g. count, max, mean) characteristics for comments and likes (separtely) made by (a) friends
and (b) followers per user"""
friends = self.read_parquet_file("friends.parquet")
# userWallPosts = self.read_parquet_file("userWallPosts.parquet")
userWallComments = self.read_parquet_file("userWallComments.parquet")
userWallLikes = self.read_parquet_file("userWallLikes.parquet")
user_friends = friends\
.groupBy("profile")\
.agg(collect_set("follower"))\
.withColumnRenamed("collect_set(follower)", "friends")\
.select("profile", "friends")
comments = userWallComments.select("post_owner_id", "from_id", "post_id")
def contains(id, groups):
if not groups:
return False
if str(id) in groups:
return True
else:
return False
contains_udf = UserDefinedFunction(contains)
post_comment_to_relation = comments\
.withColumnRenamed("post_owner_id", "profile")\
.join(user_friends, "profile", how="left_outer")\
.withColumn("is_from_friend", contains_udf(col("from_id"), col("friends")))\
.select("profile", "is_from_friend", "post_id")\
.filter(col("is_from_friend") == "true")\
comments_from_friends_per_post = post_comment_to_relation\
.groupBy("post_id")\
.count()
result_table = post_comment_to_relation\
.select("profile", "post_id")\
.join(comments_from_friends_per_post, "post_id")\
.groupBy("profile")\
.agg(max("count"), mean("count"), sum("count"))\
.sort(desc("sum(count)"))
result_table.show()
def task4b_followers(self):
followers = self.read_parquet_file("followers.parquet")
user_followers = followers\
.groupBy("profile")\
.agg(collect_set("follower"))\
.withColumnRenamed("collect_set(follower)", "followers")\
.select("profile", "followers")
def task5b(self):
"""5) find emoji (separately, count of: all, negative, positive, others) in
(a) user's posts (b) user's comments """
pass
if __name__ == "__main__":
spark = SparkTask()
# spark.task1a().show()
# spark.task2a().show()
# spark.task3a().show()
spark.task4a().show()
# spark.task5a().show()
# spark.task6a().show()
# spark.task7a().show()
# spark.task1b().show()
# spark.task2b().show()
# spark.task4b_friends()
# print(res.show())
|
code
|
#Number guessing program
from random import randint
a=(randint(0, 50))
print(a)
inp=int(input(("Enter the value")))
while True:
if(inp>a):
print('Your guess is above the number, please input correctly')
inp=int(input(("continue the game")))
elif(inp<a):
print('Your guess is below the number, please input correctly')
inp=int(input(("continue the game")))
else:
print('Congrats! Your guess is correct')
break
|
code
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: 3ngthrust
"""
import os
import gpiozero
import functools
import time
from MCP3002 import MCP3002
def split_into_equal_sublists(a_list, number_of_parts):
""" Splits a list into a list of sublists
Arguments
----------
a_list : list object
List which will be split into sublists
number_of_parts : int
Number of sublists which should be created
Returns
-------
seperated_lists : list of lists
List of sublists of the given list, as equally sized as possible
"""
start = 0
seperated_lists = []
for i in range(number_of_parts):
end = round((len(a_list) / number_of_parts) * (i + 1))
seperated_lists.append(list(range(start,end)))
start = end
return seperated_lists
def update_station_faktory(num_of_stations, adc):
""" Creates a function to update the station of the player
Arguments
----------
num_of_stations : int
Number of different stations selectable on the player
adc : gpiozero MCP3xxx object
Analog-to-digital converter which reads the raw slider position
Returns
-------
update_station : function
Function which will take the current station number and update the
station on the player if necessary
"""
# Create sublists of equal sized percentage ranges for each station
percentage_sublists = split_into_equal_sublists(list(range(0,101)), num_of_stations)
def update_station(current_station, current_volume):
slider_value = int(adc.value * 100)
for i, l in enumerate(percentage_sublists):
if slider_value in l:
new_station_number = percentage_sublists.index(l)
break
if i == (len(percentage_sublists) - 1):
raise Exception("slider_value {} is not between 0 and 100".format(slider_value))
# First element in mpc is 1 not 0
new_station_number += 1
if current_station == new_station_number:
return current_station
else:
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(current_station) + ' volume 0')
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(new_station_number) + ' volume ' + str(current_volume))
return new_station_number
return update_station
def update_volume(adc, current_station, current_volume):
""" Updates the volume of the player
Arguments
----------
adc : gpiozero MCP3xxx object
Analog-to-digital converter which reads the raw volume knob position
"""
new_volume = 100 - int(adc.value * 100)
if current_volume == new_volume:
return current_volume
else:
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(current_station) + ' volume ' + str(new_volume))
return new_volume
def toggle_mute_factory(led_0):
mute_list = [] # Small Hack: Empty List = False
def toggle_mute():
if not mute_list:
mute_list.append(1)
led_0.toggle()
else:
mute_list.pop()
led_0.toggle()
def mute():
return bool(mute_list)
return toggle_mute, mute
def reload_factory(led_1, num_of_stations):
def reload():
led_1.toggle()
for i in range(1, num_of_stations + 1):
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(i) + ' play ' + str(i))
led_1.toggle()
return reload
if __name__ == "__main__":
#time.sleep(25) # Wait for wifi connection to be established on startup
num_of_stations = 10
# Start mpd server
for i in range(1, num_of_stations + 1):
os.system('mpd /home/pi/.config/mpd/mpd_' + str(i) + '.conf')
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(i) + ' clear')
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(i) + ' load webradio_stations')
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(i) + ' volume 0')
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(i) + ' play ' + str(i))
print('All mpd servers started')
# Setup frontend
adc_0 = MCP3002(channel=0)
adc_1 = MCP3002(channel=1)
led_0 = gpiozero.LED(23)
led_1 = gpiozero.LED(22)
button_0 = gpiozero.Button(17)
button_1 = gpiozero.Button(18)
# Init
current_station = 0
current_volume = 0
# Create Functions
update_station = update_station_faktory(num_of_stations, adc_0)
update_volume = functools.partial(update_volume, adc_1)
toggle_mute, mute = toggle_mute_factory(led_0)
reload = reload_factory(led_1, num_of_stations)
# Assign functions
button_0.when_pressed = toggle_mute
button_1.when_pressed = reload
try:
while True:
if mute():
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(current_station) + ' volume 0')
current_volume = 0
while mute():
time.sleep(0.05)
current_station = update_station(current_station, current_volume)
current_volume = update_volume(current_station, current_volume)
time.sleep(0.05)
except KeyboardInterrupt:
os.system('mpc --host=/home/pi/.config/mpd/socket_' + str(current_station) + ' volume 0')
|
code
|
# ex23
def compare(x, y):
if x > y:
return x
elif y > x:
return y
else:
raise Exception("Numbers are equal")
def three_multiple(x):
if x % 3 == 0:
return True
else:
return False
def power(a, n):
return a ** n
|
code
|
from random import uniform
from sys import argv
from math import log
from matplotlib import pyplot as plt
counter = 0
Lambda=[]
n=int(argv[1])
for i in range(int(argv[2])):
Lambda.append(float(argv[i+3]))
for l in Lambda:
dist=[]
for i in range(1,n+1):
u = uniform(0,1)
x_i = -1*log(1-u)/l
dist.append(x_i)
plt.hist(dist, bins=1000, histtype='step', label=r'$\lambda$ ='+str(l))
plt.grid(True,which="both",ls="-")
plt.legend(loc=0, )
plt.show()
|
code
|
#! /usr/bin/env python3
# Copyright 2020 Desh Raj
# Apache 2.0.
"""This script takes an input RTTM and transforms it in a
particular way: all overlapping segments are re-labeled
as "overlap". This is useful for 2 cases:
1. By retaining just the overlap segments (grep overlap),
the resulting RTTM can be used to train an overlap
detector.
2. By retaining just the non-overlap segments (grep -v overlap),
the resulting file can be used to obtain (fairly) clean
speaker embeddings from the single-speaker regions of the
recording.
The output is written to stdout.
"""
import argparse, os
import itertools
from collections import defaultdict
def get_args():
parser = argparse.ArgumentParser(
description="""This script filters an RTTM in several ways.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--label", type=str, default="overlap",
help="Label for the overlap segments")
parser.add_argument("input_rttm", type=str,
help="path of input rttm file")
args = parser.parse_args()
return args
class Segment:
"""Stores all information about a segment"""
def __init__(self, reco_id, start_time, dur = None, end_time = None, spk_id = None):
self.reco_id = reco_id
self.start_time = start_time
if (dur is None):
self.end_time = end_time
self.dur = end_time - start_time
else:
self.dur = dur
self.end_time = start_time + dur
self.spk_id = spk_id
def groupby(iterable, keyfunc):
"""Wrapper around ``itertools.groupby`` which sorts data first."""
iterable = sorted(iterable, key=keyfunc)
for key, group in itertools.groupby(iterable, keyfunc):
yield key, group
def find_overlapping_segments(segs, label):
reco_id = segs[0].reco_id
tokens = []
for seg in segs:
tokens.append(("BEG", seg.start_time))
tokens.append(("END", seg.end_time))
sorted_tokens = sorted(tokens, key=lambda x: x[1])
overlap_segs = []
spkr_count = 0
ovl_begin = 0
ovl_end = 0
for token in sorted_tokens:
if (token[0] == "BEG"):
spkr_count +=1
if (spkr_count == 2):
ovl_begin = token[1]
else:
spkr_count -= 1
if (spkr_count == 1):
ovl_end = token[1]
overlap_segs.append(Segment(reco_id, ovl_begin, end_time=ovl_end, spk_id=label))
return overlap_segs
def find_single_speaker_segments(segs):
reco_id = segs[0].reco_id
tokens = []
for seg in segs:
tokens.append(("BEG", seg.start_time, seg.spk_id))
tokens.append(("END", seg.end_time, seg.spk_id))
sorted_tokens = sorted(tokens, key=lambda x: x[1])
single_speaker_segs = []
running_spkrs = set()
for token in sorted_tokens:
if (token[0] == "BEG"):
running_spkrs.add(token[2])
if (len(running_spkrs) == 1):
seg_begin = token[1]
cur_spkr = token[2]
elif (len(running_spkrs) == 2):
single_speaker_segs.append(Segment(reco_id, seg_begin, end_time=token[1], spk_id=cur_spkr))
elif (token[0] == "END"):
try:
running_spkrs.remove(token[2])
except KeyError:
print("Warning: speaker not found")
if (len(running_spkrs) == 1):
seg_begin = token[1]
cur_spkr = list(running_spkrs)[0]
elif (len(running_spkrs) == 0):
single_speaker_segs.append(Segment(reco_id, seg_begin, end_time=token[1], spk_id=cur_spkr))
return single_speaker_segs
def main():
args = get_args()
# First we read all segments and store as a list of objects
segments = []
with open(args.input_rttm, 'r') as f:
for line in f.readlines():
parts = line.strip().split()
segments.append(Segment(parts[1], float(parts[3]), dur=float(parts[4]), spk_id=parts[7]))
# We group the segment list into a dictionary indexed by reco_id
reco2segs = defaultdict(list,
{reco_id : list(g) for reco_id, g in groupby(segments, lambda x: x.reco_id)})
overlap_segs = []
for reco_id in reco2segs.keys():
segs = reco2segs[reco_id]
overlap_segs.extend(find_overlapping_segments(segs, args.label))
single_speaker_segs = []
for reco_id in reco2segs.keys():
segs = reco2segs[reco_id]
single_speaker_segs.extend(find_single_speaker_segments(segs))
final_segs = sorted(overlap_segs + single_speaker_segs, key = lambda x: (x.reco_id, x.start_time))
rttm_str = "SPEAKER {0} 1 {1:7.3f} {2:7.3f} <NA> <NA> {3} <NA> <NA>"
for seg in final_segs:
if (seg.dur > 0):
print(rttm_str.format(seg.reco_id, seg.start_time, seg.dur, seg.spk_id))
if __name__ == '__main__':
main()
|
code
|
# https://www.urionlinejudge.com.br/judge/pt/problems/view/1168
n = int(input())
for i in range(0,n):
v = input()
total = 0
for digito in v:
if digito == '0':
total += 6
elif digito == '1':
total += 2
elif digito == '2':
total += 5
elif digito == '3':
total += 5
elif digito == '4':
total += 4
elif digito == '5':
total += 5
elif digito == '6':
total += 6
elif digito == '7':
total += 3
elif digito == '8':
total += 7
else:
total += 6
print(str(total) + " leds")
|
code
|
input_int = list(input())
for index in range(int(len(input_int) / 2)):
temp = input_int[index]
input_int[index] = input_int[-index - 1]
input_int[-index - 1] = temp
print(''.join(input_int))
|
code
|
from time import sleep
from decouple import config
from bot import Near
from bot import Twitter
if __name__ == '__main__':
near = Near()
twitter = Twitter()
CURRENCY = config('CURRENCY_TO_CONVERT')
TEXT_LAST_24_HRS = config('TEXT_LAST_24_HRS')
sleep_time = 86400 / int(config('TWEETS_PER_DAY'))
while True:
data = near.getData()
price = f"{data['price']:.2f}"
percent_change_24h = f"{data['percent_change_24h']:.2f}"
twitter.send_tweet(f'1 $NEAR = {price} #{CURRENCY} ({percent_change_24h}% {TEXT_LAST_24_HRS})')
sleep(sleep_time)
|
code
|
import matplotlib.pyplot as plt
import numpy as np
import random
from matplotlib.backends.backend_pdf import PdfPages
from scipy import stats
import math
from scipy.special import factorial
from scipy.optimize import curve_fit
def poisson(k,lamb):
return (lamb**k/factorial(k)) * np.exp(-lamb)
if __name__ == "__main__":
with PdfPages("Project2_David_Grzan.pdf") as pdf:
#makes gaussian histogram
n = 100000
n2 = 100
garray = []
a = 0
for j in range(0,n):
for i in range(0,n2):
a+=random.uniform(-1,1)
garray.append(a*(float(n2/3))**(-1/2))
a = 0
plt.figure()
plt.title("Gaussian Distribution")
plt.hist(garray,100,density=0,range=(-4,4),facecolor="g")
pdf.savefig()
plt.close()
#makes normalized gaussian histogram with fit
m, s = stats.norm.fit(garray)
line = stats.norm.pdf(np.linspace(-4,4,100),m,s)
fig = plt.figure()
plt.plot(np.linspace(-4,4,100),line)
plt.title("Normalized Gaussian Distribution")
plt.hist(garray,100,density=1,range=(-4,4),facecolor="r")
plt.text(0.75,0.4,"Mean: {:.2f}, Sigma: {:.2f}".format(m,s),size=10)
pdf.savefig()
plt.close()
#makes poisson histogram
e = math.e
parray = []
lamb = 5
k = 0
p = 1.0
L = e**(-lamb)
for i in range(0,100000):
while p>L:
k = k+1
p = p*random.uniform(0,1)
parray.append(k-1)
p = 1.0
k = 0
plt.figure()
plt.title("Poisson Distribution")
plt.hist(parray,20,density=0,range=(-0.5,19.5),facecolor="g")
pdf.savefig()
plt.close()
#makes normalized poisson histogram with fit
plt.figure()
plt.title("Normalized Poisson Distribution")
entries, bin_edges, patches = plt.hist(parray,20,density=1,range=(-0.5,19.5),facecolor="r")
print(bin_edges)
bin_middles = 0.5*(bin_edges[1:] + bin_edges[:-1])
parameters, cov_matrix = curve_fit(poisson, bin_middles, entries)
xaxis = np.linspace(0,20,1000)
plt.plot(xaxis, poisson(xaxis, *parameters))
print(parameters)
plt.text(10,0.125,"Lambda: {:.2f}".format(*parameters),size=10)
pdf.savefig()
plt.close()
|
code
|
from __future__ import division, print_function, absolute_import
import numpy as np
from ..util import img_as_ubyte, crop
from ._skeletonize_3d_cy import _compute_thin_image
def skeletonize_3d(img):
"""Compute the skeleton of a binary image.
Thinning is used to reduce each connected component in a binary image
to a single-pixel wide skeleton.
Parameters
----------
img : ndarray, 2D or 3D
A binary image containing the objects to be skeletonized. Zeros
represent background, nonzero values are foreground.
Returns
-------
skeleton : ndarray
The thinned image.
See also
--------
skeletonize, medial_axis
Notes
-----
The method of [Lee94]_ uses an octree data structure to examine a 3x3x3
neighborhood of a pixel. The algorithm proceeds by iteratively sweeping
over the image, and removing pixels at each iteration until the image
stops changing. Each iteration consists of two steps: first, a list of
candidates for removal is assembled; then pixels from this list are
rechecked sequentially, to better preserve connectivity of the image.
The algorithm this function implements is different from the algorithms
used by either `skeletonize` or `medial_axis`, thus for 2D images the
results produced by this function are generally different.
References
----------
.. [Lee94] T.-C. Lee, R.L. Kashyap and C.-N. Chu, Building skeleton models
via 3-D medial surface/axis thinning algorithms.
Computer Vision, Graphics, and Image Processing, 56(6):462-478, 1994.
"""
# make sure the image is 3D or 2D
if img.ndim < 2 or img.ndim > 3:
raise ValueError("skeletonize_3d can only handle 2D or 3D images; "
"got img.ndim = %s instead." % img.ndim)
img = np.ascontiguousarray(img)
img = img_as_ubyte(img, force_copy=False)
# make the input image 3D and pad it w/ zeros to simplify dealing w/ boundaries
# NB: careful here to not clobber the original *and* minimize copying
img_o = img
if img.ndim == 2:
img_o = img[np.newaxis, ...]
img_o = np.pad(img_o, pad_width=1, mode='constant')
# normalize to binary
maxval = img_o.max()
img_o[img_o != 0] = 1
# do the computation
img_o = np.asarray(_compute_thin_image(img_o))
# crop it back and restore the original intensity range
img_o = crop(img_o, crop_width=1)
if img.ndim == 2:
img_o = img_o[0]
img_o *= maxval
return img_o
|
code
|
def format_duration(seconds):
if seconds==0:
return "now"
year = seconds // 31536000
yearDay = seconds % 31536000
day = yearDay // 86400
dayHour = yearDay % 86400
hour = dayHour // 3600
hourMinute = dayHour % 3600
minute = hourMinute // 60
second = hourMinute % 60
res = [year, day, hour, minute, second]
res[0] = yearInput(res[0])
res[1] = dayInput(res[1])
res[2] = hourInput(res[2])
res[3] = minuteInput(res[3])
res[4] = secondInput(res[4])
res = [i for i in res if i != ""]
if len(res) >= 2:
res[-2] = res[-2][:-1]+" and"
res[-1]=res[-1][0:-1]
return " ".join(res)
def secondInput(s):
if s == 0:
return ""
elif s == 1:
return "1 second,"
else:
return f"{s} seconds,"
def minuteInput(m):
if m == 0:
return ""
elif m == 1:
return "1 minute,"
else:
return f"{m} minutes,"
def hourInput(h):
if h == 0:
return ""
elif h == 1:
return "1 hour,"
else:
return f"{h} hours,"
def dayInput(d):
if d == 0:
return ""
elif d == 1:
return "1 day,"
else:
return f"{d} days,"
def yearInput(y):
if y == 0:
return ""
elif y == 1:
return "1 year,"
else:
return f"{y} years,"
|
code
|
import math
class TreasureAngleToWorldmapPositionConverter:
def __init__(self, table_calibration_service, treasure_angles, robot):
self.table_calibration_service = table_calibration_service
self.treasure_angles = treasure_angles
self.robot_angle = robot.get_angle()
self.robot_position = robot.get_position()
def get_treasures(self):
treasures = self.__convert_to_map_position__()
return treasures
def __convert_to_map_position__(self):
treasures_angles = self.treasure_angles
table_corners = self.table_calibration_service.get_table_corners()
bottom_slope = (table_corners[0][1] - table_corners[1][1]) / (table_corners[0][0] - table_corners[1][0])
bottom_offset = table_corners[0][1] - (bottom_slope * table_corners[0][0])
rear_line = table_corners[0][0]
top_slope = (table_corners[2][1] - table_corners[3][1]) / (table_corners[3][0] - table_corners[2][0])
top_offset = table_corners[3][1] - (top_slope * table_corners[3][0])
camera_position = self.__compute_camera_position__()
detected_treasures = []
for angle in treasures_angles:
treasure_slope = -math.tan(math.radians(self.robot_angle-(63.53/2)+angle))
treasure_offset = camera_position[1] - (treasure_slope * camera_position[0])
treasure_bottom_x = (bottom_offset - treasure_offset) / (treasure_slope - bottom_slope)
treasure_bottom_y = (bottom_slope * treasure_bottom_x) + treasure_offset
treasure_top_x = (top_offset - treasure_offset) / (treasure_slope - top_slope)
treasure_top_y = (top_slope * treasure_top_x) + treasure_offset
treasure_rear_x = rear_line
treasure_rear_y = (treasure_slope * treasure_rear_x) + treasure_offset
distance_bottom = math.sqrt((camera_position[0] - treasure_bottom_x)**2 + (camera_position[1] - treasure_bottom_y)**2)
distance_top = math.sqrt((camera_position[0] - treasure_top_x)**2 + (camera_position[1] - treasure_top_y)**2)
distance_rear = math.sqrt((camera_position[0] - treasure_rear_x)**2 + (camera_position[1] - treasure_rear_y)**2)
min_list = []
if treasure_bottom_x < camera_position[0]:
min_list.append(distance_bottom)
if treasure_top_x < camera_position[0]:
min_list.append(distance_top)
min_list.append(distance_rear)
treasure_distance = min(min_list)
if treasure_distance == distance_bottom:
detected_treasures.append({'x': treasure_bottom_x, 'y' : treasure_bottom_y})
elif treasure_distance == distance_rear:
detected_treasures.append({'x': treasure_rear_x, 'y' : treasure_rear_y})
elif treasure_distance == distance_top:
detected_treasures.append({'x': treasure_top_x, 'y' : treasure_top_y})
return detected_treasures
def __compute_camera_position__(self):
position = self.robot_position
angle = self.robot_angle
rayon_robot = self.table_calibration_service.get_pixel_per_meter_ratio() * 0.15
position_camera = (position.x + math.cos(math.radians(angle)) * rayon_robot, position.y - math.sin(math.radians(angle)) * rayon_robot)
return position_camera
|
code
|
import sys
# d = {'a': 'Maman', 'b': 'Papa', 'c':'Grand Father', 'd': 'Grand Mother', 'e': 'Son', 'f': 'Daughter'}
# for k in sorted(d.keys()):
# print('Key '+k.upper()+' -> '+d[k])
# print(d.items()[0])
def File(filename):
f = open(filename, 'r')
for line in f:
a = line.split()
for word in a:
if len(word) > 1:
print(word)
# print(line)
# lines = f.readlines()
text = f.read()
print(text)
f.close()
def main():
File(sys.argv[1])
if __name__ == '__main__':
main()
|
code
|
import math
# Predecessor and successor
num = int(input('Digita um número aí: '))
ant = num - 1
suc = num + 1
print('O número antes de {} é {} e o depois dele é {}'.format(num, ant, suc))
# Double, triple and square root
n = int(input('Manda um número: '))
d = n * 2
t = n * 3
r = math.sqrt(n)
# print('O dobro de {} é {}'.format(n, d))
# print('O triplo de {} é {}'.format(n, t))
# print('A raiz quadrada de {} é {:.3f}'.format(n, r))
print('O dobro de {} vale {}.'.format(n, (n*2)))
print('O triplo de {} vale {}. \n A raiz quadrada de {} vale {:.2f}'.format(n, (n*3), n, pow(n, (1/2)))) #pow(base, exponent)
|
code
|
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
intervals = intervals[:]
#Always think about the weird input
if len(intervals) <= 1:
return intervals
intervals.sort(key = self.sort_by_start)
res = []
i = 0
for i in range(0, len(intervals)): #and intervals[i].start <= intervals[len(intervals) - 1].end:
#you should process the last element first, especially when you have i+1 logic
if i == len(intervals) - 1:
res.append(intervals[i])
break
#if we always care about i+1 rather than modifying i, then our lives are easier. Like a grinding wheel!
#we know we'd never look back. If not for the sorting, the time complexity would be O(n)
if intervals[i].end >= intervals[i + 1].start:
intervals[i + 1].start = intervals[i].start
if intervals[i].end > intervals[i + 1].end:
intervals[i + 1].end = intervals[i].end
else:
res.append(intervals[i])
return res
# if i.end < j.start, then it must be that i.end < (j+1).start
def sort_by_start(self, l):
return l.start
def pr(self, li):
for x in li:
print("[" + str(x.start) + ',' + str(x.end) + "],")
if __name__ == '__main__':
test = Solution()
case = [[5,5],[1,1],[5,7],[5,7],[1,1],[3,4],[4,4],[0,1],[5,5],[1,2],[5,5],[0,2]]
#case = [[1,4],[4,5]]
#case = [[1,3], [2,6], [8, 10], [15, 18]]
intervals = []
for i in case:
intervals.append(Interval(i[0], i[1]))
test.pr(test.merge(intervals))
|
code
|
def check(lista):
return sum(lista) == len(lista)
def convert(string):
return [True if i=='+' else False for i in string]
def invert(lis):
return [not i for i in lis]
def fun(string,n):
lista = convert(string)
count = 0
for i in range(len(lista)-n+1):
if not lista[i]:
lista[i:i+n] = invert(lista[i:i+n])
count += 1
res = 'IMPOSSIBLE'
if check(lista):
res = count
return res
t = int(input())
for i in range(1, t + 1):
n, m = input().split(" ") # read a list of integers, 2 in this case
print("Case #{}: {}".format(i, fun(n, int(m))))
|
code
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose
import re
from w3lib.html import remove_tags # this module is used specifically to strip HTML tags
class LagouItemLoader(ItemLoader):
default_output_processor = TakeFirst()
def process_input_city_and_degree_years(value):
# process the input city
value = re.sub("/", "", value).strip()
return value
def process_input_salary(value):
# process the input salary
result = re.sub("k", "000", value).replace("-", ' ').strip()
return result
def process_input_publish_time(value):
# process the input publish time
result = re.sub("\xa0", "", value).strip()
return result
def process_input_desc(value):
# process the job description
if value == "":
return "无"
else:
return value
class LagoujobItem(scrapy.Item):
url = scrapy.Field() # job url
url_object_id = scrapy.Field() # id derived from the job url
title = scrapy.Field() # job title
salary = scrapy.Field(
input_processor=MapCompose(process_input_salary)
) # salary level
city = scrapy.Field(
input_processor=MapCompose(process_input_city_and_degree_years),
) # work city
years = scrapy.Field(
input_processor=MapCompose(process_input_city_and_degree_years),
) # years of experience
degree = scrapy.Field(
input_processor=MapCompose(process_input_city_and_degree_years),
) # required education level
job_type = scrapy.Field() # job type
publish_time = scrapy.Field(
input_processor=MapCompose(process_input_publish_time)
) # publish time
tags = scrapy.Field(
input_processor=MapCompose(process_input_desc),
) # tags
job_advantage = scrapy.Field() # job perks
job_desc = scrapy.Field(
input_processor=MapCompose(process_input_desc),
) # job description
job_addr = scrapy.Field() # job address
company_url = scrapy.Field() # company official website url
company_name = scrapy.Field() # company name
crawl_time = scrapy.Field() # crawl time
crawl_update_time = scrapy.Field() # crawl update time
|
code
|
#!/usr/bin/python3
"""Task 6"""
from flask import Flask
from flask import render_template
from models import storage
from models.state import State
from models.city import City
app = Flask(__name__)
@app.route('/states', strict_slashes=False)
def states():
"""Function that return states"""
states = storage.all(State).values()
return render_template('7-states_list.html', states=states)
@app.route('/states/<id>', strict_slashes=False)
def list_states(id):
"""Function that return states according id"""
states = storage.all(State).values()
ban = False
name_st = ""
for state in states:
if state.id == id:
ban = True
name_st = state
break
else:
ban = False
return render_template('9-states.html', name_st=name_st, ban=ban)
@app.teardown_appcontext
def close_session(db):
storage.close()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
|
code
|
greeting = "Hello"
name = "Michael"
message = f"{greeting}, {name}. Welcome!"
print(message)
|
code
|
# Importing the required libraries
import pandas as pd
import lightkurve as lk
import matplotlib.pyplot as plt
import os, shutil
import numpy as np
from scipy.stats import skew
from scipy.stats import kurtosis
from tqdm import tqdm
import warnings
import seaborn as sns
os.chdir('..')
tqdm.pandas(desc="Progress: ")
warnings.filterwarnings('ignore')
pd.set_option('display.width', 400)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 3000)
def my_custom_corrector_func(lc_raw):
# Source: https://docs.lightkurve.org/tutorials/05-advanced_patterns_binning.html
# Clean outliers, but only those that are above the mean level (e.g. attributable to stellar flares or cosmic rays).
lc_clean_outliers = lc_raw.remove_outliers(sigma=20, sigma_upper=4)
lc_nan_normalize_flatten = lc_clean_outliers.remove_nans().normalize().flatten(window_length=101)
lc_flat, trend_lc = lc_nan_normalize_flatten.flatten(return_trend=True)
return lc_flat
def read_kepler_data_from_external_HDD(kepler_id):
res_path = 'res/kepler_ID_' + kepler_id + '/'
try:
# Getting from local if already present
os.listdir(res_path)
except:
try:
# Pulling from the External HDD to the temp resource folder
res_path = '/Volumes/PaligraphyS/kepler_data/res/kepler_ID_' + kepler_id + '/'
shutil.copytree(res_path, 'temp_res/kepler_ID_' + kepler_id + '/')
res_path = 'temp_res/kepler_ID_' + kepler_id + '/'
except Exception as e:
if ('File exists: ' in str(e)):
res_path = 'temp_res/kepler_ID_' + kepler_id + '/'
else:
print('Data for KIC not downloaded')
return [False, np.array([])]
lc_list_files = []
for lc_file in os.listdir(res_path):
if ('llc.fits' in lc_file):
lc_list_files.append(lk.lightcurvefile.KeplerLightCurveFile(res_path + lc_file))
lc_collection = lk.LightCurveFileCollection(lc_list_files)
stitched_lc_PDCSAP = lc_collection.PDCSAP_FLUX.stitch()
corrected_lc = my_custom_corrector_func(stitched_lc_PDCSAP)
corrected_lc_df = corrected_lc.to_pandas()
corrected_lc_df['flux'] = corrected_lc_df['flux'] - 1
# Removing the kepler data brought to the temporary directory
shutil.rmtree('temp_res/kepler_ID_' + kepler_id)
return [True, np.array([corrected_lc_df['time'], corrected_lc_df['flux']])]
try:
stats_df = pd.read_csv('planetary_data/stats_df.csv', dtype={'KIC': str})
except:
stats_df = pd.DataFrame(columns=['KIC', 'flux_point_counts', 'max_flux_value', 'min_flux_value',
'avg_flux_value', 'median_flux_value', 'skewness_flux_value',
'kurtosis_flux_value', 'Q1_flux_value', 'Q3_flux_value', 'std_flux_value',
'variance_flux_value'])
# Getting the kepler ID's for which we will train and test the model
i = len(stats_df)
# for file in tqdm(os.listdir('res/KIC_flux_graphs_80_dpi_1_size_color_b/')):
# if ('.png' in file):
# kepler_id = file.split('_')[-1].split('.')[0]
# if (kepler_id in list(stats_df['KIC'])):
# continue
# try:
# response_list = read_kepler_data_from_external_HDD(kepler_id)
# except:
# print('Error in '+str(kepler_id))
# continue
# if (response_list[0]):
# stats_df.loc[i] = [str(kepler_id), response_list[1].shape[1], np.max(response_list[1][1]),
# np.min(response_list[1][1]), np.average(response_list[1][1]),
# np.nanmedian(response_list[1][1]), skew(response_list[1][1]),
# kurtosis(response_list[1][1]), np.nanquantile(response_list[1][1], 0.25),
# np.nanquantile(response_list[1][1], 0.75),np.nanstd(response_list[1][1]),
# np.nanvar(response_list[1][1])]
# i += 1
#
# if (i % 20 == 0):
# stats_df.drop_duplicates('KIC', inplace=True)
# stats_df.to_csv('planetary_data/stats_df.csv', sep=',', index=False)
# exit()
complete_kepler_df = pd.read_csv('planetary_data/planetary_data_kepler_mission.csv', sep=',', dtype={'kepid': str})
complete_kepler_df = complete_kepler_df[['kepid', 'nconfp', 'nkoi']]
stats_planets_df = pd.merge(stats_df, complete_kepler_df, left_on='KIC', right_on='kepid')
stats_planets_df.drop_duplicates('KIC', inplace=True)
stats_planets_df.drop('kepid', inplace=True, axis=1)
stats_planets_df.to_csv('planetary_data/stats_planets_df.csv', sep=',', index=False)
stats_planets_df = stats_planets_df.loc[((stats_planets_df['max_flux_value']<=0.03) &
(stats_planets_df['min_flux_value']>=-0.03)) |
(stats_planets_df['nconfp']>0.0)]
stats_planets_df['Confirmed_planets'] = [1.0 * x for x in stats_planets_df['nconfp'] > 0.0]
print(stats_planets_df.groupby('Confirmed_planets').count()[['KIC']])
print(stats_planets_df.groupby(['Confirmed_planets', 'nkoi']).count()['KIC'])
print(stats_planets_df.loc[(stats_planets_df['nkoi'] == 0) &
(stats_planets_df['Confirmed_planets'] == 1)].sort_values('nkoi')[
['KIC', 'nkoi', 'Confirmed_planets']])
def plot_curve(x_column, y_column, hue_column="Confirmed_planets"):
graph_name = y_column + '.png'
if (x_column == 'nkoi'):
x_label = 'Number of Kepler object of interest'
else:
x_label = x_column[0].upper() + x_column[1:].replace('_', ' ')
y_label = y_column[0].upper() + y_column[1:].replace('_', ' ')
# Plot 1: This will show the flux point counts for both the classes
sns.set_theme(style="darkgrid")
g = sns.catplot(x=x_column, y=y_column,
hue=hue_column,
data=stats_planets_df, kind="strip",
dodge=True,
height=4, aspect=1.5, legend_out=False)
g.despine(left=True)
# title
new_title = hue_column.replace('_', ' ')
g._legend.set_title(new_title)
# replace labels
new_labels = ['0 - No exoplanet', '1 - Exoplanet Present']
for t, l in zip(g._legend.texts, new_labels): t.set_text(l)
g.set(xlabel=x_label, ylabel=y_label)
plt.xlim(-0.5, 7.5)
plt.tight_layout()
plt.savefig('EDA_images/' + graph_name)
# plt.show()
plt.close()
y_columns = ['flux_point_counts', 'max_flux_value', 'min_flux_value',
'avg_flux_value', 'median_flux_value', 'skewness_flux_value',
'kurtosis_flux_value', 'Q1_flux_value', 'Q3_flux_value',
'std_flux_value', 'variance_flux_value']
for y_column in y_columns:
plot_curve('nkoi', y_column)
print(len(stats_planets_df.loc[stats_planets_df['nconfp'] > 0.0]))
print(len(stats_planets_df.loc[stats_planets_df['nconfp'] == 0.0]))
|
code
|
import sys
import cProfile
from functools import reduce
def brute(arg):
return reduce(lambda x, y: x + int(y), str(2**arg), 0)
if __name__ == "__main__":
arg = int(sys.argv[1])
def main():
print(brute(arg))
cProfile.run('main()')
|
code
|
_employeeName = input()
fixedSalary = float(input())
salesBonus = float(input())
print(f"TOTAL = R$ {fixedSalary + (salesBonus * 0.15):.2f}")
|
code
|
import os
import torch
import torch.utils.data as data
from datasets.datahelpers import default_loader
class DigitsDataset(data.Dataset):
"""Digits dataset."""
def __init__(self, mode, data_root, transform=None, loader=default_loader):
if not (mode == 'train' or mode == 'dev'):
raise(RuntimeError("MODE should be either train or dev, passed as string"))
self.mode = mode
self.transform = transform
self.loader = loader
self.img_root = os.path.join(data_root, 'images')
self.img_names = []
self.targets = []
label_path = os.path.join(data_root, '{}.txt'.format(mode))
with open(label_path, 'r') as f:
for line in f.readlines():
line = line.strip().split()
self.img_names.append(line[0])
self.targets.append(line[1:])
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx):
img_name = os.path.join(self.img_root, self.img_names[idx])
image = self.loader(img_name)
if self.transform is not None:
image = self.transform(image)
target = self.targets[idx]
target = torch.IntTensor([int(i) for i in target])
return image, target
class DigitsBatchTrain:
"""Collate function for train mode."""
def __init__(self, batch, keep_ratio=False):
transposed_data = list(zip(*batch))
if keep_ratio:
self.images = transposed_data[0]
else:
self.images = torch.stack(transposed_data[0], 0)
self.targets = torch.cat(transposed_data[1], 0)
# length of the each target string
self.targets_single = transposed_data[1]
self.target_lengths = torch.IntTensor([len(i) for i in transposed_data[1]])
def pin_memory(self):
self.images = self.images.pin_memory()
self.targets = self.targets.pin_memory()
self.target_lengths = self.target_lengths.pin_memory()
return self
class DigitsBatchDev:
"""Collate function for dev mode."""
def __init__(self, batch, keep_ratio=False):
transposed_data = list(zip(*batch))
if keep_ratio:
self.images = transposed_data[0]
else:
self.images = torch.stack(transposed_data[0], 0)
self.targets = [i.tolist() for i in transposed_data[1]]
self.target_lengths = torch.IntTensor([len(i) for i in transposed_data[1]])
def pin_memory(self):
self.images = self.images.pin_memory()
return self
class DigitsCollater:
"""Digits Collater."""
def __init__(self, mode, keep_ratio=False):
self.mode = mode
self.keep_ratio = keep_ratio
def __call__(self, batch):
if self.mode == 'train':
return DigitsBatchTrain(batch, self.keep_ratio)
elif self.mode == 'dev':
return DigitsBatchDev(batch, self.keep_ratio)
else:
raise(RuntimeError("MODE should be either train or dev, passed as string"))
|
code
|
#
# @lc app=leetcode.cn id=884 lang=python3
#
# [884] Uncommon Words from Two Sentences
#
# @lc code=start
from typing import List
class Solution:
def uncommonFromSentences(self, A: str, B: str) -> List[str]:
from collections import Counter
ac = Counter(A.split())
bc = Counter(B.split())
answer = []
for k in ac.keys():
if ac.get(k) == 1 and k not in bc:
answer.append(k)
for k in bc.keys():
if bc.get(k) == 1 and k not in ac:
answer.append(k)
return answer
# @lc code=end
|
code
|
pretrain-mix-150b
A high-quality, 150-billion-token pre-training dataset meticulously curated for large language model research and development.
This dataset is a strategic mix of high-quality educational web text, comprehensive mathematical documents, and a diverse collection of source code, designed to foster strong reasoning and multi-domain capabilities in pre-trained models.
Dataset Overview
The pretrain-mix-150b dataset was created to provide a robust and balanced foundation for training novel language model architectures. The total dataset comprises approximately 130 million documents, amounting to ~150 billion tokens.
The curation process involved sourcing from three best-in-class, publicly available datasets and mixing them according to a specific ratio to ensure a balanced diet of general knowledge, logical reasoning, and programming syntax.
The composition was programmatically verified after creation:
| Source | Document Count | Percentage | Description |
|---|---|---|---|
| Web (FineWeb-Edu) | 87,570,000 | 67.3% | High-quality educational web content. |
| Code (Stack-Edu) | 23,560,000 | 18.1% | Curated source code from GitHub. |
| Math (FineMath) | 18,900,000 | 14.5% | Mathematical reasoning & problem-solving. |
| Total | 130,030,000 | 100.0% | |
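As a rough illustration of how such a check could be reproduced (a minimal sketch, not the original verification script), the per-source document counts can be tallied by streaming the dataset and counting the `source` field described in the schema later in this card:

from collections import Counter
from datasets import load_dataset

# Stream the dataset and tally the `source` field.
# Iterating over all ~130M rows is slow, but it reproduces the document counts
# in the table above without downloading the full dataset to disk.
ds = load_dataset(
    "meryyllebr543/pretrain-mix-150b",
    data_files="data/*.parquet",
    split="train",
    streaming=True,
)

counts = Counter(example["source"] for example in ds)
total = sum(counts.values())
for source, n in counts.most_common():
    print(f"{source}: {n} documents ({100 * n / total:.1f}%)")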
Why This Dataset?
While many large-scale datasets exist, pretrain-mix-150b was created with a specific philosophy in mind:
- Balanced Diet for Models: Avoids over-indexing on generic web text by including substantial, high-quality code and math corpora.
- Reproducibility: The entire creation process was scripted, and the composition is fully transparent.
- Efficiency: The data is provided in the highly efficient Parquet format, ready for large-scale training pipelines.
This dataset is ideal for researchers and engineers looking to pre-train foundation models from scratch, especially those with novel architectures (like Mixture-of-Experts) that can benefit from specialized data sources.
How to Use
The dataset is structured with a data/ directory containing 2,601 Parquet files. You can easily load it using the 🤗 datasets library.
It is highly recommended to use `streaming=True` to avoid downloading the entire dataset at once.
from datasets import load_dataset
# Load the dataset in streaming mode
# The 'data_files' argument points to all Parquet files in the 'data' directory
repo_id = "meryyllebr543/pretrain-mix-150b"
dataset = load_dataset(repo_id, data_files="data/*.parquet", split="train", streaming=True)
# You can then iterate over the dataset
print("First example from the dataset:")
example = next(iter(dataset))
print(example)
# {'text': '...', 'source': 'web'}
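When the stream feeds a training loop directly, it is usually worth adding an approximate shuffle on top of the iterator. Below is a minimal sketch continuing from the snippet above; the buffer size and seed are arbitrary illustration values, not settings from the original pipeline:

# Approximate shuffling for a streaming dataset: keeps a buffer of examples and samples from it.
shuffled = dataset.shuffle(seed=42, buffer_size=10_000)

# Iterate as usual; call shuffled.set_epoch(epoch) between epochs to reshuffle.
for example in shuffled.take(2):
    print(example["source"])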
Dataset Schema
Each row in the dataset has a simple, uniform schema:
- `text` (string): The main content of the document.
- `source` (string): The origin of the document. Can be one of `web`, `math`, or `code`. This is useful for analyzing model performance on different domains.
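Because every row carries the `source` tag, a streaming run can also be narrowed to a single domain, for example for domain-specific inspection or evaluation. A minimal sketch, reusing the `dataset` object from the "How to Use" snippet above:

# Lazily keep only the math documents; nothing is materialized until iteration.
math_only = dataset.filter(lambda example: example["source"] == "math")

for example in math_only.take(3):
    print(example["text"][:100])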
Data Sources
This dataset is a mix of the following excellent open-source projects. Please refer to their original dataset cards for more information on their respective curation processes.
- FineWeb-Edu: Sourced from `HuggingFaceFW/fineweb-edu`. The `sample-100BT` configuration was used.
- FineMath: Sourced from `HuggingFaceTB/finemath`. The `finemath-3plus` configuration was used.
- Stack-Edu: Sourced from `meryyllebr543/stack-edu-huggingface` (a direct-access version of `HuggingFaceTB/stack-edu`). The following languages were included: `python`, `rust`, `markdown`, `cpp`, and `csharp`.
Author
This dataset was curated and processed by Francisco Antonio.
- GitHub: MeryylleA
- LinkedIn: Francisco Antonio
This project is part of ongoing independent research into novel AI architectures.
License
The dataset is released under the Apache 2.0 License. Please be aware that the underlying data sources may have their own licenses and terms of use. It is the user's responsibility to adhere to them.