using BenchmarkTools, Test, CUDA
a = CUDA.zeros(1024)
function kernel(a)
    i = threadIdx().x
    a[i] += 1
    return
end
@cuda threads=length(a) kernel(a)
##
N = 2^20
x_d = CUDA.fill(1.0f0, N) # a vector stored on the GPU filled with 1.0 (Float32)
y_d = CUDA.fill(2.0f0, N) # a vector stored on the GPU filled with 2.0
y_d .+= x_d
function add_broadcast!(y, x)
    CUDA.@sync y .+= x
    return
end
##
@btime add_broadcast!($y_d, $x_d)
##
function gpu_add1!(y, x)
    for i = 1:length(y)
        @inbounds y[i] += x[i]
    end
    return nothing
end
fill!(y_d, 2)
@cuda gpu_add1!(y_d, x_d)
@test all(Array(y_d) .== 3.0f0)
##
function bench_gpu1!(y, x)
    CUDA.@sync begin
        @cuda gpu_add1!(y, x)
    end
end
@btime bench_gpu1!($y_d, $x_d)
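##
# Sketch (not in the original script): the single-threaded kernel above can be
# parallelised with a grid-stride loop, as in the CUDA.jl introduction; the name
# `gpu_add2!` and the 256-thread block size are illustrative choices.
function gpu_add2!(y, x)
    index = (blockIdx().x - 1) * blockDim().x + threadIdx().x
    stride = gridDim().x * blockDim().x
    for i = index:stride:length(y)
        @inbounds y[i] += x[i]
    end
    return nothing
end
fill!(y_d, 2)
numblocks = cld(length(y_d), 256)
CUDA.@sync @cuda threads=256 blocks=numblocks gpu_add2!(y_d, x_d)
@test all(Array(y_d) .== 3.0f0)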
##
const nx = 1024 # do 1024 x 1024 2D FFT
xc = CuArray{ComplexF64}(CUDA.randn(Float64, nx, nx))
p = plan_fft!( xc )
##
@btime CUDA.@sync(p * x) setup=(x = CuArray{ComplexF64}(CUDA.randn(Float64, nx, nx)));
##
for device in CUDA.devices()
    @show capability(device)
end
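##
# Sketch (not in the original script): the currently active device can also be
# queried directly.
dev = CUDA.device()
@show CUDA.name(dev)
@show capability(dev)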
##
using AbstractFFTs
using CUDA.CUFFT
##
b = CUDA.rand(ComplexF32,64,64,64)
# pa = plan_fft( a )
@btime fft(b);
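##
# Sketch (not in the original script): benchmarking the same transform through a
# precomputed plan, as hinted at by the commented-out `plan_fft` line above.
pb = plan_fft(b)
@btime CUDA.@sync($pb * $b);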
#!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import cart_pole_evaluator
class Network:
    def __init__(self, threads, seed=42):
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

    def construct(self, args, state_shape, num_actions):
        with self.session.graph.as_default():
            # Input states
            self.states = tf.placeholder(tf.float32, [None] + state_shape)
            # Chosen actions (used for training)
            self.actions = tf.placeholder(tf.int32, [None])
            # Observed returns (used for training)
            self.returns = tf.placeholder(tf.float32, [None])

            # Compute the action logits
            # TODO: Add a fully connected layer processing self.states, with args.hidden_layer neurons
            # and some non-linear activation.
            # TODO: Compute `logits` using another dense layer with
            # `num_actions` outputs (utilizing no activation function).
            # TODO: Compute the `self.probabilities` from the `logits`.

            # Training
            # TODO: Compute `loss`, as a softmax cross entropy loss of self.actions and `logits`.
            # Because this is a REINFORCE algorithm, it is crucial to weight the loss of batch
            # elements using `self.returns` -- this can be accomplished using the `weights` parameter.
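            # Illustrative sketch (one possible way to fill in the TODOs above, not
            # the official solution): the hidden layer size comes from the arguments,
            # the ReLU activation is an assumption.
            hidden = tf.layers.dense(self.states, args.hidden_layer, activation=tf.nn.relu)
            logits = tf.layers.dense(hidden, num_actions, activation=None)
            self.probabilities = tf.nn.softmax(logits)
            loss = tf.losses.sparse_softmax_cross_entropy(labels=self.actions, logits=logits, weights=self.returns)
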
            global_step = tf.train.create_global_step()
            self.training = tf.train.AdamOptimizer(args.learning_rate).minimize(loss, global_step=global_step, name="training")

            # Initialize variables
            self.session.run(tf.global_variables_initializer())

    def predict(self, states):
        return self.session.run(self.probabilities, {self.states: states})

    def train(self, states, actions, returns):
        self.session.run(self.training, {self.states: states, self.actions: actions, self.returns: returns})
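
# Example helper (not part of the original skeleton): one possible way to turn the
# rewards of a finished episode into discounted returns, as described by the TODOs
# in the training loop below.
def discounted_returns(rewards, gamma):
    returns = []
    g = 0.0
    for reward in reversed(rewards):
        g = reward + gamma * g
        returns.append(g)
    returns.reverse()
    return returns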
if __name__ == "__main__":
    # Fix random seed
    np.random.seed(42)

    # Parse arguments
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", default=5, type=int, help="Number of episodes to train on.")
    parser.add_argument("--episodes", default=500, type=int, help="Training episodes.")
    parser.add_argument("--gamma", default=1.0, type=float, help="Discounting factor.")
    parser.add_argument("--hidden_layer", default=20, type=int, help="Size of hidden layer.")
    parser.add_argument("--learning_rate", default=0.01, type=float, help="Learning rate.")
    parser.add_argument("--render_each", default=0, type=int, help="Render some episodes.")
    parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
    args = parser.parse_args()

    # Create the environment
    env = cart_pole_evaluator.environment(discrete=False)

    # Construct the network
    network = Network(threads=args.threads)
    network.construct(args, env.state_shape, env.actions)
    evaluating = False
    while True:
        # TODO: Decide if evaluation should start (one possibility is to train for args.episodes,
        # so env.episode >= args.episodes could be used).
        evaluating = ...
        # Train for a batch of episodes
        batch_states, batch_actions, batch_returns = [], [], []
        for _ in range(args.batch_size):
            # Perform episode
            state = env.reset(evaluating)
            states, actions, rewards, done = [], [], [], False
            while not done:
                if args.render_each and env.episode > 0 and env.episode % args.render_each == 0:
                    env.render()

                # TODO: Compute action distribution using `network.predict`

                # TODO: Set `action` randomly according to the generated distribution
                # (you can use np.random.choice or any other method).
                action = ...

                next_state, reward, done, _ = env.step(action)

                # TODO: Accumulate states, actions and rewards.

                state = next_state

            # TODO: Compute returns from rewards (by summing them up and
            # applying discount by `args.gamma`).

            # TODO: Extend the batch_{states,actions,returns} using the episodic
            # {states,actions,returns}.

        # TODO: Perform network training using batch_{states,actions,returns}.
import chess
import numpy as np
import time
from numpy.random import default_rng
rng = default_rng()
class MCTS_graph:
    def __init__(self,agent):
        self.root = agent.root
        self.temperature = agent.temperature
    def make_graph(self,depth=1000):
        self.cont = 0
        self.nodes = {}
        self.edges = []
        self.bfs(self.root,0,depth)
        print('Total nodes: {}'.format(self.cont))
    def bfs(self,node,father,depth):
        if depth==0: return
        if len(node.children)>0:
            log_rollouts = np.log(node.num_rollouts)
            for n in node.children:
                self.cont += 1
                win_percentage = n.winning_frac()
                self.nodes[self.cont] = win_percentage
                self.edges.append([father,self.cont,n.move])
                self.bfs(n,self.cont,depth-1)
    def save_graph(self,path,depth=1000):
        with open(path,'w') as file:
            self.make_graph(depth)
            # note: edges in a DOT digraph need "->"; the original "--" would not parse
            cad = "digraph{\n 0 [label=\"root\"];\n"
            for n,m in self.nodes.items():
                cad += " {} [label=\"{:.2f}\"];\n".format(n,m)
            for (x,y,z) in self.edges:
                cad += " {} -> {} [label=\"{}\"];\n".format(x,y,z)
            cad += "}"
            file.write(cad)
        print("Graph saved to: {}".format(path))
class MCTSNode:
    def __init__(self, game_state, parent = None, move = None, bot = None, is_root = False):
        self.game_state = game_state
        self.parent = parent
        self.move = move
        self.win_counts = np.zeros([2,])
        self.value = np.zeros([2,])
        self.num_rollouts = 0
        self.children = []
        self.unvisited_moves = []
        self.is_root = is_root
        if self.is_terminal():
            tmp = game_state.result()
            if int(tmp[0]) == 0:
                self.value = np.array([0,1])
            elif int(tmp[2]) == 0:
                self.value = np.array([1,0])
            else:
                self.value = np.array([1/2,1/2])
        else:
            self.unvisited_moves = list(game_state.legal_moves)
            value = bot.get_move_values_single(game_state)
            self.value += value
    def add_random_child(self,bot):
        index = np.random.randint(len(self.unvisited_moves))
        new_move = self.unvisited_moves.pop(index) # pick a random available move and remove it from the unvisited moves
        new_game_state = self.game_state.copy(stack=False) # make a copy of the game state
        new_game_state.push(new_move) # play the selected move
        new_node = MCTSNode(game_state=new_game_state, parent=self, move=new_move, bot=bot) # create a new node
        self.children.append(new_node) # add the node to the children list
        return new_node # return the new node
    def record_win(self, result):
        self.win_counts += result
        self.num_rollouts += 1
    def result_simulation(self):
        return self.value
    def can_add_child(self): # check whether there are still unvisited moves
        return len(self.unvisited_moves) > 0
    def is_terminal(self): # check whether this is a terminal node, i.e. the end of a game
        return self.game_state.is_game_over()
    def winning_frac(self): # get the Q/N value for this node
        if self.parent.game_state.turn: # white to move
            return float(self.win_counts[0]) / float(self.num_rollouts)
        else: # black to move
            return float(self.win_counts[1]) / float(self.num_rollouts)
class agent_MCTS:
    def __init__(self, temperature=2,bot=None,game_state=None,max_iter=100,verbose=0):
        self.temperature = temperature
        self.bot = bot
        self.max_iter = max_iter
        self.root = None
        self.verbose = verbose
        if game_state is not None:
            self.root = MCTSNode(game_state.copy(),bot=self.bot,is_root=True)
    def select_move(self,board,max_iter=None,push=True, thinking_time = 0):
        moves,values = self.get_move_values(board,max_iter=max_iter, thinking_time = thinking_time)
        if moves is None:
            return None
        index = np.argmax(values)
        if push:
            self.push_move(move=moves[index])
        return moves[index]
    def push_move(self,move=None):
        for child in self.root.children:
            if child.move==move:
                child.is_root = True
                self.root = child
                self.root.num_rollouts -= 1
                self.root.parent = None
                return True
        return False
    def push_board(self,board=None):
        str_board = str(board)
        for child in self.root.children:
            if str(child.game_state) == str_board:
                child.is_root = True
                self.root = child
                self.root.num_rollouts -= 1
                self.root.parent = None
                return True
        return False
    def set_max_iter(self,max_iter=100):
        self.max_iter = max_iter
    def select_child(self, node):
        best_score = -1
        best_child = None
        log_rollouts = np.log(node.num_rollouts)
        for child in node.children:
            win_percentage = child.winning_frac()
            exploration_factor = np.sqrt(log_rollouts / child.num_rollouts)
            uct_score = win_percentage + self.temperature * exploration_factor
            if uct_score > best_score:
                best_score = uct_score
                best_child = child
        return best_child
    def get_move_values(self,game_state,max_iter=None, thinking_time = 0):
        if max_iter is None:
            max_iter = self.max_iter
        if (self.root is None) or (str(self.root.game_state)!=str(game_state) and not self.push_board(board=game_state)):
            if self.verbose>0:
                print('The game state does not match the root of the tree; the root was recreated')
            self.root = MCTSNode(game_state.copy(stack=False),bot=self.bot,is_root=True)
        if self.root.is_terminal():
            return None,None
        i = 0
        tic = time.time()
        while thinking_time>0 or i<max_iter:
            toc = time.time()-tic
            if toc > thinking_time:
                thinking_time = 0
            i += 1
            node = self.root
            # selection phase: walk down until a node is found that is not fully expanded or is terminal
            while (not node.can_add_child()) and (not node.is_terminal()):
                node = self.select_child(node)
            # expansion phase: add a new child node
            if node.can_add_child():
                node = node.add_random_child(self.bot)
            # simulation phase: the neural network provides the value of the node it predicts as the winner
            result = node.result_simulation()
            # backpropagation phase: update the Q value of the parent nodes up to the root
            while node is not None:
                node.record_win(result)
                node = node.parent
        if self.verbose>1:
            toc = time.time()-tic
            print('MCTS - nodes:{} Elapsed time: {:.2f}s = {:.2f}m nps={:.0f}'.format(self.root.num_rollouts,toc,toc/60,self.root.num_rollouts/toc))
        score = []
        moves = []
        for child in self.root.children:
            win_percentage = child.winning_frac()
            score.append(win_percentage)
            moves.append(child.move)
        score = np.array(score)
        return moves,score
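
# Illustrative usage sketch (not part of the original module). `DummyBot` stands in
# for the neural-network evaluator that agent_MCTS expects: any object with a
# get_move_values_single(board) method returning a length-2 array of
# (white, black) value estimates.
if __name__ == "__main__":
    class DummyBot:
        def get_move_values_single(self, board):
            return np.array([0.5, 0.5]) # neutral evaluation, just to exercise the search
    board = chess.Board()
    agent = agent_MCTS(temperature=2, bot=DummyBot(), game_state=board, max_iter=25)
    print('Suggested move:', agent.select_move(board))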
#include <string>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <boost/filesystem.hpp>
#include "res2h.h"
#include "res2hutils.hpp"
struct FileData {
boost::filesystem::path inPath;
boost::filesystem::path outPath;
std::string internalName;
std::string dataVariableName;
std::string sizeVariableName;
size_t size;
};
bool beVerbose = false;
bool useRecursion = false;
bool useC = false;
bool createBinary = false;
bool appendFile = false;
bool combineResults = false;
boost::filesystem::path commonHeaderFilePath;
boost::filesystem::path utilitiesFilePath;
boost::filesystem::path inFilePath;
boost::filesystem::path outFilePath;
std::ofstream badOfStream; //we need this later as a default parameter...
//-----------------------------------------------------------------------------
//This is based on the example code found here: https://svn.boost.org/trac/boost/ticket/1976
//but changed to not return a trailing ".." when paths only differ in their file name.
//The function still seems to be missing in boost as of 1.54.0.
boost::filesystem::path naiveUncomplete(boost::filesystem::path const path, boost::filesystem::path const base)
{
if (path.has_root_path()) {
if (path.root_path() != base.root_path()) {
return path;
} else {
return naiveUncomplete(path.relative_path(), base.relative_path());
}
} else {
if (base.has_root_path()) {
return path;
} else {
auto path_it = path.begin();
auto base_it = base.begin();
while ( path_it != path.end() && base_it != base.end() ) {
if (*path_it != *base_it) break;
++path_it; ++base_it;
}
boost::filesystem::path result;
//check if we're at the filename of the base path already
if (*base_it != base.filename()) {
//add trailing ".." from path to base, but only if we're not already at the filename of the base path
for (; base_it != base.end() && *base_it != base.filename(); ++base_it) {
result /= "..";
}
}
for (; path_it != path.end(); ++path_it) {
result /= *path_it;
}
return result;
}
}
return path;
}
bool makeCanonical(boost::filesystem::path & result, const boost::filesystem::path & path)
{
//if we use canonical the file must exist, else we get an exception.
try {
result = boost::filesystem::canonical(path);
}
catch(...) {
//an error occurred. this may be because the file is not there yet. try without the file name
try {
result = boost::filesystem::canonical(boost::filesystem::path(path).remove_filename());
//ok. this worked. add file name again
result /= path.filename();
}
catch (...) {
//hmm. didn't work. tell the user. at least the path should be there...
std::cout << "The path \"" << boost::filesystem::path(path).remove_filename().string() << "\" couldn't be found. Please create it." << std::endl;
return false;
}
}
return true;
}
//-----------------------------------------------------------------------------
void printVersion()
{
std::cout << "res2h " << RES2H_VERSION_STRING << " - Load plain binary data and dump to a raw C/C++ array." << std::endl << std::endl;
}
void printUsage()
{
std::cout << std::endl;
std::cout << "Usage: res2h <infile/indir> <outfile/outdir> [options]" << std::endl;
std::cout << "Valid options:" << std::endl;
std::cout << "-s Recurse into subdirectories below indir." << std::endl;
std::cout << "-c Use .c files and arrays for storing the data definitions, else" << std::endl << " uses .cpp files and std::vector/std::map." << std::endl;
std::cout << "-h <headerfile> Puts all declarations in a common \"headerfile\" using \"extern\"" << std::endl << " and includes that header file in the source files." << std::endl;
std::cout << "-u <sourcefile> Create utility functions and arrays in a .c/.cpp file." << std::endl << " Only makes sense in combination with -h" << std::endl;
std::cout << "-1 Combine all converted files into one big .c/.cpp file (use with -u)." << std::endl;
std::cout << "-b Compile binary archive outfile containing all infile(s). For reading in your" << std::endl << " software include res2hinterface.h/.c/.cpp (depending on -c) and consult the docs." << std::endl;
std::cout << "-a Append infile to outfile. Can be used to append an archive to an executable." << std::endl;
std::cout << "-v Be verbose." << std::endl;
std::cout << "Examples:" << std::endl;
std::cout << "res2h ./lenna.png ./resources/lenna_png.cpp (convert single file)" << std::endl;
std::cout << "res2h ./data ./resources -s -h resources.h -u resources.cpp (convert directory)" << std::endl;
std::cout << "res2h ./data ./resources/data.bin -b (convert directory to binary file)" << std::endl;
std::cout << "res2h ./resources/data.bin ./program.exe -a (append archive to executable)" << std::endl;
}
bool readArguments(int argc, const char * argv[])
{
bool pastFiles = false;
for(int i = 1; i < argc; ++i) {
//read argument from list
std::string argument = argv[i];
//check what it is
if (argument == "-a") {
if (!commonHeaderFilePath.empty() || !utilitiesFilePath.empty()) {
std::cout << "Error: Option -a can not be combined with -h or -u!" << std::endl;
return false;
}
else if (createBinary) {
std::cout << "Error: Option -a can not be combined with -b!" << std::endl;
return false;
}
else if (combineResults) {
std::cout << "Error: Option -a can not be combined with -1!" << std::endl;
return false;
}
appendFile = true;
pastFiles = true;
}
else if (argument == "-1") {
//-u must be used for this to work. check if specified
for(int j = 1; j < argc; ++j) {
//read argument from list
std::string argument = argv[j];
if (argument == "-u") {
combineResults = true;
pastFiles = true;
break;
}
}
if (!combineResults) {
//-u not specified. complain to user.
std::cout << "Error: Option -1 has to be combined with -u!" << std::endl;
return false;
}
}
else if (argument == "-b") {
if (!commonHeaderFilePath.empty() || !utilitiesFilePath.empty()) {
std::cout << "Error: Option -b can not be combined with -h or -u!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -b can not be combined with -a!" << std::endl;
return false;
}
else if (combineResults) {
std::cout << "Warning: Creating binary archive. Option -1 ignored!" << std::endl;
return false;
}
createBinary = true;
pastFiles = true;
}
else if (argument == "-c") {
useC = true;
pastFiles = true;
}
else if (argument == "-s") {
useRecursion = true;
pastFiles = true;
}
else if (argument == "-v") {
beVerbose = true;
pastFiles = true;
}
else if (argument == "-h") {
if (createBinary) {
std::cout << "Error: Option -h can not be combined with -b!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -h can not be combined with -a!" << std::endl;
return false;
}
//try getting next argument as header file name
i++;
if (i < argc && argv[i] != nullptr) {
if (!makeCanonical(commonHeaderFilePath, boost::filesystem::path(argv[i]))) {
return false;
}
}
else {
std::cout << "Error: Option -h specified, but no file name found!" << std::endl;
return false;
}
pastFiles = true;
}
else if (argument == "-u") {
if (createBinary) {
std::cout << "Error: Option -u can not be combined with -b!" << std::endl;
return false;
}
else if (appendFile) {
std::cout << "Error: Option -u can not be combined with -a!" << std::endl;
return false;
}
//try getting next argument as utility file name
i++;
if (i < argc && argv[i] != nullptr) {
if (!makeCanonical(utilitiesFilePath, boost::filesystem::path(argv[i]))) {
return false;
}
}
else {
std::cout << "Error: Option -u specified, but no file name found!" << std::endl;
return false;
}
if (!utilitiesFilePath.empty() && commonHeaderFilePath.empty()) {
std::cout << "Warning: -u does not make much sense without -h..." << std::endl;
}
pastFiles = true;
}
//none of the options was matched until here...
else if (!pastFiles) {
//if no files/directories have been found yet this is probably a file/directory
if (inFilePath.empty()) {
if (!makeCanonical(inFilePath, boost::filesystem::path(argument))) {
return false;
}
}
else if (outFilePath.empty()) {
if (!makeCanonical(outFilePath, boost::filesystem::path(argument))) {
return false;
}
pastFiles = true;
}
}
else {
std::cout << "Error: Unknown argument \"" << argument << "\"!" << std::endl;
return false;
}
}
return true;
}
//-----------------------------------------------------------------------------
std::vector<FileData> getFileDataFrom(const boost::filesystem::path & inPath, const boost::filesystem::path & outPath, const boost::filesystem::path & parentDir, const bool recurse)
{
//get all files from directory
std::vector<FileData> files;
//check for infinite symlinks
if(boost::filesystem::is_symlink(inPath)) {
//check if the symlink points somewhere in the path. this would recurse
if(inPath.string().find(boost::filesystem::canonical(inPath).string()) == 0) {
std::cout << "Warning: Path " << inPath << " contains recursive symlink! Skipping." << std::endl;
return files;
}
}
//iterate through source directory searching for files
const boost::filesystem::directory_iterator dirEnd;
for (boost::filesystem::directory_iterator fileIt(inPath); fileIt != dirEnd; ++fileIt) {
boost::filesystem::path filePath = (*fileIt).path();
if (!boost::filesystem::is_directory(filePath)) {
if (beVerbose) {
std::cout << "Found input file " << filePath << std::endl;
}
//add file to list
FileData temp;
temp.inPath = filePath;
//replace dots in file name with '_' and add a .c/.cpp extension
std::string newFileName = filePath.filename().generic_string();
std::replace(newFileName.begin(), newFileName.end(), '.', '_');
if (useC) {
newFileName.append(".c");
}
else {
newFileName.append(".cpp");
}
//remove parent directory of file from path for internal name. This could surely be done in a safer way
boost::filesystem::path subPath(filePath.generic_string().substr(parentDir.generic_string().size() + 1));
//add a ":/" before the name to mark internal resources (Yes. Hello Qt!)
temp.internalName = ":/" + subPath.generic_string();
//add subdir below parent path to name to enable multiple files with the same name
std::string subDirString(subPath.remove_filename().generic_string());
if (!subDirString.empty()) {
//replace dir separators by underscores
std::replace(subDirString.begin(), subDirString.end(), '/', '_');
//add in front of file name
newFileName = subDirString + "_" + newFileName;
}
//build new output file name
temp.outPath = outPath / newFileName;
if (beVerbose) {
std::cout << "Internal name will be \"" << temp.internalName << "\"" << std::endl;
std::cout << "Output path is " << temp.outPath << std::endl;
}
//get file size
try {
temp.size = (size_t)boost::filesystem::file_size(filePath);
if (beVerbose) {
std::cout << "Size is " << temp.size << " bytes." << std::endl;
}
}
catch(...) {
std::cout << "Error: Failed to get size of " << filePath << "!" << std::endl;
temp.size = 0;
}
//add file to list
files.push_back(temp);
}
}
//does the user want subdirectories?
if (recurse) {
//iterate through source directory again searching for directories
for (boost::filesystem::directory_iterator dirIt(inPath); dirIt != dirEnd; ++dirIt) {
boost::filesystem::path dirPath = (*dirIt).path();
if (boost::filesystem::is_directory(dirPath)) {
if (beVerbose) {
std::cout << "Found subdirectory " << dirPath << std::endl;
}
//subdirectory found. recurse.
std::vector<FileData> subFiles = getFileDataFrom(dirPath, outPath, parentDir, recurse);
//add returned result to file list
files.insert(files.end(), subFiles.cbegin(), subFiles.cend());
}
}
}
//return result
return files;
}
bool convertFile(FileData & fileData, const boost::filesystem::path & commonHeaderPath, std::ofstream & outStream = badOfStream, bool addHeader = true)
{
if (boost::filesystem::exists(fileData.inPath)) {
//try to open the input file
std::ifstream inStream;
inStream.open(fileData.inPath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Converting input file " << fileData.inPath;
}
//try getting size of data
inStream.seekg(0, std::ios::end);
fileData.size = (size_t)inStream.tellg();
inStream.seekg(0);
//check if the caller passed an output stream and use that
bool closeOutStream = false;
if (!outStream.is_open() || !outStream.good()) {
if (!fileData.outPath.empty()) {
//try opening the output stream. truncate it when it exists
outStream.open(fileData.outPath.string(), std::ofstream::out | std::ofstream::trunc);
}
else {
std::cout << "Error: No output stream passed, but output path for \"" << fileData.inPath.filename().string() << "\" is empty! Skipping." << std::endl;
return false;
}
closeOutStream = true;
}
//now write to stream
if (outStream.is_open() && outStream.good()) {
//check if the caller wants to add a header
if (addHeader) {
//add message
outStream << "//this file was auto-generated from \"" << fileData.inPath.filename().string() << "\" by res2h" << std::endl << std::endl;
//add header include
if (!commonHeaderPath.empty()) {
//common header path must be relative to destination directory
boost::filesystem::path relativeHeaderPath = naiveUncomplete(commonHeaderPath, fileData.outPath);
outStream << "#include \"" << relativeHeaderPath.generic_string() << "\"" << std::endl << std::endl;
}
}
//create names for variables
fileData.dataVariableName = fileData.outPath.filename().stem().string() + "_data";
fileData.sizeVariableName = fileData.outPath.filename().stem().string() + "_size";
//add size and data variable
outStream << "const size_t " << fileData.sizeVariableName << " = " << std::dec << fileData.size << ";" << std::endl;
outStream << "const unsigned char " << fileData.dataVariableName << "[" << std::dec << fileData.size << "] = {" << std::endl;
outStream << " "; //first indent
//now add content
size_t breakCounter = 0;
while (!inStream.eof()) {
//read byte from source
unsigned char dataByte;
inStream.read((char *)&dataByte, 1);
//check if we have actually read something
if (inStream.gcount() != 1 || inStream.eof()) {
//we failed to read. break the read loop and close the file.
break;
}
//write to destination in hex with a width of 2 and '0' as padding
//we do not use showbase as it doesn't work with zero values
outStream << "0x" << std::setw(2) << std::setfill('0') << std::hex << (unsigned int)dataByte;
//was this the last character?
if (!inStream.eof() && fileData.size > (size_t)inStream.tellg()) {
//no. add comma.
outStream << ",";
//add break after 10 bytes and add indent again
if (++breakCounter % 10 == 0) {
outStream << std::endl << " ";
}
}
}
//close curly braces
outStream << std::endl << "};" << std::endl << std::endl;
//close files
if (closeOutStream) {
outStream.close();
}
inStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << fileData.outPath.string() << "\" for writing!" << std::endl;
return false;
}
}
else {
std::cout << "Error: Failed to open file \"" << fileData.inPath.string() << "\" for reading!" << std::endl;
return false;
}
}
else {
std::cout << "Error: File \"" << fileData.inPath.string() << "\" does not exist!" << std::endl;
}
return false;
}
bool createCommonHeader(const std::vector<FileData> & fileList, const boost::filesystem::path & commonHeaderPath, bool addUtilityFunctions = false, bool useCConstructs = false)
{
//try opening the output file. truncate it when it exists
std::ofstream outStream;
outStream.open(commonHeaderPath.generic_string(), std::ofstream::out | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating common header " << commonHeaderPath;
}
//add message
outStream << "//this file was auto-generated by res2h" << std::endl << std::endl;
//add #pragma to only include once
outStream << "#pragma once" << std::endl << std::endl;
//add includes for C++
if (!useCConstructs) {
outStream << "#include <string>" << std::endl;
if (addUtilityFunctions) {
outStream << "#include <map>" << std::endl;
}
outStream << std::endl;
}
//add all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//add size and data variable
outStream << "extern const size_t " << fdIt->sizeVariableName << ";" << std::endl;
outStream << "extern const unsigned char " << fdIt->dataVariableName << "[];" << std::endl << std::endl;
}
//if we want utilities, add array
if (addUtilityFunctions) {
//add resource struct
outStream << "struct Res2hEntry {" << std::endl;
if (useCConstructs) {
outStream << " const char * relativeFileName;" << std::endl;
}
else {
outStream << " const std::string relativeFileName;" << std::endl;
}
outStream << " const size_t size;" << std::endl;
outStream << " const unsigned char * data;" << std::endl;
outStream << "};" << std::endl << std::endl;
//add list holding files
outStream << "extern const size_t res2hNrOfFiles;" << std::endl;
outStream << "extern const Res2hEntry res2hFiles[];" << std::endl << std::endl;
if (!useCConstructs) {
//add additional std::map if C++
outStream << "typedef const std::map<const std::string, const Res2hEntry> res2hMapType;" << std::endl;
outStream << "extern res2hMapType res2hMap;" << std::endl;
}
}
//close file
outStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << commonHeaderPath << "\" for writing!" << std::endl;
}
return true;
}
bool createUtilities(std::vector<FileData> & fileList, const boost::filesystem::path & utilitiesPath, const boost::filesystem::path & commonHeaderPath, bool useCConstructs = false, bool addFileData = false)
{
//try opening the output file. truncate it when it exists
std::ofstream outStream;
outStream.open(utilitiesPath.generic_string(), std::ofstream::out | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating utilities file " << utilitiesPath;
}
//add message
outStream << "//this file was auto-generated by res2h" << std::endl << std::endl;
//create path to include file RELATIVE to this file
boost::filesystem::path relativePath = naiveUncomplete(commonHeaderPath, utilitiesPath);
//include header file
outStream << "#include \"" << relativePath.string() << "\"" << std::endl << std::endl;
//if the data should go to this file too, add it
if (addFileData) {
for (auto fdIt = fileList.begin(); fdIt != fileList.cend(); ++fdIt) {
if (!convertFile(*fdIt, commonHeaderFilePath, outStream, false)) {
std::cout << "Error: Failed to convert all files. Aborting!" << std::endl;
outStream.close();
return false;
}
}
}
//begin data arrays. switch depending on whether C or C++ is used
outStream << "const size_t res2hNrOfFiles = " << fileList.size() << ";" << std::endl;
//add files
outStream << "const Res2hEntry res2hFiles[res2hNrOfFiles] = {" << std::endl;
outStream << " "; //first indent
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend();) {
outStream << "{\"" << fdIt->internalName << "\", " << fdIt->sizeVariableName << ", " << fdIt->dataVariableName << "}";
//was this the last entry?
++fdIt;
if (fdIt != fileList.cend()) {
//no. add comma.
outStream << ",";
//add break after every entry and add indent again
outStream << std::endl << " ";
}
}
outStream << std::endl << "};" << std::endl;
if (!useCConstructs) {
//add files to map
outStream << std::endl << "res2hMapType::value_type mapTemp[] = {" << std::endl;
outStream << " ";
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend();) {
outStream << "std::make_pair(\"" << fdIt->internalName << "\", res2hFiles[" << (fdIt - fileList.cbegin()) << "])";
//was this the last entry?
++fdIt;
if (fdIt != fileList.cend()) {
//no. add comma.
outStream << ",";
//add break after every entry and add indent again
outStream << std::endl << " ";
}
}
outStream << std::endl << "};" << std::endl << std::endl;
//create map
outStream << "res2hMapType res2hMap(mapTemp, mapTemp + sizeof mapTemp / sizeof mapTemp[0]);" << std::endl;
}
//close file
outStream.close();
if (beVerbose) {
std::cout << " - succeeded." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << utilitiesPath << "\" for writing!" << std::endl;
}
return true;
}
//Blob file format:
//Offset | Type | Description
//---------------+----------+-------------------------------------------
//START | char[8] | magic number string "res2hbin"
//08 | uint32_t | file format version number (currently 1)
//12 | uint32_t | format flags or other crap for file (currently 0)
//16 | uint32_t | size of whole archive including checksum in bytes
//20 | uint32_t | number of directory and file entries following
//Then follows the directory:
//24 + 00 | uint32_t | file entry #0, size of internal name INCLUDING null-terminating character
//24 + 04 | char[] | file entry #0, internal name (null-terminated)
//24 + 04 + name | uint32_t | file entry #0, format flags for entry (currently 0)
//24 + 08 + name | uint32_t | file entry #0, size of data
//24 + 12 + name | uint32_t | file entry #0, absolute offset of data in file
//24 + 16 + name | uint32_t | file entry #0, Adler-32 (RFC1950) checksum of data
//Then follow the other directory entries.
//Directly after the directory the data blocks begin.
//END - 04 | uint32_t | Adler-32 (RFC1950) checksum of whole file up to this point
//Obviously this limits you to ~4GB for the whole binary file and ~4GB per data entry. Go cry about it...
//There is some redundant information here, but that's for reading stuff faster.
//Also the version and dummy fields might be needed in later versions...
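//-----------------------------------------------------------------------------
//Illustrative sketch (not part of the original res2h sources): one way a consumer
//could read back a single directory entry laid out as described above. The struct
//and function names here are hypothetical; the actual reading code lives in
//res2hinterface.h/.c/.cpp. Assumes uint32_t is available, as in the rest of this file.
struct BlobDirectoryEntry {
	std::string internalName;
	uint32_t entryFlags;
	uint32_t dataSize;
	uint32_t dataOffset;
	uint32_t checksum;
};

BlobDirectoryEntry readBlobDirectoryEntry(std::istream & inStream)
{
	BlobDirectoryEntry entry;
	//size of the internal name, including the null terminator
	uint32_t nameSize = 0;
	inStream.read(reinterpret_cast<char *>(&nameSize), sizeof(uint32_t));
	//internal name (stored null-terminated in the archive)
	std::string nameBuffer(nameSize, '\0');
	inStream.read(&nameBuffer[0], nameSize);
	entry.internalName = nameBuffer.c_str();
	//flags, data size, absolute data offset and Adler-32 checksum of the data
	inStream.read(reinterpret_cast<char *>(&entry.entryFlags), sizeof(uint32_t));
	inStream.read(reinterpret_cast<char *>(&entry.dataSize), sizeof(uint32_t));
	inStream.read(reinterpret_cast<char *>(&entry.dataOffset), sizeof(uint32_t));
	inStream.read(reinterpret_cast<char *>(&entry.checksum), sizeof(uint32_t));
	return entry;
}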
bool createBlob(const std::vector<FileData> & fileList, const boost::filesystem::path & filePath)
{
//try opening the output file. truncate it when it exists
std::fstream outStream;
outStream.open(filePath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::trunc);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Creating binary archive " << filePath << std::endl;
}
//add magic number
const unsigned char magicBytes[9] = RES2H_MAGIC_BYTES;
outStream.write(reinterpret_cast<const char *>(&magicBytes), sizeof(magicBytes) - 1);
//add version and format flag
const uint32_t fileVersion = RES2H_ARCHIVE_VERSION;
const uint32_t fileFlags = 0;
outStream.write(reinterpret_cast<const char *>(&fileVersion), sizeof(uint32_t));
outStream.write(reinterpret_cast<const char *>(&fileFlags), sizeof(uint32_t));
//add dummy archive size
uint32_t archiveSize = 0;
outStream.write(reinterpret_cast<const char *>(&archiveSize), sizeof(uint32_t));
//add number of directory entries
const uint32_t nrOfEntries = fileList.size();
outStream.write(reinterpret_cast<const char *>(&nrOfEntries), sizeof(uint32_t));
//skip through files calculating data start offset behind directory
size_t dataStart = RES2H_OFFSET_DIR_START;
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//calculate the size of each entry and add it to the data start address
dataStart += 20 + fdIt->internalName.size() + 1;
}
//add directory for all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//add size of name
const uint32_t nameSize = fdIt->internalName.size() + 1;
outStream.write(reinterpret_cast<const char *>(&nameSize), sizeof(uint32_t));
//add name and null-termination
outStream << fdIt->internalName << '\0';
//add flags
const uint32_t entryFlags = 0;
outStream.write(reinterpret_cast<const char *>(&entryFlags), sizeof(uint32_t));
//add data size
outStream.write(reinterpret_cast<const char *>(&fdIt->size), sizeof(uint32_t));
//add offset from file start to start of data
outStream.write(reinterpret_cast<const char *>(&dataStart), sizeof(uint32_t));
//add checksum of data
const uint32_t checksum = calculateAdler32(fdIt->inPath.string());
outStream.write(reinterpret_cast<const char *>(&checksum), sizeof(uint32_t));
if (beVerbose) {
std::cout << "Creating directory entry for \"" << fdIt->internalName << "\"" << std::endl;
std::cout << "Size is " << fdIt->size << " bytes." << std::endl;
std::cout << "Data starts at " << std::hex << std::showbase << dataStart << std::endl;
std::cout << "Adler-32 checksum is " << std::hex << std::showbase << checksum << std::endl;
}
//now add the size of this entry's data to the start offset for the next data block
dataStart += fdIt->size;
}
//add data for all files
for (auto fdIt = fileList.cbegin(); fdIt != fileList.cend(); ++fdIt) {
//try to open file
std::ifstream inStream;
inStream.open(fdIt->inPath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Adding data for \"" << fdIt->internalName << "\"" << std::endl;
}
std::streamsize overallDataSize = 0;
//copy data from input to output file
while (!inStream.eof() && inStream.good()) {
unsigned char buffer[1024];
std::streamsize readSize = sizeof(buffer);
try {
//try reading data from input file
inStream.read(reinterpret_cast<char *>(&buffer), sizeof(buffer));
}
catch (std::ios_base::failure) { /*ignore read failure. salvage what we can.*/ }
//store how many bytes were actually read
readSize = inStream.gcount();
//write to output file
outStream.write(reinterpret_cast<const char *>(&buffer), readSize);
//increase size of overall data read
overallDataSize += readSize;
}
//close input file
inStream.close();
//check if the file was completely read
if (overallDataSize != fdIt->size) {
std::cout << "Error: Failed to completely copy file \"" << fdIt->inPath.string() << "\" to binary data!" << std::endl;
outStream.close();
return false;
}
}
else {
std::cout << "Error: Failed to open file \"" << fdIt->inPath.string() << "\" for reading!" << std::endl;
outStream.close();
return false;
}
}
//final archive size is current size + checksum. write size to the header now
archiveSize = (uint32_t)outStream.tellg() + sizeof(uint32_t);
outStream.seekg(RES2H_OFFSET_ARCHIVE_SIZE);
outStream.write(reinterpret_cast<const char *>(&archiveSize), sizeof(uint32_t));
//close file
outStream.close();
if (beVerbose) {
std::cout << "Binary archive creation succeeded." << std::endl;
}
//calculate checksum of whole file
const uint32_t adler32 = calculateAdler32(filePath.string());
//open file again, move to end of file and append checksum
outStream.open(filePath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::app);
if (outStream.is_open() && outStream.good()) {
outStream.seekg(0, std::ios::end);
outStream.write(reinterpret_cast<const char *>(&adler32), sizeof(uint32_t));
//close file
outStream.close();
}
else {
std::cout << "Error: Failed to open file \"" << filePath.string() << "\" for writing!" << std::endl;
return false;
}
if (beVerbose) {
std::cout << "Archive checksum is " << std::hex << std::showbase << adler32 << "." << std::endl;
}
return true;
}
else {
std::cout << "Error: Failed to open file \"" << filePath.string() << "\" for writing!" << std::endl;
return false;
}
return false;
}
bool appendAtoB(const boost::filesystem::path & destinationPath, const boost::filesystem::path & sourcePath)
{
//try opening the output file.
std::fstream outStream;
outStream.open(destinationPath.string(), std::ofstream::out | std::ofstream::binary | std::ofstream::app);
if (outStream.is_open() && outStream.good()) {
if (beVerbose) {
std::cout << std::endl << "Opened output file " << destinationPath << std::endl;
}
//seek to the end
outStream.seekg(0, std::ios::end);
//open input file
std::ifstream inStream;
inStream.open(sourcePath.string(), std::ifstream::in | std::ifstream::binary);
if (inStream.is_open() && inStream.good()) {
if (beVerbose) {
std::cout << "Opened input file \"" << sourcePath << "\". Appending data to output." << std::endl;
}
//copy data from input to output file
while (!inStream.eof() && inStream.good()) {
unsigned char buffer[1024];
std::streamsize readSize = sizeof(buffer);
try {
//try reading data from input file
inStream.read(reinterpret_cast<char *>(&buffer), sizeof(buffer));
}
catch (std::ios_base::failure) { /*ignore read failure. salvage what we can.*/ }
//store how many bytes were actually read
readSize = inStream.gcount();
//write to output file
outStream.write(reinterpret_cast<const char *>(&buffer), readSize);
}
//close input file
inStream.close();
}
else {
std::cout << "Error: Failed to open input file \"" << sourcePath.string() << "\" for reading!" << std::endl;
outStream.close();
return false;
}
//close output file
outStream.close();
return true;
}
else {
std::cout << "Error: Failed to open output file \"" << destinationPath.string() << "\" for writing!" << std::endl;
}
return false;
}
//-----------------------------------------------------------------------------
int main(int argc, const char * argv[])
{
printVersion();
//check number of arguments and if all arguments can be read
if(argc < 3 || !readArguments(argc, argv)) {
printUsage();
return -1;
}
//check if the input path exist
if (!boost::filesystem::exists(inFilePath)) {
std::cout << "Error: Invalid input file/directory \"" << inFilePath.string() << "\"!" << std::endl;
return -2;
}
if (createBinary) {
//check if argument 2 is a file
if (boost::filesystem::is_directory(outFilePath)) {
std::cout << "Error: Output must be a file if -b is used!" << std::endl;
return -2;
}
}
else if (appendFile) {
//check if argument 2 is a file
if (boost::filesystem::is_directory(outFilePath)) {
std::cout << "Error: Output must be a file if -a is used!" << std::endl;
return -2;
}
}
else if (boost::filesystem::is_directory(inFilePath) != boost::filesystem::is_directory(outFilePath)) {
//check if output directory exists
if (boost::filesystem::is_directory(outFilePath) && !boost::filesystem::exists(outFilePath)) {
std::cout << "Error: Invalid output directory \"" << outFilePath.string() << "\"!" << std::endl;
return -2;
}
//check if arguments 1 and 2 are both files or both directories
std::cout << "Error: Input and output file must be both either a file or a directory!" << std::endl;
return -2;
}
if (appendFile) {
//append file a to b
if (!appendAtoB(outFilePath, inFilePath)) {
std::cout << "Error: Failed to append data to executable!" << std::endl;
return -3;
}
}
else {
//build list of files to process
std::vector<FileData> fileList;
if (boost::filesystem::is_directory(inFilePath) && boost::filesystem::is_directory(outFilePath)) {
//both paths are directories, build file list
fileList = getFileDataFrom(inFilePath, outFilePath, inFilePath, useRecursion);
if (fileList.empty()) {
std::cout << "Error: No files to convert!" << std::endl;
return -3;
}
}
else {
//just add single input/output file
FileData temp;
temp.inPath = inFilePath;
temp.outPath = outFilePath;
temp.internalName = inFilePath.filename().string(); //remove all, but the file name and extension
if (beVerbose) {
std::cout << "Found input file " << inFilePath << std::endl;
std::cout << "Internal name will be \"" << temp.internalName << "\"" << std::endl;
std::cout << "Output path is " << temp.outPath << std::endl;
}
//get file size
try {
temp.size = (size_t)boost::filesystem::file_size(inFilePath);
if (beVerbose) {
std::cout << "Size is " << temp.size << " bytes." << std::endl;
}
}
catch(...) {
std::cout << "Error: Failed to get size of " << inFilePath << "!" << std::endl;
temp.size = 0;
}
fileList.push_back(temp);
}
//does the user want a binary file?
if (createBinary) {
//yes. build it.
if (!createBlob(fileList, outFilePath)) {
std::cout << "Error: Failed to convert to binary file!" << std::endl;
return -4;
}
}
else {
//no. convert files to .c/.cpp. loop through list, converting files
for (auto fdIt = fileList.begin(); fdIt != fileList.cend(); ++fdIt) {
if (!convertFile(*fdIt, commonHeaderFilePath)) {
std::cout << "Error: Failed to convert all files. Aborting!" << std::endl;
return -4;
}
}
//do we need to write a header file?
if (!commonHeaderFilePath.empty()) {
if (!createCommonHeader(fileList, commonHeaderFilePath, !utilitiesFilePath.empty(), useC)) {
return -5;
}
//do we need to create utilities?
if (!utilitiesFilePath.empty()) {
if (!createUtilities(fileList, utilitiesFilePath, commonHeaderFilePath, useC, combineResults)) {
return -6;
}
}
}
}
} //if (!appendFile) {
//profit!!!
std::cout << "res2h succeeded." << std::endl;
return 0;
}
\section{Discussion}\label{section:discussion}
We have introduced relative suffix trees (\RCST), a new kind of compressed suffix tree for repetitive sequence collections. Our \RCST{} compresses the suffix tree of an individual sequence relative to the suffix tree of a reference sequence. It combines an already known relative suffix array with a novel relative-compressed longest common prefix representation (\RLCP). When the sequences are similar enough (e.g., two human genomes), the \RCST{} requires about 3 bits per symbol on each target sequence. This is close to the space used by the most space-efficient compressed suffix trees designed to store repetitive collections in a single tree, but the \RCST{} provides a different functionality as it indexes each sequence individually. The \RCST{} supports query and navigation operations within a few microseconds, which is competitive with the largest and fastest compressed suffix trees.
The size of \RCST{} is proportional to the amount of sequence that is present either in the reference or in the target, but not both. This is unusual for relative compression, where any additional material in the reference is generally harmless. Sorting the suffixes in lexicographic order tends to distribute the additional suffixes all over the suffix array, creating many mismatches between the suffix-based structures of the reference and the target. For example, the 60~million suffixes from chromosome~Y created 34~million new phrases in the RLZ parse of the \DLCP{} array of a female genome, doubling the size of the \RLCP{} array. Having multiple references (e.g.~male and female) can hence be worthwhile when building relative data structures for many target sequences.
While our \RCST{} implementation provides competitive time/space trade-offs, there is still much room for improvement. Most importantly, some of the construction algorithms require significant amounts of time and memory. In many places, we have chosen simple and fast implementation options, even though there could be alternatives that require significantly less space without being too much slower.
Our \RCST{} is a relative version of the \CSTnpr. Another alternative for future work is a relative \CSTsada, using \RLZ{} compressed bitvectors for suffix tree topology and \PLCP. %Based on our preliminary experiments, the main obstacle is the compression of phrase pointers. Relative pointers work well when most differences between the reference and the target are single-character substitutions. As suffix sorting multiplies the differences and transforms substitutions into insertions and deletions, we need new compression schemes for the pointers.
import os
import numpy.linalg as la
import numpy as np
from skimage.draw import line_nd
from os.path import join, expanduser
from dipy.io import read_bvals_bvecs
from dipy.io.image import load_nifti, save_nifti
rel_path = '~/.dnn/datasets/synth'
name = 'synth'
def process_movement():
    bvals, bvecs = load_bvals_bvecs()
    img, affine = load_image_from_nifti()
    mov = get_movement_estimates(img, bvecs)
    save_mov_image(mov, affine, name)

def load_image_from_numpy():
    path = os.path.expanduser(rel_path)
    url = os.path.join(path, name + '.npz')
    img_dict = np.load(url, allow_pickle=True)
    return img_dict['img']

def load_image_from_nifti():
    base_path = expanduser(rel_path)
    digit_hardi_url = join(base_path, name + '.nii.gz')
    img, affine = load_nifti(digit_hardi_url)
    return img, affine

def load_bvals_bvecs():
    path = os.path.expanduser(rel_path)
    bvals_url = join(path, 'bvals')
    bvecs_url = join(path, 'bvecs')
    bvals, bvecs = read_bvals_bvecs(bvals_url, bvecs_url)
    return bvals, bvecs

def save_mov_image(mov, affine, name):
    path = os.path.expanduser(rel_path)
    if not os.path.isdir(path):
        os.makedirs(path)
    # np.savez(os.path.join(path, name + '_mov'), mov=mov)
    save_nifti(os.path.join(path, name + '_mov.nii.gz'), mov, affine)

def mov_img(img, direction):
    mov = np.zeros_like(img)
    dims = img.shape
    for i in range(dims[0]):
        for j in range(dims[1]):
            for k in range(dims[2]):
                mov_ijk = movement(img, (i, j, k), direction, radius=10, eps=0.01)
                mov[i, j, k] = mov_ijk
    return mov
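
# Assumed helper (the original module calls get_points_bidirectional but does not
# define or import it). This sketch reconstructs a plausible implementation with
# skimage.draw.line_nd, which is imported above: it collects the voxel coordinates
# along +/- `direction` from `center`, clipped to the image shape.
def get_points_bidirectional(center, direction, radius, shape):
    center = np.asarray(center, dtype=float)
    direction = np.asarray(direction, dtype=float)
    norm = la.norm(direction)
    if norm < 1e-12:
        return np.asarray(center, dtype=int).reshape(3, 1)
    step = radius * direction / norm
    limit = np.asarray(shape[:3]) - 1
    start = np.clip(center - step, 0, limit)
    end = np.clip(center + step, 0, limit)
    coords = line_nd(start, end, endpoint=True)
    return np.asarray(coords)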
def movement(img, center, direction, radius=10, eps=0.01, min_val=1e-9):
    center_value = img[center[0], center[1], center[2]]
    mov = 0
    if abs(center_value) > min_val:
        coords = get_points_bidirectional(center, direction, radius, img.shape)
        z = img[coords[0], coords[1], coords[2]]
        if len(z) > 1:
            deltas = np.abs(z[0] - z[1:]) + eps
            variation = (1 / (len(z) - 1)) * np.sum(deltas)
            mov = center_value / variation
    return mov

def get_movement_estimates(img, bvecs, max_bvecs=None):
    bvec_list = bvecs.tolist()[:max_bvecs]
    movs = []
    for k, direction in enumerate(bvec_list):
        print(f'direction {k + 1} of {len(bvec_list)}')
        mov_for_direction = mov_img(img, direction)
        movs.append(mov_for_direction)
    mov = np.transpose(np.array(movs), (1, 2, 3, 0))
    return mov

if __name__ == '__main__':
    process_movement()
[STATEMENT]
lemma vars_of_instances:
shows "vars_of (subst t \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (t \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
proof (induction t)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Const a)
[PROOF STATE]
proof (state)
this:
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Const a) = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (Const a) = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Const a) = {}
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (Const a) = {}
[PROOF STEP]
have rhs_empty: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Const a))) \<and> (V = vars_of (subst (Var x) \<sigma>)) } = {}"
[PROOF STATE]
proof (prove)
using this:
vars_of (Const a) = {}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have lhs_empty: "(subst (Const a) \<sigma>) = (Const a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Const a \<lhd> \<sigma> = Const a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Const a \<lhd> \<sigma> = Const a
goal (3 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>x. vars_of (Const x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Const x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
3. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from rhs_empty and lhs_empty
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
Const a \<lhd> \<sigma> = Const a
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = {}
Const a \<lhd> \<sigma> = Const a
goal (1 subgoal):
1. vars_of (Const a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Const a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Const a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Var a)
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Var a) = { a }"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (Var a) = {a}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Var a) = {a}
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (Var a) = {a}
[PROOF STEP]
have rhs: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Var a))) \<and> (V = vars_of (subst (Var x) \<sigma>)) } =
vars_of (subst (Var a) \<sigma>)"
[PROOF STATE]
proof (prove)
using this:
vars_of (Var a) = {a}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have lhs: "(subst (Var a) \<sigma>) = (subst (Var a) \<sigma>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
goal (2 subgoals):
1. \<And>x. vars_of (Var x \<lhd> \<sigma>) = \<Union> {V. \<exists>xa. xa \<in> vars_of (Var x) \<and> V = vars_of (Var xa \<lhd> \<sigma>)}
2. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from rhs and lhs
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (Var a \<lhd> \<sigma>)
Var a \<lhd> \<sigma> = Var a \<lhd> \<sigma>
goal (1 subgoal):
1. vars_of (Var a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (Var a \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (Var a) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
case (Comb t1 t2)
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "vars_of (Comb t1 t2) = (vars_of t1) \<union> (vars_of t2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
[PROOF STEP]
have "\<Union> { V. \<exists>x. (x \<in> (vars_of (Comb t1 t2))) \<and> (V = vars_of (subst (Var x) \<sigma>)) }
= \<Union> { V. \<exists>x. (x \<in> (vars_of t1)) \<and> (V = vars_of (subst(Var x) \<sigma>)) }
\<union> \<Union> { V. \<exists>x. (x \<in> (vars_of t2)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }"
[PROOF STATE]
proof (prove)
using this:
vars_of (t1 \<cdot> t2) = vars_of t1 \<union> vars_of t2
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have rhs: "\<Union> { V. \<exists>x. (x \<in> (vars_of (Comb t1 t2))) \<and> (V = vars_of (subst (Var x) \<sigma>)) }
= (vars_of (subst t1 \<sigma>)) \<union> (vars_of (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
using \<open>vars_of (subst t1 \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t1)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }\<close>
and
\<open>vars_of (subst t2 \<sigma>)
= \<Union> { V. \<exists>x. (x \<in> (vars_of t2)) \<and> (V = vars_of (subst (Var x) \<sigma>)) }\<close>
[PROOF STATE]
proof (prove)
using this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)} \<union> \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal (1 subgoal):
1. \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
have "(subst (Comb t1 t2) \<sigma>) = (Comb (subst t1 \<sigma>) (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
[PROOF STEP]
have lhs: "(vars_of (subst (Comb t1 t2) \<sigma>)) =
(vars_of (subst t1 \<sigma>)) \<union> (vars_of (subst t2 \<sigma>))"
[PROOF STATE]
proof (prove)
using this:
t1 \<cdot> t2 \<lhd> \<sigma> = (t1 \<lhd> \<sigma>) \<cdot> (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. \<And>t1 t2. \<lbrakk>vars_of (t1 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t1 \<and> V = vars_of (Var x \<lhd> \<sigma>)}; vars_of (t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of t2 \<and> V = vars_of (Var x \<lhd> \<sigma>)}\<rbrakk> \<Longrightarrow> vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
from lhs and rhs
[PROOF STATE]
proof (chain)
picking this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
\<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)} = vars_of (t1 \<lhd> \<sigma>) \<union> vars_of (t2 \<lhd> \<sigma>)
goal (1 subgoal):
1. vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
vars_of (t1 \<cdot> t2 \<lhd> \<sigma>) = \<Union> {V. \<exists>x. x \<in> vars_of (t1 \<cdot> t2) \<and> V = vars_of (Var x \<lhd> \<sigma>)}
goal:
No subgoals!
[PROOF STEP]
qed
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import LeaveOneGroupOut
from plot_with_PE_imputation import plot_with_PE_imputation
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.signal import medfilt
#Load Data
data = pd.read_csv('./facies_vectors.csv')
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
# Store features and labels
# X = data[feature_names].values
# y = data['Facies'].values
# Store well labels and depths
wells = data['Well Name'].values
depth = data['Depth'].values
# Imputation
DataImp_dropNA = data.dropna(axis = 0, inplace = False)
F9idx = DataImp_dropNA[DataImp_dropNA['Well Name'] == 'Recruit F9'].index
DataImp_dropF9 = DataImp_dropNA.drop(F9idx)
wells_noPE = DataImp_dropF9['Well Name'].values
DataImp = DataImp_dropF9.drop(['Formation', 'Well Name', 'Depth'], axis=1).copy()
Ximp=DataImp.loc[:, DataImp.columns != 'PE'].values
Yimp=DataImp.loc[:, 'PE'].values
from sklearn.preprocessing import RobustScaler
scaler = RobustScaler()
scaler.fit(Ximp)
Ximp_scaled = scaler.transform(Ximp)
logo = LeaveOneGroupOut()
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
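# Illustration (added, not part of the original script): LeaveOneGroupOut holds out
# one well per split; e.g. with groups ['A', 'A', 'B'] it yields train=[2], test=[0, 1]
# and then train=[0, 1], test=[2], so each well's PE is predicted from the other wells.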
## Repeat the run several times with the same parameters
loop = 10
loop_mse_list = []
loop_R2_list = []
df_loop = pd.DataFrame(columns=["R2","MSE"])
for i in range(loop):
mselist = []
R2list = []
for train, test in logo.split(Ximp_scaled, Yimp, groups=wells_noPE):
well_name = wells_noPE[test[0]]
# Imputation using MLP
reg = MLPRegressor(hidden_layer_sizes=50, max_iter=1000)
reg.fit(Ximp_scaled[train], Yimp[train])
Yimp_predicted = reg.predict(Ximp_scaled[test])
## medfilt
Yimp_predicted = medfilt(Yimp_predicted, kernel_size=5)
R2 = r2_score(Yimp[test], Yimp_predicted)
mse = mean_squared_error(Yimp[test], Yimp_predicted)
print("Well name_test : ", well_name)
print("R2 : %.4f" % R2)
print("mse : %.4f" % mse)
R2list.append(R2)
mselist.append(mse)
# predict_data = data[data['Well Name'] == well_name].copy()
# predict_data["PE_pred"] = Yimp_predicted
#
# plot_with_PE_imputation(predict_data, facies_colors,R2)
average_R2 = np.mean(np.array(R2list))
average_mse = np.mean(np.array(mselist))
print("%i of %i" % (i+1,loop), end=" ")
print("average R2 : %.4f " % average_R2, end=" ")
print("average MSE : %.4f " % average_mse)
loop_mse_list.append(average_mse)
loop_R2_list.append(average_R2)
df_loop.loc["try %i"%(i+1)] = [average_R2, average_mse]
average_R2_loop = np.mean(np.array(loop_R2_list))
average_mse_loop = np.mean(np.array(loop_mse_list))
df_loop.loc["average"] = [average_R2_loop, average_mse_loop]
print(df_loop)
# df_loop.to_excel("MLP_try10.xlsx")
| {"hexsha": "e6bbf3ea06862e0e492948c45271192370b86fdb", "size": 3154, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLP_mean_impute.py", "max_stars_repo_name": "suniipang/PE_Imputation", "max_stars_repo_head_hexsha": "836b9c687883ac87f091785fc17fded6d122be83", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MLP_mean_impute.py", "max_issues_repo_name": "suniipang/PE_Imputation", "max_issues_repo_head_hexsha": "836b9c687883ac87f091785fc17fded6d122be83", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MLP_mean_impute.py", "max_forks_repo_name": "suniipang/PE_Imputation", "max_forks_repo_head_hexsha": "836b9c687883ac87f091785fc17fded6d122be83", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5154639175, "max_line_length": 112, "alphanum_fraction": 0.6940393152, "include": true, "reason": "import numpy,from scipy", "num_tokens": 927} |
[STATEMENT]
lemma ns_mul_ext_bottom: "(A,{#}) \<in> ns_mul_ext ns s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A, {#}) \<in> ns_mul_ext ns s
[PROOF STEP]
by (auto intro!: ns_mul_extI)
from policy import LSTMPolicy, MlpPolicyValue
import gym
import gym_compete
import pickle
import sys
import argparse
import tensorflow as tf
import numpy as np
def load_from_file(param_pkl_path):
with open(param_pkl_path, 'rb') as f:
params = pickle.load(f)
return params
def setFromFlat(var_list, flat_params):
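    # Added annotation: splits the flat parameter vector into slices matching the
    # shapes in var_list, reshapes each slice, and assigns them to the TF variables
    # in one grouped op run on the default session.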
shapes = list(map(lambda x: x.get_shape().as_list(), var_list))
total_size = np.sum([int(np.prod(shape)) for shape in shapes])
theta = tf.placeholder(tf.float32, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = int(np.prod(shape))
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
op = tf.group(*assigns)
tf.get_default_session().run(op, {theta: flat_params})
def run(config):
if config.env == "kick-and-defend":
env = gym.make("kick-and-defend-v0")
policy_type = "lstm"
elif config.env == "run-to-goal-humans":
env = gym.make("run-to-goal-humans-v0")
policy_type = "mlp"
elif config.env == "run-to-goal-ants":
env = gym.make("run-to-goal-ants-v0")
policy_type = "mlp"
elif config.env == "you-shall-not-pass":
env = gym.make("you-shall-not-pass-humans-v0")
policy_type = "mlp"
elif config.env == "sumo-humans":
env = gym.make("sumo-humans-v0")
policy_type = "lstm"
elif config.env == "sumo-ants":
env = gym.make("sumo-ants-v0")
policy_type = "lstm"
else:
print("unsupported environment")
print("choose from: run-to-goal-humans, run-to-goal-ants, you-shall-not-pass, sumo-humans, sumo-ants, kick-and-defend")
sys.exit()
param_paths = config.param_paths
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__()
policy = []
for i in range(2):
scope = "policy" + str(i)
if policy_type == "lstm":
policy.append(LSTMPolicy(scope=scope, reuse=False,
ob_space=env.observation_space.spaces[i],
ac_space=env.action_space.spaces[i],
hiddens=[128, 128], normalize=True))
else:
policy.append(MlpPolicyValue(scope=scope, reuse=False,
ob_space=env.observation_space.spaces[i],
ac_space=env.action_space.spaces[i],
hiddens=[64, 64], normalize=True))
# initialize uninitialized variables
sess.run(tf.variables_initializer(tf.global_variables()))
params = [load_from_file(param_pkl_path=path) for path in param_paths]
for i in range(len(policy)):
setFromFlat(policy[i].get_variables(), params[i])
max_episodes = config.max_episodes
num_episodes = 0
nstep = 0
total_reward = [0.0 for _ in range(len(policy))]
total_scores = [0 for _ in range(len(policy))]
# total_scores = np.asarray(total_scores)
observation = env.reset()
print("-"*5 + " Episode %d " % (num_episodes+1) + "-"*5)
while num_episodes < max_episodes:
env.render()
action = tuple([policy[i].act(stochastic=True, observation=observation[i])[0]
for i in range(len(policy))])
observation, reward, done, infos = env.step(action)
nstep += 1
for i in range(len(policy)):
total_reward[i] += reward[i]
if done[0]:
num_episodes += 1
draw = True
for i in range(len(policy)):
if 'winner' in infos[i]:
draw = False
total_scores[i] += 1
print("Winner: Agent {}, Scores: {}, Total Episodes: {}".format(i, total_scores, num_episodes))
if draw:
print("Game Tied: Agent {}, Scores: {}, Total Episodes: {}".format(i, total_scores, num_episodes))
observation = env.reset()
nstep = 0
total_reward = [0.0 for _ in range(len(policy))]
for i in range(len(policy)):
policy[i].reset()
if num_episodes < max_episodes:
print("-"*5 + "Episode %d" % (num_episodes+1) + "-"*5)
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Environments for Multi-agent competition")
p.add_argument("--env", default="sumo-humans", type=str, help="competitive environment: run-to-goal-humans, run-to-goal-ants, you-shall-not-pass, sumo-humans, sumo-ants, kick-and-defend")
p.add_argument("--param-paths", nargs='+', required=True, type=str)
p.add_argument("--max-episodes", default=10, help="max number of matches", type=int)
config = p.parse_args()
run(config)
| {"hexsha": "d2997f9fd0befd133e3a7728c04ecbf4d5053abb", "size": 4910, "ext": "py", "lang": "Python", "max_stars_repo_path": "multiagent-competition/main.py", "max_stars_repo_name": "MachengShen/torchbeast", "max_stars_repo_head_hexsha": "3853fdda44db4d91d773ff2a3db3658a02fa1a15", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "multiagent-competition/main.py", "max_issues_repo_name": "MachengShen/torchbeast", "max_issues_repo_head_hexsha": "3853fdda44db4d91d773ff2a3db3658a02fa1a15", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "multiagent-competition/main.py", "max_forks_repo_name": "MachengShen/torchbeast", "max_forks_repo_head_hexsha": "3853fdda44db4d91d773ff2a3db3658a02fa1a15", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.918699187, "max_line_length": 191, "alphanum_fraction": 0.5890020367, "include": true, "reason": "import numpy", "num_tokens": 1189} |
# -*- coding:utf-8 -*-
###############################################################################
# Rutap Bot 2019 Hangul Clock Module #
# This module is software derived from Hangul Clock and is covered by the    #
# GPLv3 license. Please obtain the original author's permission before using #
# this module. The copyright for this module is held by Hwahyang.            #
###############################################################################
import random, datetime, os
import numpy as np
from PIL import Image
from activity_log import log_actvity
def alpha_composite(src, dst):
src = np.asarray(src)
dst = np.asarray(dst)
out = np.empty(src.shape, dtype = 'float')
alpha = np.index_exp[:, :, 3:]
rgb = np.index_exp[:, :, :3]
src_a = src[alpha]/255.0
dst_a = dst[alpha]/255.0
out[alpha] = src_a+dst_a*(1-src_a)
old_setting = np.seterr(invalid = 'ignore')
out[rgb] = (src[rgb]*src_a + dst[rgb]*dst_a*(1-src_a))/out[alpha]
np.seterr(**old_setting)
out[alpha] *= 255
    out = np.clip(out, 0, 255)
out = out.astype('uint8')
out = Image.fromarray(out, 'RGBA')
return out
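# Usage sketch (added; file names are hypothetical): composites src over dst, both
# RGBA PIL images of the same size, and returns a new RGBA image.
#   fg = Image.open("overlay.png").convert("RGBA")
#   bg = Image.open("background.png").convert("RGBA")
#   merged = alpha_composite(fg, bg)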
def hangul_clock():
open('clock_rendering.rtl', 'w').close()
now = datetime.datetime.now()
filename = "%s_%s_%s_%s_%s_%s.png" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
BG = Image.open("hangul_clock_base/BG_1000_1500.png")
ment = Image.open("hangul_clock_base/ment/ment%s_1000_1500.png" % (random.randint(1, 3)))
one = alpha_composite(ment, BG)
hour_base = Image.open("hangul_clock_base/hour/hour_base_1000_1500.png")
two = alpha_composite(hour_base, one)
min_base = Image.open("hangul_clock_base/minute/minute_base_1000_1500.png")
three = alpha_composite(min_base, two)
hour = now.hour
if hour > 12:
hour = now.hour - 12
now_hour = Image.open("hangul_clock_base/hour/hour_%s_1000_1500.png" % (hour))
four = alpha_composite(now_hour, three)
now_minute = Image.open("hangul_clock_base/minute/minute_%s_1000_1500.png" % (now.minute))
five = alpha_composite(now_minute, four)
result = five
result.save(filename)
log_actvity("I completed rendering Clock Render")
os.remove('clock_rendering.rtl')
    return filename
from __future__ import division, absolute_import, print_function
import glob
import argparse
import os
import shutil
import pdb
import numpy as np
from tqdm import tqdm
CONTINUAL_LEARNING_LABELS = ['CC', 'SC', 'EC', 'SQC']
CL_LABEL_KEY = "continual_learning_label"
def main():
parser = argparse.ArgumentParser(description='Dataset Manipulator: useful to merge two datasets by concatenating '
+ 'episodes. PS: Deleting sources after merging into the destination '
+ 'folder.')
parser.add_argument('--continual-learning-labels', type=str, nargs=2, metavar=('label_1', 'label_2'),
default=argparse.SUPPRESS, help='Labels for the continual learning RL distillation task.')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='Force the merge, even if it overrides something else,'
' including the destination if it exist')
parser.add_argument('--timesteps', type=int, nargs=2, default=[-1,-1],
help="To have a certain number of frames for two data sets ")
group = parser.add_mutually_exclusive_group()
group.add_argument('--merge', type=str, nargs=3, metavar=('source_1', 'source_2', 'destination'),
default=argparse.SUPPRESS,
help='Merge two datasets by appending the episodes, deleting sources right after.')
args = parser.parse_args()
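    # Example invocation (hypothetical paths, added for illustration):
    #   python dataset_merger.py -f --merge data/src_1 data/src_2 data/merged \
    #       --continual-learning-labels CC SC --timesteps 5000 5000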
if 'merge' in args:
# let make sure everything is in order
assert os.path.exists(args.merge[0]), "Error: dataset '{}' could not be found".format(args.merge[0])
        # If the merge destination already exists, delete it for the convenience of updating the student's policy
if os.path.exists(args.merge[2]) or os.path.exists(args.merge[2] + '/'):
assert args.force, "Error: destination directory '{}' already exists".format(args.merge[2])
shutil.rmtree(args.merge[2])
if 'continual_learning_labels' in args:
assert args.continual_learning_labels[0] in CONTINUAL_LEARNING_LABELS \
and args.continual_learning_labels[1] in CONTINUAL_LEARNING_LABELS, \
"Please specify a valid Continual learning label to each dataset to be used for RL distillation !"
# create the output
os.mkdir(args.merge[2])
#os.rename(args.merge[0] + "/dataset_config.json", args.merge[2] + "/dataset_config.json")
#os.rename(args.merge[0] + "/env_globals.json", args.merge[2] + "/env_globals.json")
shutil.copy2(args.merge[0] + "/dataset_config.json",args.merge[2] + "/dataset_config.json")
shutil.copy2(args.merge[0] + "/env_globals.json", args.merge[2] + "/env_globals.json")
# copy files from first source
num_timesteps_1, num_timesteps_2 = args.timesteps
local_path = os.getcwd()
all_records = sorted(glob.glob(args.merge[0] + "/record_[0-9]*/*"))
previous_records = all_records[0]
for ts_counter_1, record in enumerate(all_records):
            # if we already have more timesteps than requested, stop at the next episode boundary
if(num_timesteps_1>0 and ts_counter_1 >num_timesteps_1):
if(os.path.dirname(previous_records).split('_')[-1] != os.path.dirname(record).split('_')[-1]):
break
s = args.merge[2] + "/" + record.split("/")[-2] + '/' + record.split("/")[-1]
s = os.path.join(local_path,s)
record = os.path.join(local_path, record)
try:
shutil.copy2(record, s)
except FileNotFoundError:
os.mkdir(os.path.dirname(s))
shutil.copy2(record, s)
previous_records = record
num_episode_dataset_1 = int(previous_records.split("/")[-2][7:])
if (num_timesteps_1 == -1):
num_episode_dataset_1 += 1
ts_counter_1 += 1
# copy files from second source
all_records = sorted(glob.glob(args.merge[1] + "/record_[0-9]*/*"))
previous_records = all_records[0]
for ts_counter_2, record in enumerate(all_records):
if (num_timesteps_2 > 0 and ts_counter_2 > num_timesteps_2):
if (os.path.dirname(previous_records).split('_')[-1] != os.path.dirname(record).split('_')[-1]):
break
episode = str(num_episode_dataset_1 + int(record.split("/")[-2][7:]))
new_episode = record.split("/")[-2][:-len(episode)] + episode
s = args.merge[2] + "/" + new_episode + '/' + record.split("/")[-1]
s = os.path.join(local_path, s)
record = os.path.join(local_path, record)
try:
shutil.copy2(record, s)
except FileNotFoundError:
os.mkdir(os.path.dirname(s))
shutil.copy2(record, s)
previous_records = record
num_episode_dataset_2 = int(previous_records.split("/")[-2][7:])
if(num_timesteps_2==-1):
num_episode_dataset_2 +=1
ts_counter_2 +=1
ts_counter = [ts_counter_1, ts_counter_2]
# load and correct ground_truth
ground_truth = {}
ground_truth_load = np.load(args.merge[0] + "/ground_truth.npz")
ground_truth_load_2 = np.load(args.merge[1] + "/ground_truth.npz")
ground_truth["images_path"] = []
num_episode_dataset = num_episode_dataset_1
index_slash = args.merge[2].find("/")
index_margin_str = len("/record_")
directory_str = args.merge[2][index_slash+1:]
len_info_1 = [len(ground_truth_load[k]) for k in ground_truth_load.keys()]
num_eps_total_1, num_ts_total_1 = min(len_info_1), max(len_info_1)
len_info_2 = [len(ground_truth_load_2[k]) for k in ground_truth_load_2.keys()]
num_eps_total_2, num_ts_total_2 = min(len_info_2), max(len_info_2)
for idx_, gt_load in enumerate([ground_truth_load, ground_truth_load_2], 1):
for arr in gt_load.files:
if arr == "images_path":
# here, we want to rename just the folder containing the records, hence the black magic
for i in tqdm(range(ts_counter[idx_-1]),#range(len(gt_load["images_path"])),
desc="Update of paths (Folder " + str(1+idx_) + ")"):
# find the "record_" position
path = gt_load["images_path"][i]
end_pos = path.find("/record_")
inter_pos = path.find("/frame") # pos in the complete path.
if idx_ > 1:
episode = str(num_episode_dataset_1 + int(path[end_pos + index_margin_str: inter_pos]))
episode = episode.zfill(3)
new_record_path = "/record_" + episode + path[inter_pos:]
else:
new_record_path = path[end_pos:]
ground_truth["images_path"].append(directory_str + new_record_path)
else:
                    # anything that isn't images_path does not need the path rewrite below
gt_arr = gt_load[arr]
if idx_ > 1:
num_episode_dataset = num_episode_dataset_2
                    # HERE: check before overwriting that the target is random!
if gt_load[arr].shape[0] < num_episode_dataset:
gt_arr = np.repeat(gt_load[arr], num_episode_dataset, axis=0)
if idx_ > 1:
                        # This is the second dataset
if (len(gt_arr) == num_eps_total_2):
                            # This is a per-episode variable (constant within an episode)
ground_truth[arr] = np.concatenate((ground_truth[arr],
gt_arr[:num_episode_dataset_2]), axis=0)
elif (len(gt_arr) == num_ts_total_2): # a timesteps changing variable
ground_truth[arr] = np.concatenate((ground_truth[arr],
gt_arr[:ts_counter_2]), axis=0)
else:
assert 0 == 1, "No compatible variable in the stored ground truth for the second dataset {}" \
.format(args.merge[1])
else:
# This is the first dataset
if(len(gt_arr) == num_eps_total_1):
                            # This is a per-episode variable (constant within an episode)
ground_truth[arr] = gt_arr[:num_episode_dataset_1]
elif(len(gt_arr) == num_ts_total_1): # a timesteps changing variable
ground_truth[arr] = gt_arr[:ts_counter_1]
else:
assert 0 ==1 , "No compatible variable in the stored ground truth for the first dataset {}"\
.format(args.merge[0])
# save the corrected ground_truth
np.savez(args.merge[2] + "/ground_truth.npz", **ground_truth)
# load and correct the preprocessed data (actions, rewards etc)
preprocessed = {}
preprocessed_load = np.load(args.merge[0] + "/preprocessed_data.npz")
preprocessed_load_2 = np.load(args.merge[1] + "/preprocessed_data.npz")
dataset_1_size = preprocessed_load["actions"].shape[0]
dataset_2_size = preprocessed_load_2["actions"].shape[0]
for idx, prepro_load in enumerate([preprocessed_load, preprocessed_load_2]):
for arr in prepro_load.files:
pr_arr = prepro_load[arr]
to_class = None
if arr == "episode_starts":
to_class = bool
elif arr == "actions_proba" or arr =="rewards":
to_class = float
else:
to_class = int
            # all of this data varies per timestep (rather than per episode)
if preprocessed.get(arr, None) is None: #for the first dataset
preprocessed[arr] = pr_arr.astype(to_class)[:ts_counter_1]
else:# for the second dataset
preprocessed[arr] = np.concatenate((preprocessed[arr].astype(to_class),
pr_arr[:ts_counter_2].astype(to_class)), axis=0)
if 'continual_learning_labels' in args:
if preprocessed.get(CL_LABEL_KEY, None) is None:
preprocessed[CL_LABEL_KEY] = \
np.array([args.continual_learning_labels[idx] for _ in range(ts_counter_1)])
else:
preprocessed[CL_LABEL_KEY] = \
np.concatenate((preprocessed[CL_LABEL_KEY], np.array([args.continual_learning_labels[idx]
for _ in range(ts_counter_2)])), axis=0)
print("The total timesteps: ", ts_counter_1+ts_counter_2)
print("The total episodes: ", num_episode_dataset_1+num_episode_dataset_2)
for k in preprocessed:
print(k)
print(preprocessed[k].shape)
for k in ground_truth:
print(k)
print(ground_truth[k].shape)
np.savez(args.merge[2] + "/preprocessed_data.npz", ** preprocessed)
# remove the old folders
# shutil.rmtree(args.merge[0])
# shutil.rmtree(args.merge[1])
if __name__ == '__main__':
    main()
from pathlib import Path
import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing.image import load_img
class MaskSequence(keras.utils.Sequence):
def __init__(self, base_path, split, batch_size, img_size):
self.batch_size = batch_size
self.img_size = img_size
self.input_img_paths, self.target_img_paths = self._load_paths(Path(base_path), split + ".txt")
def __len__(self):
return len(self.target_img_paths) // self.batch_size
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
i = idx * self.batch_size
batch_input_img_paths = self.input_img_paths[i: i + self.batch_size]
batch_target_img_paths = self.target_img_paths[i: i + self.batch_size]
x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
for j, path in enumerate(batch_input_img_paths):
img = load_img(path, target_size=self.img_size)
x[j] = img
y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
for j, path in enumerate(batch_target_img_paths):
img = load_img(path, target_size=self.img_size, color_mode="grayscale")
y[j] = np.expand_dims(img, 2)
# Ground truth labels are 1, 2, 3. Subtract one to make them 0, 1, 2:
# y[j] -= 1
return x, y
@staticmethod
def _load_paths(directory, file):
        with open(directory / file) as f:
rows = f.readlines()
rows = map(lambda x: x.strip(), rows)
rows = map(lambda x: x.split(" "), rows)
rows = list(rows)
inputs, outputs = zip(*rows)
absolute_path = lambda x: str((directory / x).resolve())
inputs, outputs = map(absolute_path, inputs), map(absolute_path, outputs)
return list(inputs), list(outputs)
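# Usage sketch (added; the directory layout and sizes are hypothetical): base_path is
# expected to contain e.g. train.txt listing "input_path target_path" pairs per line.
#   seq = MaskSequence("data/masks", split="train", batch_size=8, img_size=(160, 160))
#   x, y = seq[0]   # x: (8, 160, 160, 3) float32, y: (8, 160, 160, 1) uint8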
| {"hexsha": "7b2e0c5eede2942dbe7d1c75add953385cc3089d", "size": 1916, "ext": "py", "lang": "Python", "max_stars_repo_path": "fashiondatasets/MaskSequence.py", "max_stars_repo_name": "NiklasHoltmeyer/FashionDatasets", "max_stars_repo_head_hexsha": "a9309f90abd6bff739ecffafd69cf52506f2cb97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fashiondatasets/MaskSequence.py", "max_issues_repo_name": "NiklasHoltmeyer/FashionDatasets", "max_issues_repo_head_hexsha": "a9309f90abd6bff739ecffafd69cf52506f2cb97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fashiondatasets/MaskSequence.py", "max_forks_repo_name": "NiklasHoltmeyer/FashionDatasets", "max_forks_repo_head_hexsha": "a9309f90abd6bff739ecffafd69cf52506f2cb97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.1020408163, "max_line_length": 103, "alphanum_fraction": 0.6252609603, "include": true, "reason": "import numpy", "num_tokens": 457} |
%% Copyright (C) 2014, 2016-2017, 2019, 2022 Colin B. Macdonald
%% Copyright (C) 2020 Mike Miller
%% Copyright (C) 2020 Fernando Alvarruiz
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
%% -*- texinfo -*-
%% @defun mat_rclist_asgn (@var{A}, @var{r}, @var{c}, @var{B})
%% Private helper routine for sym array assignment using lists.
%%
%% @code{(R(i),C(i))} specify entries of the matrix @var{A}.
%% We execute @code{A(R(i),C(i)) = B(i)}.
%%
%% Notes:
%% @itemize
%% @item @var{B} is accessed with linear indexing.
%% @item @var{B} might be a scalar, used many times.
%% @item @var{A} might need to get bigger, if so it will be padded
%% with zeros.
%% @end itemize
%%
%% @end defun
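%% Example (added for illustration; this is a private @sym helper, normally reached
%% through sym indexed assignment rather than called directly):
%%   A = sym (zeros (2, 2));
%%   z = mat_rclist_asgn (A, [1 2], [2 1], sym ([10 20]));
%%   % now z(1,2) == 10 and z(2,1) == 20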
function z = mat_rclist_asgn(A, r, c, B)
if (isempty (r) && isempty (c) && (isempty (B) || isscalar (B)))
z = A;
return
end
if ~( isvector(r) && isvector(c) && (length(r) == length(c)) )
error('this routine is for a list of rows and cols');
end
if ((numel(B) == 1) && (numel(r) > 1))
B = repmat(B, size(r));
end
if (length(r) ~= numel(B))
error('not enough/too much in B')
end
% Easy trick to copy A into larger matrix AA:
% AA = sp.Matrix.zeros(n, m)
% AA[0, 0] = A
  % Also useful: .copyin_matrix
cmd = { '(A, r, c, B) = _ins'
'# B linear access fix, transpose for sympy row-based'
'if B is None or not B.is_Matrix:'
' B = sp.Matrix([[B]])'
'BT = B.T'
'# make a resized copy of A, and copy existing stuff in'
'if isinstance(A, list):'
' assert len(A) == 0, "unexpectedly non-empty list: report bug!"'
' n = max(max(r) + 1, 1)'
' m = max(max(c) + 1, 1)'
' AA = [[0]*m for i in range(n)]'
'elif A is None or not isinstance(A, MatrixBase):'
' # we have non-matrix, put in top-left'
' n = max(max(r) + 1, 1)'
' m = max(max(c) + 1, 1)'
' AA = [[0]*m for i in range(n)]'
' AA[0][0] = A'
'else:'
' # build bigger matrix'
' n = max(max(r) + 1, A.rows)'
' m = max(max(c) + 1, A.cols)'
' AA = [[0]*m for i in range(n)]'
' # copy current matrix in'
' for i in range(A.rows):'
' for j in range(A.cols):'
' AA[i][j] = A[i, j]'
'# now insert the new bits from B'
'for i, (r, c) in enumerate(zip(r, c)):'
' AA[r][c] = BT[i]'
'return sp.Matrix(AA),' };
rr = num2cell(int32(r-1));
cc = num2cell(int32(c-1));
z = pycall_sympy__ (cmd, A, rr, cc, B);
% a simpler earlier version, but only for scalar r,c
%cmd = { '(A, r, c, b) = _ins'
% 'if not A.is_Matrix:'
% ' A = sp.Matrix([[A]])'
% 'AA = sp.Matrix.zeros(max(r+1, A.rows), max(c+1, A.cols))'
% 'AA[0, 0] = A'
% 'AA[r, c] = b'
% 'return AA,' };
end
| {"author": "cbm755", "repo": "octsympy", "sha": "c1ecd1e08f027d5101d0f4250dfc496aa98c8bcd", "save_path": "github-repos/MATLAB/cbm755-octsympy", "path": "github-repos/MATLAB/cbm755-octsympy/octsympy-c1ecd1e08f027d5101d0f4250dfc496aa98c8bcd/inst/@sym/private/mat_rclist_asgn.m"} |
import numpy as np
import os
import textwrap
import tkinter as tk
import tkinter.ttk as tk_ttk
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
TREEVIEW_SELECT_EVENT = '<<treeview_select>>'
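# Added annotation: custom virtual event; DirectoryViewer generates it on its parent
# whenever the tree selection changes, and FullDisplay binds it to refresh the plot.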
class FullDisplay(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.grid(row=0, column=0, sticky='nsew')
self.tree = DirectoryViewer(self)
self.canvas = GraphPlotter(self)
self.bind(TREEVIEW_SELECT_EVENT, self.treeview_new_selection)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=3)
self.columnconfigure(1, weight=10)
def treeview_new_selection(self, event):
self.canvas.draw_plot(self.tree.get_selected_file())
class DirectoryViewer(tk.Frame):
def __init__(self, master=None, path='.'):
super().__init__(master)
self.master = master
self.grid(row=0, column=0, sticky='nswe')
self.setup_tree(path)
def tell_master_select(self, event):
self.master.event_generate(TREEVIEW_SELECT_EVENT)
def get_selected_file(self):
return self.build_path(self.tree.focus())
def build_path(self, curr_id):
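        # Added annotation: rebuilds the absolute filesystem path of a tree node by
        # walking parent links up to the root and joining the stored item names.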
curr_item = self.tree.item(curr_id)
parent_id = self.tree.parent(curr_id)
curr_item_path = curr_item['text']
while parent_id != '':
parent = self.tree.item(parent_id)
curr_item_path = os.path.join(parent['text'], curr_item_path)
curr_id = parent_id
curr_item = self.tree.item(curr_id)
parent_id = self.tree.parent(curr_id)
return curr_item_path
def setup_tree(self, path):
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.tree = tk_ttk.Treeview(self)
self.tree.bind('<<TreeviewSelect>>', self.tell_master_select)
self.tree.grid(row=0, column=0, sticky='nswe')
ysb = tk_ttk.Scrollbar(self,
orient='vertical',
command=self.tree.yview)
ysb.grid(row=0, column=1, sticky='ns')
xsb = tk_ttk.Scrollbar(self,
orient='horizontal',
command=self.tree.xview)
xsb.grid(row=1, column=0, sticky='ew')
self.tree.configure(yscroll=ysb.set, xscroll=xsb.set)
path = os.path.abspath(path)
self.path = path
self.tree.heading('#0', text=path, anchor='w')
root_node = self.tree.insert('', 'end', text=path, open=True)
self.opened = set([root_node])
for p in os.listdir(path):
self.insert_node(root_node, p, os.path.join(path, p))
self.tree.bind('<<TreeviewOpen>>', self.open_node)
# insert_node() and open_node() are for lazy loading
def insert_node(self, parent, text, path):
node = self.tree.insert(parent, 'end', text=text, open=False)
if os.path.isdir(path):
self.tree.insert(node, 'end') # dummy to show the dir icon
def open_node(self, event):
curr_node = self.tree.focus()
abspath = self.build_path(curr_node)
if os.path.isdir(abspath) and curr_node not in self.opened:
self.tree.delete(self.tree.get_children(curr_node))
for p in os.listdir(abspath):
self.insert_node(curr_node, p, os.path.join(abspath, p))
self.opened.add(curr_node)
# process_directory() does eager loading
def process_directory(self, parent, path):
for p in os.listdir(path):
abspath = os.path.join(path, p)
isdir = os.path.isdir(abspath)
oid = self.tree.insert(parent, 'end', text=p, open=False)
if isdir:
self.process_directory(oid, abspath)
class GraphPlotter(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.grid(row=0, column=1, sticky='nsew')
self.load_plotters()
self.setup_canvas()
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
def setup_canvas(self):
self.figure = matplotlib.figure.Figure(figsize=(5, 5), dpi=100)
self.canvas = FigureCanvasTkAgg(self.figure, self)
self.draw_plot(None)
self.canvas.get_tk_widget().grid(column=0, row=0, sticky='nsew')
def load_plotters(self):
import data_browser.plotting_modules
self.plotters = {module.FILE_EXTENSION: module.DEFAULT_PLOTTER
for module
in data_browser.plotting_modules.__all__}
def draw_plot(self, file):
self.figure.clf()
if file is None or os.path.isdir(file):
plot_dir(file, self.figure)
elif os.path.splitext(file)[1] in self.plotters:
try:
self.plotters[os.path.splitext(file)[1]](file, self.figure)
except Exception as e:
plot_error(e, self.figure)
else:
plot_error(ValueError('cannot plot {}'.format(file)), self.figure)
self.canvas.draw_idle()
def plot_error(error, fig):
msg = 'An error occurred:\n'
msg += type(error).__name__ + '\n'
msg += '\n'.join(textwrap.wrap(str(error), 60))
ax = fig.add_subplot(111)
ax.text(0, 0, msg)
ax.set_axis_off()
def plot_dir(file, fig):
ax = fig.add_subplot(111)
ax.set_axis_off()
def _main():
root = tk.Tk()
root.geometry('800x500')
root.title('Data Browser')
app = FullDisplay(master=root)
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
app.mainloop()
if __name__ == '__main__':
_main()
| {"hexsha": "4474ea2f2b460f2931327cd2b8210c28ed377ca0", "size": 5683, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_browser/data_browser.py", "max_stars_repo_name": "gfetterman/file_browser", "max_stars_repo_head_hexsha": "8f54fb0f3a4a1fcce93b98ae44431accd943ac00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_browser/data_browser.py", "max_issues_repo_name": "gfetterman/file_browser", "max_issues_repo_head_hexsha": "8f54fb0f3a4a1fcce93b98ae44431accd943ac00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_browser/data_browser.py", "max_forks_repo_name": "gfetterman/file_browser", "max_forks_repo_head_hexsha": "8f54fb0f3a4a1fcce93b98ae44431accd943ac00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8650306748, "max_line_length": 78, "alphanum_fraction": 0.6165757522, "include": true, "reason": "import numpy", "num_tokens": 1339} |
[STATEMENT]
lemma rt_graph_not_dip [dest]:
"\<And>ip ip' \<sigma> dip. (ip, ip') \<in> rt_graph \<sigma> dip \<Longrightarrow> ip \<noteq> dip"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ip ip' \<sigma> dip. (ip, ip') \<in> rt_graph \<sigma> dip \<Longrightarrow> ip \<noteq> dip
[PROOF STEP]
unfolding rt_graph_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>ip ip' \<sigma> dip. (ip, ip') \<in> {uu_. \<exists>ip ip' dsn dsk hops. uu_ = (ip, ip') \<and> ip \<noteq> dip \<and> rt (\<sigma> ip) dip = Some (dsn, dsk, val, hops, ip')} \<Longrightarrow> ip \<noteq> dip
[PROOF STEP]
by auto
module LibRealSense
# Load in `deps.jl`, complaining if it does not exist
const depsjl_path = joinpath(@__DIR__, "..", "deps", "deps.jl")
if !isfile(depsjl_path)
error("LibRealSense was not build properly. Please run Pkg.build(\"LibRealSense\").")
end
include(depsjl_path)
# Module initialization function
function __init__()
check_deps()
end
include("CEnum.jl")
using .CEnum
include("ctypes.jl")
export Ctm, Ctime_t, Cclock_t
include(joinpath(@__DIR__, "..", "gen", "rs2_common.jl"))
include(joinpath(@__DIR__, "..", "gen", "rs2_api.jl"))
foreach(names(@__MODULE__, all=true)) do s
if startswith(string(s), "rs2_") || startswith(string(s), "RS2_")
@eval export $s
end
end
const RS2_API_VERSION = RS2_API_MAJOR_VERSION * 10000 + RS2_API_MINOR_VERSION * 100 + RS2_API_PATCH_VERSION
const RS2_API_VERSION_STR = "$(RS2_API_MAJOR_VERSION).$(RS2_API_MINOR_VERSION).$(RS2_API_PATCH_VERSION)"
export RS2_API_VERSION, RS2_API_VERSION_STR
end # module
| {"hexsha": "575c309884d625c1fd6887594b6d814be89b08da", "size": 977, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/LibRealSense.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_stars_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/LibRealSense.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_issues_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/LibRealSense.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/RealSense.jl-1d20419d-a1bd-598e-846b-24709a6a9336", "max_forks_repo_head_hexsha": "3cdc32505064468416fc891dfd877daf265d483a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4054054054, "max_line_length": 107, "alphanum_fraction": 0.7308085977, "num_tokens": 287} |
import time
from random import *
import numpy as np
import matplotlib.pyplot as plt
def question_1():
    # Initialize the random generator
    seed()
    # Return a random number in the given range
    print(randrange(-10, 8))
    # Return a random integer in the given range
    print(randint(0, 20))
    # Return a random element from the given sequence
    print(choice([1, 2, 5, 3, 5, 7]))
    # Return a sample of the given size from the sequence
    print(sample([1, 2, 3, 5, -4, 'ss'], 3))
    # Return a float between 0 and 1
    print(random())
    # Return a random float between the two given bounds
    print(uniform(1, 2))
    # Return a random float between the two given bounds, with a specified mode
    print(triangular(0.2, 0.9, mode=0.4))
    x = [1, 2, 3, 4]
    # Shuffle the sequence in place
shuffle(x)
print(x)
def question_2():
for s in "PYTHON":
if s == "T":
continue
print(s, end="")
print()
for s in "PYTHON":
if s == "T":
break
print(s, end="")
print()
for s in "BIT":
for i in range(10):
print(s, end="")
if s == "I":
break
def question_3():
    # Convert the current time to a struct_time in local time
    print(time.localtime())
    # Format the current local time as a readable string
    print(time.asctime())
    time.sleep(2 + 3)
    # Format the current time as a readable string
    print(time.ctime())
    # Floating-point seconds elapsed since the 1970 epoch
print(time.time())
print(time.process_time() / time.process_time_ns())
def question_4():
    # Single branch
    s = eval(input("Please enter an integer: "))
    if s % 2 == 0:
        print("This is an even number")
    print("The number entered is:", s)
    # Two-way branch
    if True:
        print("statement 1")
    else:
        print("statement 2")
    # Compact form: expresses a simple two-way branch
    guess = eval(input())
    print("You guessed {}".format("right" if guess == 99 else "wrong"))
    # Multi-way branch
if True:
print("1")
elif True:
print("2")
else:
print("3")
def question_6():
try:
raise IOError
except IOError:
print("IOError")
try:
raise SystemExit
except SystemExit:
print("SystemExit")
try:
raise OverflowError
except OverflowError:
print("OverflowError")
try:
raise EOFError
except EOFError:
print("EOFError")
def f(x0) -> int:
return x0 ** 2
def question_7():
"""
    Monte Carlo method: estimate the definite integral (value) of y = x^2 on [0, 1]
"""
    # Number of random points
n = 10000
    # Bounds of the rectangular region
x_min, x_max = 0.0, 1.0
y_min, y_max = 0.0, 1.0
    # Throw points uniformly at random inside the rectangle
    x = np.random.uniform(x_min, x_max, n) # uniform distribution
y = np.random.uniform(y_min, y_max, n)
    # Count the points that fall below the curve y = x^2
res = sum(np.where(y < f(x), 1, 0))
    # Approximate the integral (the essence of Monte Carlo: use a statistic to approximate the true value)
integral = res / n
print('integral: ', integral)
    # Plot the result to have a look
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot(x, y, 'ro', markersize=1)
    plt.axis('equal') # keep the aspect ratio
    axes.plot(np.linspace(x_min, x_max, 10), f(np.linspace(x_min, x_max, 10)), 'b-') # the function curve
plt.show()
| {"hexsha": "d964f09281ac1a16b182274671829ab480293f5a", "size": 2703, "ext": "py", "lang": "Python", "max_stars_repo_path": "basic_exercises/experiment_3.py", "max_stars_repo_name": "vuhe/LearnPython", "max_stars_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basic_exercises/experiment_3.py", "max_issues_repo_name": "vuhe/LearnPython", "max_issues_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basic_exercises/experiment_3.py", "max_forks_repo_name": "vuhe/LearnPython", "max_forks_repo_head_hexsha": "0a081a85456557ae542925cce950b23313c3c9b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.2635135135, "max_line_length": 92, "alphanum_fraction": 0.5290418054, "include": true, "reason": "import numpy", "num_tokens": 1094} |
/*
* VisualServoing is a tutorial program for introducing students to
* robotics.
*
* Copyright 2009, 2010 Kevin Quigley <kevin.quigley@gmail.com> and
* Marsette Vona <vona@ccs.neu.edu>
*
 * VisualServoing is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* VisualServoing is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* as the file COPYING along with VisualServoing. If not, see
* <http://www.gnu.org/licenses/>.
*/
// system headers
#include <cstdio>
#include <fstream>
#include <iostream>
#include <signal.h>
#include <unistd.h>
// CTY arm project
#include "ArmControl.hpp"
#include "ArmGui.hpp"
#include "ArmGuiGTK.hpp"
#include "IK.hpp"
#include "ImageProcessing.hpp"
#include "Params.hpp"
// OpenCV
#include <cv.h>
#include <highgui.h>
// Boost
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/io.hpp>
using namespace boost::numeric;
#if RUN_THREADED
#include <errno.h>
#include <pthread.h>
#endif
// constants
#define FOCAL_LENGTH 481.0 // calc p65 //TBD - add calculation info
#define DIAMETER .038 //!< Diameter of ball in meters (measured)
#define MIN_TRACKING_RADIUS_PIXELS 2.0 //!< Minimum tracking radius required
void mark_images(const ublas::vector<double>& target, const CvSeq* circles,
const Params& params, Images& images);
void calibrate_offsets(std::string& file, ublas::vector<double>& offsets);
void update_gui_position (ArmControl& ctl, Params& params);
void handler(int sig);
ArmControl* sig_ctl = 0; //!< Pointer to ArmControl for stopping arm
//! movement upon received signal.
/*!
 * \brief Starting function containing main control loop.
* Start up and configure all objects, spin off the GUI, and continue
* in main loop until told to stop.
*/
int main(int argc, char** argv) {
// set signal handling
struct sigaction action;
action.sa_handler = &handler;
if (sigaction(SIGHUP, &action, NULL) < 0)
printf("Error setting action for SIGHUP\n");
if (sigaction(SIGINT, &action, NULL) < 0)
printf("Error setting action for SIGINT\n");
if (sigaction(SIGQUIT, &action, NULL) < 0)
printf("Error setting action for SIGQUIT\n");
if (sigaction(SIGILL, &action, NULL) < 0)
printf("Error setting action for SIGILL\n");
if (sigaction(SIGABRT, &action, NULL) < 0)
printf("Error setting action for SIGABRT\n");
if (sigaction(SIGFPE, &action, NULL) < 0)
printf("Error setting action for SIGFPE\n");
if (sigaction(SIGSEGV, &action, NULL) < 0)
printf("Error setting action for SIGSEGV\n");
if (sigaction(SIGTERM, &action, NULL) < 0)
printf("Error setting action for SIGTERM\n");
Images images;
images.set = false;
images.bgr = 0;
images.filtered_bgr = 0;
images.filtered_hls = 0;
Params params;
init_params(params);
CvSeq* circles = 0;
unsigned int cameraID(0);
std::string port("/dev/ttyS0");
std::string config_file;
std::string flags = "hp:f:";
int opt;
bool help = false;
while ((opt = getopt(argc, argv, flags.c_str())) > 0) {
switch (opt) {
case 'h': help = true; break;
case 'p': port = optarg; break;
case 'f': config_file = optarg; break;
default: break;
}
}
if (help) {
printf("Visual Servo Arm Options:\n"
" -h Print this help menu\n"
" -f <file> Use a calibration file to set joint offsets\n"
" -p <port> Use an alternate serial port (default: /dev/ttyS0\n");
exit(0);
}
CvCapture* capture(0);
IplImage* frame(0);
ImageProcessing ip;
ublas::vector<double> features(3);
ublas::vector<double> delta_angles(3);
ublas::vector<double> target_pos(3);
ublas::vector<double> grab_target(3);
target_pos(0) = 0.0; //x
target_pos(1) = 0.0; //y
target_pos(2) = 0.2; //Z
grab_target(0) = 0.0; //x
grab_target(1) = 0.0; //y
grab_target(2) = 0.05; //Z
// div by focal_length to normalize target x,y
ublas::vector<double> target_pos_norm(target_pos);
target_pos_norm(0) /= FOCAL_LENGTH;
target_pos_norm(1) /= FOCAL_LENGTH;
IK ik;
ik.setTarget(target_pos_norm);
ik.setLengths(0.0, .152, 0.122, 0.075);
ik.setV(.015, -.150, .25); //m, m, rad
ArmGuiGTK* gui = ArmGuiGTK::instance();
gui->update(images, params);
#if RUN_THREADED
pthread_t guiTID;
switch (pthread_create(&guiTID, 0, ArmGui::threadRun, gui)) {
case EAGAIN: printf("Max threads reached\n"); return -1;
case EINVAL: printf("Invalid thread attributes\n"); return -1;
case EPERM: printf("Invalid permissions\n"); return -1;
default: break;
}
#endif
SSC32Controller ssc(port);
ArmControl ctl(ssc);
sig_ctl = &ctl;
ctl.setRateLimit(500);
ublas::vector<double> off(ublas::zero_vector<double>(NUM_JOINTS));
calibrate_offsets(config_file, off);
ctl.setOffset(off);
ublas::vector<double> angle_limits(ublas::vector<double>(NUM_JOINTS));
// max limits
angle_limits(0) = 3.0/8.0 * M_PI;
angle_limits(1) = M_PI_2;
angle_limits(2) = M_PI - .70; // off arm brace
angle_limits(3) = M_PI_2;
std::cout << "max limits: " << angle_limits << std::endl;
ctl.setMaxAngle(angle_limits);
ArmControl::radiansToDegrees(angle_limits);
for (int i = 0; i < NUM_JOINTS; i++)
params.ctl.max_limits[i] = angle_limits(i);
params.limits_changed = true;
// min limits
angle_limits(0) = -3.0/8.0 * M_PI;
angle_limits(1) = -M_PI_2 + 0.35; // off spring pedestal
// angle_limits(2) = 0;
angle_limits(2) = -50.0*2.0*M_PI/360.0;
angle_limits(3) = -M_PI_2;
ctl.setMinAngle(angle_limits);
std::cout << "min limits: " << angle_limits << std::endl;
ArmControl::radiansToDegrees(angle_limits);
for (int i = 0; i < NUM_JOINTS; i++)
params.ctl.min_limits[i] = angle_limits(i);
params.limits_changed = true;
ctl.park();
update_gui_position(ctl, params);
params.current_mode = PARK;
while (params.run) { //mainloop
gui->update(images, params);
#if !RUN_THREADED
gui->run();
#endif
if (!params.run) continue; //to next mainloop iteration
if (params.gui.estop) {
params.gui.estop = false;
printf("ESTOP received\n");
ctl.stop();
if (params.current_mode != ESTOP) {
params.current_mode = ESTOP;
}
}
// all activities respond to these new modes
switch (params.new_mode) {
case HOME:
params.new_mode = NONE;
printf("*** -> HOME\n");
ctl.home();
update_gui_position(ctl, params);
params.current_mode = READY;
break;
case PARK:
printf("park request\n");
params.new_mode = NONE;
printf("*** -> PARK\n");
ctl.park();
update_gui_position(ctl, params);
params.current_mode = PARK;
break;
default:
break;
}
// all activities respond to these current modes
switch (params.current_mode) {
case HOME:
printf("HOME->READY\n");
params.current_mode = READY;
break;
case PARK:
// getting out of PARK handled above
usleep(10000); // 10ms
case BUSY:
printf("BUSY -> READY\n");
if (!ctl.busy())
params.current_mode = READY;
break;
default:
break;
}
if (params.activity == KINEMATICS) {
usleep(10000); // 10ms
ctl.slaveWrist(false);
ublas::vector<double> new_position(NUM_JOINTS);
if (params.current_mode == READY) {
switch (params.new_mode) {
case MOVE:
params.new_mode = NONE;
printf("Moving\n");
for (int i = 0; i < NUM_JOINTS; i++ )
new_position(i) = params.gui.new_theta[i];
ArmControl::degreesToRadians(new_position);
ctl.moveToPosition(new_position);
update_gui_position(ctl, params);
break;
case DRAW:
params.new_mode = NONE;
printf("Drawing\n");
if (params.ctl.holding_marker) {
//ctl.drawX();
} else {
params.new_mode = ERROR;
params.error = "Must hold marker to draw.";
}
break;
// end movement modes
case GRAB:
params.new_mode = NONE;
printf("Grab marker\n");
if (!params.ctl.holding_marker) {
ctl.grabMarker();
//sleep(1);
params.ctl.holding_marker = true;
} else {
printf("error set\n");
params.error_set = true;
params.error = "Marker already held\n";
}
break;
case RELEASE:
params.new_mode = NONE;
printf("Release marker\n");
if (params.ctl.holding_marker) {
ctl.openGrip();
params.ctl.holding_marker = false;
} else {
params.error_set = true;
params.error = "Marker not being held\n";
}
break;
default:
break;
}
}
// update param struct
continue; //to next mainloop iteration
} //end of kinematics
//
// Setup code for Image Processing and Visual Servoing
//
if (capture == 0) {
capture = cvCreateCameraCapture(cameraID);
if (capture == 0) {
printf("failed to init capture device\n");
sleep(1); continue; //to next mainloop iteration
}
printf("initialized capture device\n");
printf("allocating images\n");
images.bgr = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 3);
#if FLOAT_HLS
images.bgr32 = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_32F, 3);
images.hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_32F, 3);
#else
images.hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 3);
#endif
images.filtered_bgr = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 1);
images.filtered_hls = cvCreateImage(IMAGE_SIZE, IPL_DEPTH_8U, 1);
if (images.bgr == 0 || images.hls == 0
#if FLOAT_HLS
|| images.bgr32 == 0
#endif
|| images.filtered_bgr == 0 || images.filtered_hls == 0) {
params.current_mode = ERROR;
params.error = "Cannot create image holders";
std::cout << params.error << std::endl;
params.run = false;
continue; //to next mainloop iteration
}
//some images might be displayed before being initialized
cvSet(images.bgr, cvScalar(0,0,0));
#if FLOAT_HLS
cvSet(images.bgr32, cvScalar(0,0,0));
#endif
cvSet(images.hls, cvScalar(0,0,0));
cvSet(images.filtered_bgr, cvScalar(0));
cvSet(images.filtered_hls, cvScalar(0));
images.set = true;
} //capture was 0
//
// Image Processing
//
frame = cvQueryFrame(capture);
if (frame == 0) {
params.current_mode = ERROR;
params.error = "Null frame";
std::cout << params.error << std::endl;
params.run = false;
continue; //to next mainloop iteration
}
cvResize(frame, images.bgr);
ctl.slaveWrist(true);
ip.filterImages(images, params);
if (!params.gui.target_set) continue;
if (params.activity == VS ||
params.activity == IP) {
//find ball
circles = ip.findBall(images, params);
mark_images(target_pos, circles, params, images);
} //find ball
if (params.activity != VS) {
usleep(1000);
continue; //to next mainloop iteration
}
//
// Visual Servoing code
//
switch (params.new_mode) {
case GRAB: params.new_mode = NONE; ctl.grabBall(); break;
case RELEASE: params.new_mode = NONE; ctl.openGrip(); break;
default: break;
}
printf("current_mode = %d\n", params.current_mode);
switch (params.current_mode) {
case READY:
printf("old: READY\t");
switch (params.new_mode) {
case MOVE:
printf("new: MOVE\n");
params.new_mode = NONE;
params.current_mode = MOVE;
break;
case PAUSE:
printf("new: PAUSE\n");
params.new_mode = NONE;
params.current_mode = PAUSE;
continue; //to next mainloop iteration
default:
break;
}
break;
case PAUSE:
printf("old: PAUSE\t");
if (params.new_mode == MOVE) {
printf("new: MOVE\n");
params.new_mode = NONE;
params.current_mode = MOVE;
break;
}
break;
case MOVE:
printf("old: MOVE\t");
if (params.new_mode == PAUSE) {
printf("new: PAUSE\n");
params.new_mode = NONE;
//ctl.stop();
params.current_mode = PAUSE;
continue; //to next mainloop iteration
}
break;
default:
break;
}
if (circles != 0 && circles->total > 0 &&
params.gui.target_set &&
(params.current_mode == MOVE || params.current_mode == GRAB)) {
ublas::vector<double> features(3);
float* p = (float*) cvGetSeqElem(circles, 0);
printf("first circle at (%d,%d) radius %d\n",
cvRound(p[0]), cvRound(p[1]), cvRound(p[2]));
features(0) = p[0]; features(1) = p[1]; features(2) = p[2];
if (features(2) >= MIN_TRACKING_RADIUS_PIXELS) {
// rotate/translate to center origin, x left, y up
features(0) = (images.hls->width / 2.0) - features(0); // x
if (images.hls->origin == 0) // top left origin
features(1) = (images.hls->height / 2.0) - features(1); // y
// normalize x & y
features(0) /= FOCAL_LENGTH; features(1) /= FOCAL_LENGTH;
// circular approximation of Z
// Z = D*f / radius*2
features(2) = DIAMETER * FOCAL_LENGTH / (features(2) * 2.0);
printf("Norm features x,y = (%3f, %3f), Z = %3f\n",
features(0), features(1), features(2));
printf("Norm target x,y = (%3f, %3f), Z = %3f\n",
target_pos_norm(0), target_pos_norm(1), target_pos_norm(2));
std::cout << "current angles: " << ctl.getCurrentAngles() << std::endl;
bool dls = ik.damped_least_squares(features, ctl.getCurrentAngles(),
params, delta_angles);
if (dls && params.current_mode != PARK) {
std::cout << "commanded angle deltas: " << delta_angles << std::endl;
ctl.moveDelta(delta_angles);
}
} else {
std::cout <<
"radius below tracking enable threshold " <<
MIN_TRACKING_RADIUS_PIXELS;
}
} //tracking ball
} //mainloop
#if RUN_THREADED
switch (pthread_join(guiTID, 0)) {
case 0: break; // all ok
case EINVAL:
printf("pthread_join: Invalid thread id %d\n", (int) guiTID); break;
case ESRCH:
printf("pthread_join: Thread ID %d not found\n", (int) guiTID); break;
case EDEADLK:
printf("pthread_join: Deadlock detected\n"); break;
default:
break;
}
#endif
if (images.set) {
printf("releasing images\n");
cvReleaseImage(&(images.bgr));
cvReleaseImage(&(images.hls));
cvReleaseImage(&(images.filtered_hls));
cvReleaseImage(&(images.filtered_bgr));
#ifdef FLOAT_HLS
cvReleaseImage(&(images.bgr32));
#endif
}
if (gui != 0) {
printf("destroying gui\n");
gui->destroy();
gui = 0;
}
if (capture != 0) {
printf("releasing capture device\n");
cvReleaseCapture(&capture);
}
} //main()
/*!
* \brief Markup images with circles and lines.
* Used for giving feedback to the user on the location of the visual
* servo target and where the ball is detected in the image.
*
* \param[in] target Cartesian coordinates of the target in [pixel,
* pixel, meter] units.
* \param[in] circles Sequence of detected circles (u,v,r) in pixels
* \param[in] params Params struct
* \param[in,out] images Images struct
*/
void mark_images(const ublas::vector<double>& target, const CvSeq* circles,
const Params& params, Images& images) {
// draw target cross
if (params.gui.target_set && params.activity == VS) {
// fl * D / Z = apparent diameter, so div by 2 to get apparent radius
double radius = (FOCAL_LENGTH * DIAMETER / target(2)) / 2.0;
// rescale since target(x,y) was normalized using FOCAL_LENGTH
double ih = images.bgr->height/2.0;
double iw = images.bgr->width/2.0;
CvPoint v1 = cvPoint(cvRound(target(0) + iw ),
cvRound(target(1) + ih - radius)); // up
CvPoint v2 = cvPoint(cvRound(target(0) + iw ),
cvRound(target(1) + ih + radius)); // down
CvPoint h1 = cvPoint(cvRound(target(0) + iw - radius),
cvRound(target(1) + ih )); // left
CvPoint h2 = cvPoint(cvRound(target(0) + iw + radius),
cvRound(target(1) + ih )); // right
// Draw target cross for sighting.
cvLine(images.bgr, h1, h2, CV_RGB(0x00, 0x00, 0xff));
cvLine(images.bgr, v1, v2, CV_RGB(0x00, 0x00, 0xff));
}
int num_circles = /*params.activity == VS ? 1 :*/ circles->total;
// draw the ball
for (int i = 0; i < num_circles; i++ ) {
float* p = (float*) cvGetSeqElem(circles, i);
CvPoint pt = cvPoint(cvRound(p[0]),cvRound(p[1]));
cvCircle(images.bgr, pt, cvRound(p[2]), CV_RGB(0xff, 0x00, 0x00));
cvCircle(images.filtered_hls, pt, cvRound(p[2]), cvScalar(192)); //greyscale
//TBD mark filtered_bgr if using that to find the ball
}
}
/*!
* \brief Uses calibration file to set offsets.
* Reads servo numbers and calibration positions from the provided
* file. Offsets are calculated from calibration position differences
* to ideal positions.
*/
void
calibrate_offsets(std::string& file, ublas::vector<double>& offsets){
if (file.empty()) {
offsets(Arm::ELBOW) = 400;
} else {
std::fstream input(file.c_str());
int servo, val;
ublas::vector<double> calibration_position(NUM_JOINTS);
calibration_position(Arm::GRIP) = 1350;
calibration_position(Arm::WRIST) = 1500;
calibration_position(Arm::ELBOW) = 1500;
calibration_position(Arm::SHOULDER) = 1500;
calibration_position(Arm::BASE) = 1500;
std::cout << "cal: " << calibration_position << std::endl;
std::cout << "grip: " << Arm::GRIP << std::endl;
while (!input.eof()) {
input >> std::skipws >> servo >> val;
printf("servo: %d, val: %d, cal: %g\t",
servo, val, calibration_position(servo));
offsets[servo] = val - calibration_position(servo);
printf("offset: %g\n", offsets(servo));
}
std::cout << "off: " << offsets << std::endl;
}
}
/*!
* \brief Update params with current angles.
* Sets current angles of ctl in struct params to be picked up by GUI.
*/
void
update_gui_position (ArmControl& ctl, Params& params) {
ublas::vector<double> current_theta(ctl.getCurrentAngles());
ArmControl::radiansToDegrees(current_theta);
for (int i = 0; i < NUM_JOINTS; i++) {
params.ctl.current_theta[i] = current_theta(i);
}
params.position_changed = true;
}
/*!
* \brief Signal handler function for stopping threads.
*
* \param[in] sig The received signal.
*/
void handler(int sig) {
if (sig_ctl == 0) {
printf("No control object for emergency shutdown!\n");
} else {
sig_ctl->stop();
}
exit(sig);
}
| {"hexsha": "0dbc5c42afbc92ce92ea827adfc0d1d19f68e9d9", "size": 19550, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_stars_repo_name": "Maria-Paulacf/PlumaBot", "max_stars_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 227.0, "max_stars_repo_stars_event_min_datetime": "2021-01-20T05:34:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:43:05.000Z", "max_issues_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_issues_repo_name": "Maria-Paulacf/PlumaBot", "max_issues_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-04-22T05:56:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-26T06:00:17.000Z", "max_forks_repo_path": "Source/rvctools 1/robot/interfaces/crustcrawler/VisualServoing.cpp", "max_forks_repo_name": "Maria-Paulacf/PlumaBot", "max_forks_repo_head_hexsha": "d4bf2e667b88e955f40e33d55db2a8f22c35b47b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 239.0, "max_forks_repo_forks_event_min_datetime": "2021-01-28T02:59:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:02:17.000Z", "avg_line_length": 28.6656891496, "max_line_length": 80, "alphanum_fraction": 0.6124296675, "num_tokens": 5366} |
import re
import argparse
import emoji
import MeCab
import numpy as np
import matplotlib.pyplot as plt
mecab = MeCab.Tagger('-Ochasen')
letters_pattern = re.compile(r'[a-zA-Z]+')
bracket_pairs = [['[', ']'], ['(', ')'], ['「', '」'], ['『', '』'], ['(', ')'],
['(', ')'], ['(', ')']]
# Non-breaking space symbol for html
symbols = ['&nbsp;', '&lt;', '&gt;', '&amp;', '&quot;', '&#39;',
           '&cent;', '&pound;', '&yen;', '&euro']
def has_target_postag(node):
tokens = []
has_noun = False
has_adj = False
while node:
tokens.append(node.surface)
features = node.feature.split(',')
tag = features[0]
tag_type = features[1]
if tag == '名詞' and tag_type == '一般':
has_noun = True
#if tag == '形容詞':
#has_adj = True
node = node.next
return tokens[1:-1], has_noun # and has_adj
def has_en_word(tokens):
has_letter = False
for token in tokens:
if letters_pattern.findall(token):
has_letter = True
break
return has_letter
def remove_bracket_content(text, bracket_pairs):
low = 0
high = 0
for left_b, right_b in bracket_pairs:
low = text.find(left_b)
high = text.find(right_b, low)
while low != -1 and high != -1:
content = text[low:high + 1]
text = text.replace(content, '')
low = text.find(left_b)
high = text.find(right_b, low)
return text
def remove_special_symbol(text):
for symbol in symbols:
text = text.replace(symbol, '')
text = text.replace(symbol[:-1], '')
return text
def remove_emoji(text):
return emoji.get_emoji_regexp().sub(r'', text)
def main(args):
f = open(args.output_file, 'w')
freq_dict = dict()
token_sum = 0
sample_num = 0
for line in open(args.input_file):
items = line.strip().split('\t')
if len(items) != 2:
continue
image = items[0]
caption = items[1].replace(' ', '')
# Remove content inside the bracket pairs
caption = remove_bracket_content(caption, bracket_pairs)
# Remove special symbol
caption = remove_special_symbol(caption)
# Remove emoji
caption = remove_emoji(caption)
# Tokenize caption
node = mecab.parseToNode(caption)
tokens, postag_flag = has_target_postag(node)
# Filter the caption with specific topics or tags
if caption.find('【') != -1 and caption.find('】') != -1:
# print(f'{line.strip()}')
continue
if len(tokens) < 5 or len(tokens) > 20:
continue
if has_en_word(tokens):
# print(f'{line.strip()}')
continue
if postag_flag:
token_sum += len(tokens)
sample_num += 1
if len(tokens) not in freq_dict:
freq_dict[len(tokens)] = 1
else:
freq_dict[len(tokens)] += 1
new_line = image + '\t' + ' '.join(tokens)
f.write(new_line + '\n')
# print(f'{new_line}')
f.close()
average_len = token_sum * 1.0 / sample_num
print(f'Average token length -> {average_len}')
# Plot the frequency curve
ordered = sorted(freq_dict.items(), key=lambda tup: tup[0])
x = np.array([t[0] for t in ordered])
y = np.array([t[1] for t in ordered])
plt.switch_backend('agg')
plt.figure()
plt.plot(x, y)
plt.grid(True, linestyle=':')
plt.savefig('./freq-figure.jpg')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Clean Train Data')
parser.add_argument('-i', '--input-file', type=str)
parser.add_argument('-o', '--output-file', type=str, default='./output.txt')
args = parser.parse_args()
main(args)
| {"hexsha": "dbe54f8d627f9c590bb3316f3fbe3c593d5c92db", "size": 3836, "ext": "py", "lang": "Python", "max_stars_repo_path": "image-comment-generation/data/clean.py", "max_stars_repo_name": "stonyhu/Image-Commenting", "max_stars_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "image-comment-generation/data/clean.py", "max_issues_repo_name": "stonyhu/Image-Commenting", "max_issues_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "image-comment-generation/data/clean.py", "max_forks_repo_name": "stonyhu/Image-Commenting", "max_forks_repo_head_hexsha": "eb925a3f99075d8b74c6cabd125f7b9a1f9786d2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0606060606, "max_line_length": 80, "alphanum_fraction": 0.5573514077, "include": true, "reason": "import numpy", "num_tokens": 967} |
from scipy import spatial
# Find the distance between each embedding
def get_pairwise_dist(embeddings):
return spatial.distance.squareform(spatial.distance.pdist(embeddings, metric="cosine"))
| {"hexsha": "cf15412fe9b44f24408a1a6ad77545e5ccb9c23f", "size": 197, "ext": "py", "lang": "Python", "max_stars_repo_path": "similarity.py", "max_stars_repo_name": "Peter-Devine/text_finder", "max_stars_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "similarity.py", "max_issues_repo_name": "Peter-Devine/text_finder", "max_issues_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "similarity.py", "max_forks_repo_name": "Peter-Devine/text_finder", "max_forks_repo_head_hexsha": "b09ae796511dc1d000b07c12996d25576566e012", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8333333333, "max_line_length": 91, "alphanum_fraction": 0.8121827411, "include": true, "reason": "from scipy", "num_tokens": 40} |
import numpy as np
from core.buffer.replay_buffer import ReplayBuffer
def test_replay_buffer(mock_transition):
buffer_size = 10
memory = ReplayBuffer(buffer_size=buffer_size)
# test after init
assert memory.buffer_size == buffer_size
assert memory.buffer_index == 0
assert memory.size == 0
# test store
store_iteration = 15
for _ in range(store_iteration):
memory.store(mock_transition)
# test after store
assert memory.buffer_index == (store_iteration % buffer_size)
assert memory.size == min(buffer_size, store_iteration)
# test sample
batch_size = 8
sample_transitions = memory.sample(batch_size=batch_size)
assert isinstance(sample_transitions, dict)
for key, val in sample_transitions.items():
assert key in mock_transition[0].keys()
if isinstance(val, list):
for i, v in enumerate(val):
assert isinstance(v, np.ndarray)
assert v.shape == (batch_size, *mock_transition[0][key][i].shape[1:])
else:
assert isinstance(val, np.ndarray)
assert val.shape == (batch_size, *mock_transition[0][key].shape[1:])
| {"hexsha": "026d6ef7181b7626bb6c14f052acf2ba3a45ee56", "size": 1181, "ext": "py", "lang": "Python", "max_stars_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_stars_repo_name": "zenoengine/JORLDY", "max_stars_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 300, "max_stars_repo_stars_event_min_datetime": "2021-11-03T07:06:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T02:23:56.000Z", "max_issues_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_issues_repo_name": "zenoengine/JORLDY", "max_issues_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2021-11-04T04:31:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T01:40:49.000Z", "max_forks_repo_path": "jorldy/test/core/buffer/test_replay_buffer.py", "max_forks_repo_name": "zenoengine/JORLDY", "max_forks_repo_head_hexsha": "1eb867e52a03e0282a55fa612cbc5b5de701ffe7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2021-11-03T08:05:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T08:35:05.000Z", "avg_line_length": 31.9189189189, "max_line_length": 85, "alphanum_fraction": 0.6706181202, "include": true, "reason": "import numpy", "num_tokens": 264} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 01:30:26 2021
@author: alan
"""
import tensorflow as tf
import glob
import random
import tensorflow.keras.layers as layers
import numpy as np
from skimage.io import imread
import os
import matplotlib.pyplot as plt
import cv2
from datetime import datetime
from packaging import version
import datetime
from tensorboard.plugins.hparams import api as hp
import time
device_name = tf.test.gpu_device_name()
if not device_name:
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
tf.debugging.set_log_device_placement(True)
#Detecting GPU
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
def unet():
inputs = tf.keras.Input((112, 112, 3))
# Entry block
x = layers.Conv2D(32, 3, strides=2, padding="same")(inputs)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
# Blocks 1, 2, 3 are identical apart from the feature depth.
for filters in [64, 128, 256]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
### [Second half of the network: upsampling inputs] ###
for filters in [256, 128, 64, 32]:
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.UpSampling2D(2)(x)
# Project residual
residual = layers.UpSampling2D(2)(previous_block_activation)
residual = layers.Conv2D(filters, 1, padding="same")(residual)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
# Add a per-pixel classification layer
outputs = layers.Conv2D(1, (1, 1), activation='sigmoid') (x)
# Define the model
model = tf.keras.Model(inputs, outputs)
return model
class data(tf.keras.utils.Sequence):
"""Helper to iterate over the data (as Numpy arrays)."""
def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
self.batch_size = batch_size
self.img_size = img_size
self.input_img_paths = input_img_paths
self.target_img_paths = target_img_paths
def __len__(self):
return len(self.target_img_paths) // self.batch_size
def __getitem__(self, idx):
"""Returns tuple (input, target) correspond to batch #idx."""
i = idx * self.batch_size
batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
for j, path in enumerate(batch_input_img_paths):
x[j] = plt.imread(path)
y = np.zeros((self.batch_size,) + self.img_size , dtype="uint8")
for j, path in enumerate(batch_target_img_paths):
img = plt.imread(path)
y[j] = img
return x, y
# Importing data
img_files = glob.glob('DATA/frames/*.png')
mask_files = [glob.glob('DATA/masks/' + os.path.basename(im))[0] for im in img_files]
N = len (img_files)
# Splitting data
ixRand = list(range(N))
random.shuffle(ixRand)
train_data = [img_files[e] for e in ixRand[:round(N*.8)]]
train_labels = [mask_files[e] for e in ixRand[:round(N*.8)]]
test_data = [img_files[e] for e in ixRand[round(N*.8):]]
test_labels = [mask_files[e] for e in ixRand[round(N*.8):]]
# tf.keras needs the data to come from an instance with __getitem__ and __len__ methods (a map-style dataset)
training_dataset = data(32,(112,112), train_data, train_labels)
val_dataset = data(32,(112,112), test_data, test_labels)
model = unet()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
'''
tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=False, show_dtype=False,
show_layer_names=True, rankdir='TB', expand_nested=False, dpi=96
)
'''
# Train the model, doing validation at the end of each epoch.
epochs = 20
start = time.time()
history = model.fit(training_dataset, epochs=epochs, validation_data=val_dataset)
end = time.time()
elapsed = end-start
#%%
# "Accuracy"
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('$Model_{Accuracy}$')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.show()
# "Loss"
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('$Model_{Loss}$')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.show()
#%% Display some results
pred = model.predict(val_dataset)
savePred = [cv2.imwrite('DATA/pred/' + os.path.basename(test_data[i]), np.squeeze(np.array(pred[i]>.5, dtype='uint8'),-1) *255) for i in range (len(pred))]
plt.figure()
plt.subplot(121)
plt.imshow(np.squeeze(pred[6,:,:,:],-1) + cv2.cvtColor(plt.imread(test_data[6]), cv2.COLOR_BGR2GRAY))
plt.title('Prediction')
plt.subplot(122)
plt.imshow( cv2.cvtColor(plt.imread(test_data[6]), cv2.COLOR_BGR2GRAY) + plt.imread(test_labels[6]))
plt.title('Ground trouth')
#%% Get metrics for evaluation of segmentation
'''
import seg_metrics.seg_metrics as sg
import csv
csv_file = 'metrics.csv'
pred_path = glob.glob('DATA/pred/*.png')
gdth_path = [glob.glob('DATA/masks/' + os.path.basename(im))[0] for im in pred_path]
metrics = [sg.write_metrics(labels = [255], gdth_path=gdth_path[i], pred_path=pred_path[i], csv_file=csv_file) for i in range(len(pred))]
keys = list(metrics[0].keys())
keys.remove('filename')
means = [ sum(d[k][0] for d in metrics) / len(metrics) for k in keys]
metrics_mean = dict (zip(keys,means))
with open('metrics_mean.csv', 'w') as f: # You will need 'wb' mode in Python 2.x
w = csv.DictWriter(f, metrics_mean.keys())
w.writeheader()
w.writerow(metrics_mean)
''' | {"hexsha": "0cec8392a4c8f81e84914c431af17f474b4de088", "size": 6639, "ext": "py", "lang": "Python", "max_stars_repo_path": "segmentacionCNN.py", "max_stars_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_stars_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "segmentacionCNN.py", "max_issues_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_issues_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "segmentacionCNN.py", "max_forks_repo_name": "alandgabriel/LV-Segmentation-with-U-Net", "max_forks_repo_head_hexsha": "7cfad5791e91321a1d4afb73559dbeeeeaee9347", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3150684932, "max_line_length": 157, "alphanum_fraction": 0.6826329266, "include": true, "reason": "import numpy", "num_tokens": 1743} |
# Copyright (c) 2017- Salas Lin (leVirve)
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
from scipy.optimize import linear_sum_assignment
np.seterr(divide='ignore', invalid='ignore')
def confusion_table(preds, labels, num_class: int):
''' Calculate the confusion matrix
    *credit: adapted from [chainer/chainercv] eval_semantic_segmentation.py
Args:
preds: tensor, ndarray
labels: tensor, ndarray
'''
confusion = np.zeros(num_class * num_class, dtype=np.int64)
def flatten(x):
if isinstance(x, np.ndarray):
return x.flatten()
return x.view(-1)
def numpy(x):
if isinstance(x, np.ndarray):
return x
return x.cpu().numpy()
for pred, label in zip(preds, labels):
pred, label = flatten(pred), flatten(label)
mask = label < 255
hist = num_class * label[mask] + pred[mask]
confusion += np.bincount(numpy(hist), minlength=num_class ** 2)
return confusion.reshape((num_class, num_class))
def intersection_over_union(confusion: np.ndarray):
iou_denominator = (confusion.sum(axis=1) + confusion.sum(axis=0) - np.diag(confusion))
return np.diag(confusion) / (iou_denominator)
def max_bipartite_matching_score(predictions: np.ndarray, targets: np.ndarray):
def to_numpy(x):
import torch
if torch.is_tensor(x):
return x.cpu().numpy()
return x
def _one_sample(prediction, target):
''' calculate the maximum bipartite matching between two labels
prediction: 2-D numpy array
target: 2-D numpy array
'''
pred_labels = np.unique(prediction)
gt_labels = np.unique(target)
cost = np.zeros((len(pred_labels), len(gt_labels)))
for i, p in enumerate(pred_labels):
p_mask = prediction == p
cost[i] = [-np.sum(p_mask & (target == g)) for g in gt_labels]
row_ind, col_ind = linear_sum_assignment(cost)
score = -cost[row_ind, col_ind].sum()
return score / target.size
predictions = np.squeeze(to_numpy(predictions))
targets = np.squeeze(to_numpy(targets))
if len(predictions.shape) == len(targets.shape) and len(predictions.shape) == 3:
scores = [_one_sample(p, t) for p, t in zip(predictions, targets)]
return np.mean(scores)
return _one_sample(predictions, targets)
class Metric():
def __init__(self, num_class, only_scalar=False, prefix='acc/'):
self.num_class = num_class
self.only_scalar = only_scalar
self.prefix = prefix
def __call__(self, output, target):
'''
output: Variable
target: Variable
'''
confusion = confusion_table(output, target, num_class=self.num_class)
iou = intersection_over_union(confusion)
pixel_accuracy = np.diag(confusion).sum() / confusion.sum()
class_accuracy = np.diag(confusion) / np.sum(confusion, axis=1)
if self.only_scalar:
return {f'{self.prefix}miou': np.nanmean(iou),
f'{self.prefix}pixel': pixel_accuracy,
f'{self.prefix}mean_class': np.nanmean(class_accuracy)}
else:
return {'iou': iou, 'miou': np.nanmean(iou),
'pixel_accuracy': pixel_accuracy,
'class_accuracy': class_accuracy,
'mean_class_accuracy': np.nanmean(class_accuracy)}
| {"hexsha": "b6434705a9c84e6382a3b4cbf62adb6db847cd45", "size": 3520, "ext": "py", "lang": "Python", "max_stars_repo_path": "onegan/metrics/semantic_segmentation.py", "max_stars_repo_name": "leVirve/OneGAN", "max_stars_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-01-26T08:58:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-03T20:44:06.000Z", "max_issues_repo_path": "onegan/metrics/semantic_segmentation.py", "max_issues_repo_name": "leVirve/OneGAN", "max_issues_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-08-13T03:02:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-20T04:15:13.000Z", "max_forks_repo_path": "onegan/metrics/semantic_segmentation.py", "max_forks_repo_name": "leVirve/OneGAN", "max_forks_repo_head_hexsha": "e0d5f387c957fbf599919078d8c6277740015336", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-21T07:44:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-08T14:15:06.000Z", "avg_line_length": 32.2935779817, "max_line_length": 90, "alphanum_fraction": 0.6261363636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 795} |
# Pre-image for Gaussian kernel
# From Kwok and Tsang, "The Pre-Image problem in kernel methods", ICML 2003
# (based on matlab code provided by authors)
# Also:
# Mika, et al. "Kernel PCA and Denoising in Feature Spaces", NIPS 1998
# and
# Teixeira et al. "KPCA Denoising and the pre-image problem revisited", DSP 2008
#
# Danny Perry (dperry@cs.utah.edu)
# May 2015
using Debug
include("Kernels.jl")
# Fixed point method to preimage
# y - projected data (trying to recover into input space)
# U - eigenvectors of K
# X - training data
# z_init - initial guess at z
# sigma - Gaussian kernel parameter
# tolerance - convergence criteria
# maxiters - max number of iters
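# The loop below implements the fixed-point update of Mika et al. (1998): with gamma = U*y,
#   z_{t+1} = sum_i gamma_i exp(-||x_i - z_t||^2 / sigma^2) x_i
#           / sum_j gamma_j exp(-||x_j - z_t||^2 / sigma^2)
# i.e. the next iterate is a gamma- and kernel-weighted mean of the training points.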
function GaussianKernelPreImage(y,U, X, z_init, sigma, tolerance, maxiters)
gamma = U * y # results in n x 1 vector
z = copy(z_init)
last_z = copy(z)
iter = 0
for iter=1:maxiters
last_z = copy(z)
diff = X - ones(size(X,1),1)*z
diff .^= 2
kz = gamma .* exp( -sum(diff,2) ./ sigma^2)
if sum(kz) == 0
println("Preimage fixed point iteration failed: initial guess too far away - reverting to initial guess.")
return z_init # can't get any closer, returning initial guess.
end
z = sum(X .* (kz*ones(1,size(X,2))),1)/sum(kz)
if norm(last_z-z)/sqrt(norm(z)*norm(last_z)) < tolerance
break
end
end
if false
println("iters: ", iter)
println("err: ", norm(last_z-z)/sqrt(norm(z)*norm(last_z)) , " < ", tolerance)
end
if iter == maxiters
println("warning, did not converge.")
end
return z, iter
end
# Linear algebra approach to preimage
function GaussianKernelPreImage(distsq, X, neibsize)
sidx = sortperm(distsq)
sdistsq = distsq[sidx]
XH = X[sidx[1:neibsize],:]
Xmean = mean(XH,1)
XH = XH - ones(neibsize,1) * Xmean # centered around neighborhood mean
UM,SM,VM = svd(XH')
rankM = rank(diagm(SM),1e-5)
UM = UM[:,1:rankM]
SM = SM[1:rankM]
VM = VM[:,1:rankM]
transf = UM*diagm(1./SM)*(VM'./2)
sd0 = zeros(neibsize,1)
ZM = diagm(SM)*VM'
for i=1:neibsize
sd0[i] = (ZM[:,i]'*ZM[:,i])[1]
end
result = transf * (vec(sd0) - vec(sdistsq[1:neibsize])) + vec(Xmean)
end
# X - training data (n x d)
# K - uncentered training data Gram (n x n)
# Ktest - uncentered test data Gram with training (n x nt)
function GaussianKernelPreImage(X,K,Ktest, neibsize, sigma)
n = size(K,1)
nt = size(Ktest,2)
d = size(X,2)
spectrum_pct = 0
target_dim = n
centering = "additive"
Kc,P,V,S,Y = KernelPCA(K, spectrum_pct, target_dim, centering)
H = eye(n)-ones(n,n)/n # centering matrix
HMH = H*P*P'*H
cK = mean(K,2)
meanK = mean(cK)
result = zeros(nt,d)
neibs = zeros(Int64, nt, neibsize)
for j=1:nt
# calculate the distance between the testing point and training points
k_x = Ktest[:,j]
gammaC = HMH*(k_x-cK);
PphiNormC = ((k_x+cK)'*gammaC + meanK)[1]
d2 = zeros(n);
for i = 1:n
PphiProjC = (K[i,:]*gammaC)[1]+cK[i]
d2[i] = -log(abs((1-PphiNormC+2*PphiProjC)/2))*(sigma*2)
end
result[j,:] = GaussianKernelPreImage(d2,X,neibsize);
closestind = sortperm(d2)
neibs[j,:] = closestind[1:neibsize]
end
return result,neibs
end
function GaussianKernelDenoise(X,Xtest, iters, neibsize, sigma)
n = size(X,1)
nt = size(Xtest,1)
d = size(X,2)
K = GaussianKernel(X,X,sigma)
spectrum_pct = 0
target_dim = n
centering = "additive"
Kc,P,V,S,Y = KernelPCA(K, spectrum_pct, target_dim, centering)
H = eye(n)-ones(n,n)/n # centering matrix
HMH = H*P*P'*H
cK = mean(K,2)
meanK = mean(cK)
result = copy(Xtest)
neibs = zeros(Int64, nt, neibsize)
for iter=1:iters
for j=1:nt
# calculate the distance between the testing point and training points
k_x = GaussianKernel(X,Xtest[j,:],sigma)
gammaC = HMH*(k_x-cK);
PphiNormC = ((k_x+cK)'*gammaC + meanK)[1]
d2 = zeros(n);
for i = 1:n
PphiProjC = (vec(K[i,:])'*vec(gammaC)+cK[i])[1]
d2[i] = -log((1-PphiNormC+2*PphiProjC)/2)*sigma
end
result[j,:] = GaussianKernelPreImage(d2,X,neibsize);
closestind = sortperm(d2)
neibs[j,:] = closestind[1:neibsize]
end
end
return result,neibs
end
| {"hexsha": "91a4b0374c0e56afcd4ed27c084140bb0e33d2cb", "size": 4055, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PreImage.jl", "max_stars_repo_name": "daniel-perry/Kernel.jl", "max_stars_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PreImage.jl", "max_issues_repo_name": "daniel-perry/Kernel.jl", "max_issues_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-07-12T17:40:45.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-12T17:40:45.000Z", "max_forks_repo_path": "src/PreImage.jl", "max_forks_repo_name": "daniel-perry/Kernel.jl", "max_forks_repo_head_hexsha": "da7255ffe7b9e1341d4f2decc82128451dc3d383", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8773006135, "max_line_length": 109, "alphanum_fraction": 0.6577065351, "num_tokens": 1433} |
#!/usr/bin/env python
import numpy as np
def get_input(prompt, default):
return input(prompt) or str(default)
N = int(get_input('Number of NUWS dimensions [1]: ', 1))
cos_power = int(get_input('Power of window function, n (cos^n) [2]: ',2))
Nmax = int(get_input('Maximum number of repeats [16]: ', 16))
print('Please enter time domain sizes as REAL points')
if N==1:
td1 = int(get_input('td (nominal) [64]: ', 64)) // 2
td2 = 1
else:
td1 = int(get_input('td (outer loop, nominal) [64]: ', 64)) // 2
td2 = int(get_input('td (inner loop, nominal) [16]: ', 16)) // 2
# calculate effective td sizes
td1eff=td1
td2eff=td2
for i in range(td1):
if int(round(Nmax * np.cos(i/td1*np.pi/2)**cos_power))==0:
td1eff = i
break
if N>1:
for i in range(td2):
if int(round(Nmax * np.cos(i/td2*np.pi/2)**cos_power))==0:
td2eff = i
break
print()
total_cycles = 0
# now calculate vc lists
if N==1: # 2D
for i in range(td1eff):
w1 = np.cos(i/td1*np.pi/2)**cos_power
c = int(round(Nmax*w1))
print(c)
print(c) # second copy for complex points
total_cycles += 2*c
else: # 3D
for i in range(td1eff): # outer loop
w1 = np.cos(i/td1*np.pi/2)**cos_power
for i2 in range(2): # complex pts
for j in range(td2eff): # inner loop
w2 = np.cos(j/td2*np.pi/2)**cos_power
c = int(round(Nmax * w1 * w2))
print(c)
print(c) # second copy for complex points
total_cycles += 2*c
print()
print('Effective td1 = ' + str(2*td1eff))
if N>1:
print('Effective td2 = ' + str(2*td2eff))
print('Total cycles = ' + str(total_cycles))
| {"hexsha": "2669a329c656d228dc07c5c54eb98fa9b7e60c61", "size": 1752, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/make-nuws.py", "max_stars_repo_name": "chriswaudby/pp", "max_stars_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "util/make-nuws.py", "max_issues_repo_name": "chriswaudby/pp", "max_issues_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/make-nuws.py", "max_forks_repo_name": "chriswaudby/pp", "max_forks_repo_head_hexsha": "a1da83b5cba5ebb5e42b846478dc4bce8bace875", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-09-28T13:50:14.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-28T13:50:14.000Z", "avg_line_length": 28.2580645161, "max_line_length": 73, "alphanum_fraction": 0.5667808219, "include": true, "reason": "import numpy", "num_tokens": 548} |
A dataset of samples drawn at random from the algebraic-stack subset of proof-pile-2.
https://huggingface.co/datasets/EleutherAI/proof-pile-2
License: see EleutherAI/proof-pile-2
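Each row above pairs a source file ("text") with its JSON metadata ("meta"). As a rough way to reproduce such a sample, the sketch below streams the algebraic-stack configuration of the parent EleutherAI/proof-pile-2 corpus with the Hugging Face datasets library and keeps a small shuffled subset; the repository id and configuration name are taken from the parent dataset and are assumptions here, since this snapshot's own repository id is not stated.

from datasets import load_dataset

# Minimal sketch (assumed repo id and config name): stream the algebraic-stack
# portion of the parent corpus and keep a small shuffled sample of rows.
ds = load_dataset("EleutherAI/proof-pile-2", "algebraic-stack",
                  split="train", streaming=True)
sample = ds.shuffle(seed=0, buffer_size=10_000).take(100)

for row in sample:
    print(row["meta"])            # per-file metadata (path, language, size, ...)
    print(row["text"][:200])      # first characters of the source file
    break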