| column | dtype | range |
|---|---|---|
| max_stars_repo_path | string | lengths 4-286 |
| max_stars_repo_name | string | lengths 5-119 |
| max_stars_count | int64 | 0-191k |
| id | string | lengths 1-7 |
| content | string | lengths 6-1.03M |
| content_cleaned | string | lengths 6-1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03-1 |
| comments | string | lengths 0-556k |
| edu_score | float64 | 0.32-5.03 |
| edu_int_score | int64 | 0-5 |
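The rows reproduced below can also be pulled programmatically. A minimal sketch with the `datasets` library follows; the dataset id and split name are placeholders, since the actual Hub id is not given in this dump.

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the real Hub id for this dataset.
ds = load_dataset("user/the-stack-edu-scored", split="train")

# Keep only samples judged highly educational (edu_int_score of 3 or more).
high_quality = ds.filter(lambda row: row["edu_int_score"] >= 3)

for row in high_quality.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["edu_score"])
```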
max_stars_repo_path: src/models/encoder.py | max_stars_repo_name: guowenying111/SEKE | max_stars_count: 0 | id: 200

import math
import torch
import torch.nn as nn
from models.neural import MultiHeadedAttention, PositionwiseFeedForward
from models.rnn import LayerNormLSTM
from torch_geometric.nn import GCNConv  # assumed import for GCNConv, which is used by the GCN class below
class Classifier(nn.Module):
def __init__(self, hidden_size):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask_cls):
h = self.linear1(x).squeeze(-1)
sent_scores = self.sigmoid(h) * mask_cls.float()
return sent_scores
class PositionalEncoding(nn.Module):
def __init__(self, dropout, dim, max_len=5000):
pe = torch.zeros(max_len, dim)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp((torch.arange(0, dim, 2, dtype=torch.float) *
-(math.log(10000.0) / dim)))
pe[:, 0::2] = torch.sin(position.float() * div_term)
pe[:, 1::2] = torch.cos(position.float() * div_term)
pe = pe.unsqueeze(0)
super(PositionalEncoding, self).__init__()
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
self.dim = dim
def forward(self, emb, step=None):
emb = emb * math.sqrt(self.dim)
if (step):
emb = emb + self.pe[:, step][:, None, :]
else:
emb = emb + self.pe[:, :emb.size(1)]
emb = self.dropout(emb)
return emb
def get_emb(self, emb):
return self.pe[:, :emb.size(1)]
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, heads, d_ff, dropout):
super(TransformerEncoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, iter, query, inputs, mask):
if (iter != 0):
input_norm = self.layer_norm(inputs)
else:
input_norm = inputs
mask = mask.unsqueeze(1)
context = self.self_attn(input_norm, input_norm, input_norm,
mask=mask)
out = self.dropout(context) + inputs
return self.feed_forward(out)
class TransformerInterEncoder(nn.Module):
def __init__(self, d_model, d_ff, heads, dropout, num_inter_layers=0):
super(TransformerInterEncoder, self).__init__()
self.d_model = d_model
self.num_inter_layers = num_inter_layers
self.pos_emb = PositionalEncoding(dropout, d_model)
self.transformer_inter = nn.ModuleList(
[TransformerEncoderLayer(d_model, heads, d_ff, dropout)
for _ in range(num_inter_layers)])
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.wo = nn.Linear(d_model, 1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, top_vecs, mask):
""" See :obj:`EncoderBase.forward()`"""
batch_size, n_sents = top_vecs.size(0), top_vecs.size(1)
pos_emb = self.pos_emb.pe[:, :n_sents]
x = top_vecs * mask[:, :, None].float()
x = x + pos_emb
for i in range(self.num_inter_layers):
x = self.transformer_inter[i](i, x, x, ~mask) # all_sents * max_tokens * dim
x = self.layer_norm(x)
sent_scores = self.sigmoid(self.wo(x))
sent_scores = sent_scores.squeeze(-1) * mask.float()
return sent_scores
class GRUEncoder_attn(nn.Module):
    def __init__(self, bidirectional, num_layers, input_size, hidden_size, dropout=0.0):
        super(GRUEncoder_attn, self).__init__()  # stub: no layers or forward() are defined for this class
class RNNEncoder_attn(nn.Module):
def __init__(self, bidirectional, num_layers, input_size,
hidden_size, dropout=0.0):
super(RNNEncoder_attn, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.relu = nn.ReLU()
self.rnn = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional)
self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True)
self.dropout = nn.Dropout(dropout)
self.softmax = nn.Softmax()
print('this is dropout',dropout)
def forward(self, x, mask):
"""See :func:`EncoderBase.forward()`"""
batch, layer, seq, hidden = x.size()
x1=x.contiguous().view(batch * layer, -1, hidden)
x1 = torch.transpose(x1, 1, 0)
memory_bank, _ = self.rnn(x1)
memory_bank = self.dropout(memory_bank) + x1
memory_bank = torch.transpose(memory_bank, 1, 0)
        # sent_scores = self.softmax(self.relu(self.wo(memory_bank)).squeeze(dim=-1)).unsqueeze(-1)
        sent_scores = self.softmax(
            self.relu(self.wo(memory_bank[:, -1, :])).squeeze(dim=-1).view(-1, layer)
        ).unsqueeze(-1)
        x = x.transpose(1, 2)
        sent_vec = torch.matmul(
            sent_scores.transpose(1, 2).unsqueeze(dim=1).expand(batch, seq, 1, layer), x)
        return sent_vec.squeeze(dim=2)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, heads, d_ff, dropout):
super(TransformerDecoderLayer, self).__init__()
self.self_attn = MultiHeadedAttention(
heads, d_model, dropout=dropout)
self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, iter, ent_enc, inputs, self_attn_mask=None,context_attn_mask=None):
context = self.self_attn(inputs, inputs, inputs,
mask=self_attn_mask)
dec_output = self.self_attn(
ent_enc, ent_enc, context, mask=context_attn_mask)
dec_output = self.feed_forward(dec_output)
return dec_output
class TransformerInterDecoder(nn.Module):
def __init__(self, d_model, d_ff, heads, dropout, d_hidden, num_inter_layers=0):
super(TransformerInterDecoder, self).__init__()
self.d_model = d_model
self.num_inter_layers = num_inter_layers
self.pos_emb = PositionalEncoding(dropout, d_model)
self.transformer_inter = nn.ModuleList(
[TransformerDecoderLayer(d_model, heads, d_ff, dropout)
for _ in range(num_inter_layers)])
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
self.wo = nn.Linear(d_model, d_hidden , bias=True)
self.wi = nn.Linear(d_model, d_hidden, bias=True)
self.v = nn.Linear(d_hidden, 1, bias=True)
self.LR = nn.LeakyReLU()
self.softmax = nn.Softmax(dim=-1)
def forward(self, top_vecs, inputs, mask, label_mask=None):
""" See :obj:`EncoderBase.forward()`"""
n_out = inputs.size(1)
pos_emb = self.pos_emb.pe[:, :n_out]
        seq_mask = subsequent_mask(inputs)  # subsequent_mask (causal mask helper) is assumed to be defined elsewhere in the repo
self_attn_mask = torch.gt((~label_mask.unsqueeze(1).expand(-1, n_out, -1) + seq_mask), 0)
inputs=inputs+pos_emb
for i in range(self.num_inter_layers):
inputs = self.transformer_inter[i](i, top_vecs, inputs,self_attn_mask,~ mask.unsqueeze(1).expand(-1, n_out,-1))
scores=self.v(self.LR(
self.wo(inputs.unsqueeze(2)).expand(-1, -1, top_vecs.size(1), -1) + self.wi(top_vecs).unsqueeze(
1))).squeeze(-1)
sent_scores = self.softmax(scores)
return sent_scores
class RNNEncoder(nn.Module):
def __init__(self, bidirectional, num_layers, input_size,
hidden_size, dropout=0.0):
super(RNNEncoder, self).__init__()
num_directions = 2 if bidirectional else 1
assert hidden_size % num_directions == 0
hidden_size = hidden_size // num_directions
self.rnn = LayerNormLSTM(
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
bidirectional=bidirectional)
self.wo = nn.Linear(num_directions * hidden_size, 1, bias=True)
self.dropout = nn.Dropout(dropout)
self.sigmoid = nn.Sigmoid()
def forward(self, x, mask):
"""See :func:`EncoderBase.forward()`"""
x = torch.transpose(x, 1, 0)
memory_bank, _ = self.rnn(x)
memory_bank = self.dropout(memory_bank) + x
memory_bank = torch.transpose(memory_bank, 1, 0)
sent_scores = self.sigmoid(self.wo(memory_bank))
sent_scores = sent_scores.squeeze(-1) * mask.float()
return sent_scores
class GCN(nn.Module):
def __init__(self,in_channel,out_channel,hidden_dim,drop):
super(GCN, self).__init__()
self.in_channel=in_channel
self.out_channel=out_channel
self.hidden_dim=hidden_dim
self.dropout = nn.Dropout(p=drop)
self.gcn_x_11=GCNConv(self.in_channel,self.hidden_dim)
self.gcn_x_12=GCNConv(self.hidden_dim,self.out_channel)#No.1-*2*2
# self.gcn_x_21=GCNConv(self.in_channel,self.hidden_dim)
# self.gcn_x_22=GCNConv(self.hidden_dim,self.out_channel)#No.2-*2
# self.gcn_mix=GCNConv(self.hidden_dim*2,self.hidden_dim)#No.2-*2
self.relu=nn.ReLU(inplace=True)
def forward(self, x_1, edge_index_1, edge_index_2=None,edge_weight_1=None,edge_weight_2=None):
syn=self.gcn_x_11(x_1, edge_index_1, edge_weight_1)
syn=self.relu(syn)
syn=self.dropout(syn)
syn = self.gcn_x_12(syn, edge_index_1, edge_weight_1)
syn = self.relu(syn)
syn = self.dropout(syn)
# x2 = self.gcn_x_21(x_1, edge_index_2, edge_weight_2)
# x2 = self.relu(x2)
# x2 = self.dropout(x2)
# mix = self.gcn_mix(torch.cat((syn,x2),-1), edge_index_2, edge_weight_2)
# x2 = self.gcn_x_22(mix, edge_index_2, edge_weight_2)
# syn=self.gcn_x_12(mix, edge_index_1, edge_weight_1)
# syn=self.relu(syn)
# syn=self.dropout(syn)
# x2 = self.relu(x2)
# x2 = self.dropout(x2)
return syn
language: en | language_score: 0.227167 | edu_score: 2.681408 | edu_int_score: 3
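A minimal usage sketch for the sentence-scoring heads above, assuming the file is importable as `models.encoder`; the shapes (a batch of 2 documents, 5 sentence vectors of width 768) are illustrative only.

```python
import torch
from models.encoder import Classifier, PositionalEncoding  # assumes the repo layout above

x = torch.randn(2, 5, 768)   # sentence vectors: (batch, n_sents, hidden)
mask = torch.ones(2, 5)      # 1 = real sentence, 0 = padding

clf = Classifier(hidden_size=768)
scores = clf(x, mask)        # (2, 5) sigmoid scores, zeroed where mask is 0

pe = PositionalEncoding(dropout=0.1, dim=768)
x_pos = pe(x)                # scaled by sqrt(dim), positional encoding added, dropout applied
```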
max_stars_repo_path: djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/dropbox/views.py | max_stars_repo_name: DemarcusL/django_wiki_lab | max_stars_count: 6,342 | id: 201

import requests
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import DropboxOAuth2Provider
class DropboxOAuth2Adapter(OAuth2Adapter):
provider_id = DropboxOAuth2Provider.id
access_token_url = "https://api.dropbox.com/oauth2/token"
authorize_url = "https://www.dropbox.com/oauth2/authorize"
profile_url = "https://api.dropbox.com/2/users/get_current_account"
redirect_uri_protocol = "https"
def complete_login(self, request, app, token, **kwargs):
response = requests.post(
self.profile_url,
headers={"Authorization": "Bearer %s" % (token.token,)},
)
response.raise_for_status()
return self.get_provider().sociallogin_from_response(request, response.json())
oauth_login = OAuth2LoginView.adapter_view(DropboxOAuth2Adapter)
oauth_callback = OAuth2CallbackView.adapter_view(DropboxOAuth2Adapter)
language: none | language_score: 1 | edu_score: 2.602192 | edu_int_score: 3
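For illustration, the profile request that `complete_login` performs can be reproduced standalone with `requests`; the token value here is a placeholder obtained from the OAuth2 token exchange.

```python
import requests

access_token = "<dropbox-access-token>"  # placeholder
resp = requests.post(
    "https://api.dropbox.com/2/users/get_current_account",
    headers={"Authorization": "Bearer %s" % access_token},
)
resp.raise_for_status()
print(resp.json())  # the account payload that sociallogin_from_response() consumes
```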
max_stars_repo_path: source/conf.py | max_stars_repo_name: Tatsh/upkeep | max_stars_count: 3 | id: 202

# SPDX-License-Identifier: MIT
# pylint: disable=redefined-builtin,invalid-name
"""See https://www.sphinx-doc.org/en/master/usage/configuration.html"""
from typing import Sequence
import os
import sys
# region Path setup
sys.path.insert(0, os.path.abspath('..'))
# endregion
# region Project information
project = 'Upkeep'
copyright = '2020, <NAME>'
author = '<NAME>'
# The short X.Y version
version = '1.2.7'
# The full version, including alpha/beta/rc tags
release = f'v{version}'
# endregion
# region General configuration
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns: Sequence[str] = []
master_doc = 'index'
# endregion
# region Options for HTML output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# endregion
# region Extension configuration
# endregion
language: en | language_score: 0.718077 | edu_score: 1.757556 | edu_int_score: 2
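As a hedged example of how a `conf.py` like the one above is typically extended (these lines are not part of the original file), additional built-in Sphinx extensions can simply be appended to `extensions`:

```python
# Hypothetical additions to the configuration above; both extensions ship with Sphinx.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx']
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
```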
max_stars_repo_path: generators/generate_pybind11_bindings.py | max_stars_repo_name: sweptlaser/pclpy | max_stars_count: 0 | id: 203
import os
import platform
import shutil
import sys
from collections import Counter
from collections import defaultdict, OrderedDict
from os.path import join
from typing import List, Dict, Set
from CppHeaderParser import CppHeaderParser
from CppHeaderParser.CppHeaderParser import CppMethod
import generators.dependency_tree
from generators.config import common_includes, PCL_BASE, PATH_LOADER, PATH_MODULES, MODULES_TO_BUILD, \
HEADERS_TO_SKIP, ATTRIBUTES_TO_SKIP, CLASSES_TO_IGNORE, METHODS_TO_SKIP, SUBMODULES_TO_SKIP, EXPLICIT_INCLUDES, \
SPECIALIZED_TEMPLATED_TYPES_TO_SKIP
from generators.definitions.function import generate_function_definitions, get_methods_defined_outside
from generators.definitions.method import split_methods_by_type
from generators.definitions.submodule_loader import generate_loader
from generators.definitions.templated_class import ClassDefinition
from generators.instantiations import Instantiations
from generators.point_types_utils import unpack_yaml_point_types
from generators.utils import make_header_include_name, sort_headers_by_dependencies, \
generate_main_loader, make_namespace_class, read_header_file
def filter_methods_for_parser_errors(methods):
return [m for m in methods if not m["name"] in ("void", "bool")]
def filter_methods_to_skip(methods):
filtered_methods = []
for m in methods:
if (m["parent"]["name"], m["name"]) in METHODS_TO_SKIP:
continue
if "Callback" in m["name"]:
single_argument = len(m["parameters"]) == 1
boost_function = single_argument and m["parameters"][0]["type"].startswith("boost::function")
if not boost_function:
continue
filtered_methods.append(m)
return filtered_methods
def same_parameters(p1: Dict, p2: Dict) -> bool:
fields = ["constant", "name", "raw_type", "reference", "static"]
return all(p1[f] == p2[f] for f in fields)
def same_methods(m1: CppMethod, m2: CppMethod) -> bool:
if m1["name"] != m2["name"]:
return False
# bug in CppHeaderParser
# in "void ImageGrabber<PointT>::publish", "void ImageGrabber<PointT>::" is the return type
path = m1.get("path", m2.get("path"))
path = path[path.rfind(":") + 1:]
if not any(path in type_ for type_ in [m1["rtnType"], m2["rtnType"]]):
return False
# same parameters
for p1 in m1["parameters"]:
for p2 in m2["parameters"]:
if m1["name"] == m2["name"] and same_parameters(p1, p2):
break
else:
return False
return len(m1["parameters"]) == len(m2["parameters"])
def private_methods_defined_outside(private_methods: List[CppMethod],
methods_declared_outside: List[CppMethod]) -> List[CppMethod]:
private_defined_outside = []
for m_private in private_methods:
for m_outside in methods_declared_outside:
if same_methods(m_private, m_outside):
private_defined_outside.append(m_private)
break
return private_defined_outside
def generate_class_definitions(main_classes,
module,
header_name,
path,
needs_overloading: List[str],
methods_defined_outside: List[CppMethod]) -> str:
text = []
a = text.append
a(common_includes)
a(EXPLICIT_INCLUDES.get((module, header_name), ""))
a(make_header_include_name(module, header_name, path))
a("")
namespaces = set([c["namespace"] for c in main_classes])
for namespace in namespaces:
if not namespace == "pcl":
a("using namespace %s;" % namespace)
a("\n")
for class_ in main_classes:
methods = class_["methods"]["public"]
methods = filter_methods_for_parser_errors(methods)
methods = filter_methods_to_skip(methods)
private_and_protected = class_["methods"]["private"] + class_["methods"]["protected"]
methods += private_methods_defined_outside(private_and_protected, methods_defined_outside)
class_properties = [p for p in class_["properties"]["public"]
if not "using" in p["type"]
and not "union" in p["type"]]
union_properties = [p for nested_class in class_["nested_classes"]
for p in nested_class["properties"]["public"]
if "union" in nested_class["name"]]
class_properties += union_properties
class_properties = filter_class_properties(module, header_name, class_["name"], class_properties)
constructors, variables, others = split_methods_by_type(methods, class_properties,
needs_overloading)
if not class_["can_be_instantiated"]:
constructors = []
class_def = ClassDefinition(class_, constructors, variables, others, module)
a(class_def.to_class_function_definition())
a("")
return "\n".join(text)
def filter_class_properties(module, header, class_name, properties):
key = (module, header, class_name)
# ignore properties without a name
properties = [p for p in properties if p["name"]]
if key in ATTRIBUTES_TO_SKIP:
to_ignore = ATTRIBUTES_TO_SKIP[key]
filtered_properties = []
for p in properties:
if p["name"] in to_ignore:
continue
filtered_properties.append(p)
properties = filtered_properties
return properties
def get_main_classes(header, module, header_name):
# header = read_headers(base_path, header_name, module)
main_classes = [c for c in header.classes.values() if c["namespace"] in ("pcl", "pcl::" + module)]
filtered_main_classes = []
for class_ in main_classes:
specialized_template = class_.get("template") and "<" in class_["name"]
if specialized_template:
to_skip = any(("<%s>" % type_) in class_["name"] for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP)
if not to_skip:
message = "Warning: Template class specialization not implemented for class %s in %s"
print(message % (class_["name"], header_name))
elif (module, header_name, class_["name"]) in CLASSES_TO_IGNORE:
pass
else:
filtered_main_classes.append(class_)
filtered_main_classes = sorted(filtered_main_classes, key=lambda c: c["name"])
return filtered_main_classes
def get_functions(header, module):
functions = [f for f in header.functions if f["namespace"] in ("pcl",
"pcl::",
"pcl::%s" % module,
"pcl::%s::" % module)]
functions = sorted(functions, key=lambda f: f["name"])
filtered = filter_module_level_functions(functions)
return filtered
def filter_module_level_functions(functions: List[CppMethod]):
filtered = []
for f in functions:
keep = True
if f.get("returns_const"):
keep = False
for param in f["parameters"]:
for type_ in SPECIALIZED_TEMPLATED_TYPES_TO_SKIP:
if type_ in param["type"]:
keep = False
if keep:
filtered.append(f)
return filtered
def get_variables(header):
variables = [v for v in header.variables if v.get("defaultValue") and 'using' != v.get('type')]
variables = sorted(variables, key=lambda v: v["name"])
return variables
def get_enums(header):
enums = [e for e in header.enums if e.get("name")] # skip nameless enums
enums = sorted(enums, key=lambda v: v["name"])
return enums
def read_header(header_path, skip_macros=None):
# I tried to do this in multiple threads but it seems like CppHeaderParser is not thread safe...
if skip_macros is None:
skip_macros = []
header_file_str = read_header_file(header_path, skip_macros)
parser = CppHeaderParser
parser.debug = False
header = parser.CppHeader(header_file_str, argType="string")
return header
def clean():
try:
os.remove(PATH_LOADER)
except FileNotFoundError:
pass
if os.path.exists(PATH_MODULES):
shutil.rmtree(PATH_MODULES)
def check_if_needs_overloading(main_classes):
needs_overloading = {}
classes_by_module = defaultdict(list)
for (module, _), class_ in main_classes.items():
classes_by_module[module] += class_
for module, classes in classes_by_module.items():
needs = []
for class_ in classes:
count = Counter(m["name"] for methods in class_["methods"].values() for m in methods)
for name, count in count.items():
if count >= 2:
needs.append(name)
needs_overloading[module] = needs
return needs_overloading
def get_headers(modules=None, skip_modules=None):
def listmod(module):
found_modules = []
for base, folders, files in os.walk(join(PCL_BASE, module)):
if any(base.endswith(m) for m in SUBMODULES_TO_SKIP):
continue
relative_base = os.path.abspath(base).replace(PCL_BASE, "")[1:]
for f in files:
if f.endswith(".h"):
found_modules.append([f, join(relative_base, f)])
return found_modules
if modules is None:
modules = MODULES_TO_BUILD
if skip_modules is not None:
modules = [m for m in modules if m not in skip_modules]
headers_to_generate = [(module, header_name, path) for module in modules
for header_name, path in listmod(module)]
base_headers = [("", f, f) for f in os.listdir(PCL_BASE) if f.endswith(".h")]
headers_to_generate += base_headers
headers_to_generate_temp = []
for module, header_name, path in headers_to_generate:
if (module, header_name) in HEADERS_TO_SKIP:
continue
headers_to_generate_temp.append(tuple([module, header_name, path]))
return headers_to_generate_temp
def get_pure_virtual_methods(class_: CppHeaderParser.CppClass) -> Set[str]:
access = "private protected public".split()
return set([m["name"] for a in access for m in class_["methods"][a] if m["pure_virtual"]])
def get_all_class_methods_not_pure_virtual(class_: CppHeaderParser.CppClass) -> Set[str]:
access = "private protected public".split()
return set([m["name"] for a in access for m in class_["methods"][a] if not m["pure_virtual"]])
def flag_instantiatable_class(dependency_tree, main_classes):
"""determine if the class can be instantiated"""
main_classes_by_name_namespace = {make_namespace_class(c["namespace"], c["name"]): c
for classes in main_classes.values() for c in classes}
for module, header_name in main_classes:
for class_ in main_classes[(module, header_name)]:
can_be_instantiated = True
if class_["abstract"]:
can_be_instantiated = False
else:
# check if any pure virtual method is not implemented
all_implemented_inherited_methods = get_all_class_methods_not_pure_virtual(class_)
namespace_class = make_namespace_class(class_["namespace"], class_["name"])
for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class):
base_class = main_classes_by_name_namespace.get(base_name_nsp)
if base_class:
base_class_methods = get_all_class_methods_not_pure_virtual(base_class)
all_implemented_inherited_methods.update(base_class_methods)
for base_name_nsp in dependency_tree.breadth_first_iterator(namespace_class):
base_class = main_classes_by_name_namespace.get(base_name_nsp)
if base_class and base_class["abstract"]:
base_pure_virtual_methods = get_pure_virtual_methods(base_class)
if base_pure_virtual_methods - all_implemented_inherited_methods:
can_be_instantiated = False
class_["can_be_instantiated"] = can_be_instantiated
def load_yaml_point_types(not_every_point_type):
classes_point_types = unpack_yaml_point_types("point_types_generated.yml", not_every_point_type)
extra_point_types = unpack_yaml_point_types("point_types_extra.yml")
for k, v in extra_point_types.items():
if k in classes_point_types:
classes_point_types[k].append(v)
else:
classes_point_types[k] = v
return classes_point_types
def make_module_dirs(modules):
for module in modules:
module_dir = join(PATH_MODULES, module)
if not os.path.exists(module_dir):
os.makedirs(module_dir)
def is_file_different(path, text):
v = open(path).read()
if v != text:
print("File is different: %s" % os.path.split(path)[1])
return True
# print("File is the same: %s" % os.path.split(path)[1])
return False
def write_if_different(files_to_write, delete_others):
written = []
for base, folder, files in os.walk(PATH_MODULES):
for f in files:
path = join(base, f)
if path in files_to_write:
if is_file_different(path, files_to_write[path]):
open(path, "w").write(files_to_write[path])
written.append(path)
elif delete_others:
os.remove(path)
print("Deleted: " + path)
# write new files
for path, text in files_to_write.items():
if path not in written:
open(path, "w").write(files_to_write[path])
def delete_other_dirs(modules):
for f in os.listdir(PATH_MODULES):
folder = join(PATH_MODULES, f)
if f not in modules and os.path.isdir(folder):
shutil.rmtree(folder, ignore_errors=True)
def write_stuff_if_needed(generated_headers: OrderedDict, delete_others=True):
modules = set(module for module, _ in generated_headers.keys())
make_module_dirs(modules)
# hpp
files_to_write = {}
for (module, header_name), text in generated_headers.items():
if text:
output_path = join(PATH_MODULES, module, header_name + "pp")
files_to_write[output_path] = text
# loaders
loader_modules = defaultdict(list)
for (module, header_name), text in generated_headers.items():
if text:
loader_modules[module or "base"].append(header_name)
for module, headers in loader_modules.items():
path_loader = join(PATH_MODULES, "_%s_loader.cpp" % module)
files_to_write[path_loader] = generate_loader(module, headers)
files_to_write[PATH_LOADER] = generate_main_loader(loader_modules)
write_if_different(files_to_write, delete_others)
if delete_others:
delete_other_dirs(modules)
def generate(headers_to_generate, skip_macros, not_every_point_type=False) -> OrderedDict:
"""
:return: OrderedDict
"""
main_classes, module_functions, module_variables, module_enums = {}, {}, {}, {}
for module, header_name, path in headers_to_generate[:]:
header_full_path = join(PCL_BASE, path) if path else join(PCL_BASE, module, header_name)
header = read_header(header_full_path, skip_macros)
main_classes[(module, header_name)] = get_main_classes(header, module, header_name)
module_functions[(module, header_name)] = get_functions(header, module)
module_variables[(module, header_name)] = get_variables(header)
module_enums[(module, header_name)] = get_enums(header)
classes = [c for module, header, path in headers_to_generate
for c in main_classes[(module, header)]]
dependency_tree = generators.dependency_tree.DependencyTree(classes)
loaded_point_types = load_yaml_point_types(not_every_point_type)
classes_point_types: OrderedDict = dependency_tree.get_point_types_with_dependencies(loaded_point_types)
classes_sorted_base_first = list(dependency_tree.leaf_iterator())
def index_for_class(class_):
return classes_sorted_base_first.index(make_namespace_class(class_["namespace"], class_["name"]))
# sort classes inside modules based on inheritance
for module, header in main_classes:
main_classes[(module, header)] = list(sorted(main_classes[(module, header)], key=index_for_class))
headers_to_generate = sort_headers_by_dependencies(headers_to_generate, skip_macros=skip_macros)
methods_need_overloading = check_if_needs_overloading(main_classes)
flag_instantiatable_class(dependency_tree, main_classes)
def generate_header(module, header, path, keep_if_no_instantiation) -> str:
header_functions = module_functions[(module, header)]
header_classes = main_classes[(module, header)]
methods_defined_outside = get_methods_defined_outside(header_functions)
class_definitions = generate_class_definitions(header_classes,
module,
header,
path,
methods_need_overloading.get(module),
methods_defined_outside)
function_definitions = generate_function_definitions(header_functions,
module,
header,
not_every_point_type=not_every_point_type)
instantiations = Instantiations(header_classes,
module,
header,
classes_point_types,
module_variables[(module, header)],
module_enums[(module, header)],
)
instantiation_function = instantiations.generate_instantiation_function(has_functions=bool(header_functions))
something_instantiated = len(instantiation_function.split("\n")) > 2
text = []
if something_instantiated or keep_if_no_instantiation:
text = [class_definitions, function_definitions, instantiation_function]
return "\n".join(text)
generated_headers = OrderedDict()
for module, header, path in headers_to_generate:
generated_headers[(module, header)] = generate_header(module, header, path, keep_if_no_instantiation=False)
return generated_headers
def main():
import time
t = time.time()
windows = platform.system() == "Windows"
skip_macros = []
skip_modules = []
if not windows:
skip_macros = ["_MSC_VER"]
#skip_modules = ["visualization"]
skip_modules = []
all_headers = get_headers(skip_modules=skip_modules)
not_every_point_type = "--not-every-point-type" in sys.argv
generated_headers = generate(all_headers, skip_macros, not_every_point_type)
write_stuff_if_needed(generated_headers, delete_others=True)
print("generated in %.2f s" % (time.time() - t,))
if __name__ == '__main__':
main()
language: en | language_score: 0.65474 | edu_score: 2.020542 | edu_int_score: 2
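To make the parsing step above concrete, here is a small standalone sketch of the `CppHeaderParser` calls that `read_header` wraps; the header string is invented for illustration, and the accessors mirror the ones the generator itself uses.

```python
from CppHeaderParser import CppHeaderParser

header_str = """
namespace pcl {
  template <typename PointT>
  class Foo {
   public:
    void bar(int x);
  };
}
"""

header = CppHeaderParser.CppHeader(header_str, argType="string")
for name, class_ in header.classes.items():
    public_methods = [m["name"] for m in class_["methods"]["public"]]
    print(name, class_["namespace"], public_methods)
```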
scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 0 | 204 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment and you can get them by os.environ["paramName"]
import sys, os
# this is needed for importing file winrm_wrapper from parent dir
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
f = codecs.open(os.environ["script"], encoding='utf-8', mode='r')
command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment and you can get them by os.environ["paramName"]
import sys, os
# this is needed for importing file winrm_wrapper from parent dir
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
f = codecs.open(os.environ["script"], encoding='utf-8', mode='r')
command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit() | en | 0.813955 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # All params from IdM is stored in environment and you can get them by os.environ["paramName"] # this is needed for importing file winrm_wrapper from parent dir # Load PS script from file and replace params # Call wrapper | 2.039386 | 2 |
tests/registry_test.py | Walon1998/dace | 1 | 205 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import unittest
from aenum import Enum, auto
from dace import registry
@registry.make_registry
class ExtensibleClass(object):
pass
class Extension(ExtensibleClass):
pass
@registry.extensible_enum
class ExtensibleEnumeration(Enum):
a = auto()
b = auto()
class RegistryTests(unittest.TestCase):
def test_class_registry(self):
ExtensibleClass.register(Extension)
self.assertTrue(Extension in ExtensibleClass.extensions())
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister(self):
@registry.autoregister
class Extension2(ExtensibleClass):
pass
self.assertTrue(Extension2 in ExtensibleClass.extensions())
def test_class_registry_args(self):
ExtensibleClass.register(Extension, a=True, b=1, c=2)
self.assertTrue(Extension in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension], dict(a=True, b=1, c=2))
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister_args(self):
@registry.autoregister_params(a=False, b=0)
class Extension3(ExtensibleClass):
pass
self.assertTrue(Extension3 in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension3], dict(a=False, b=0))
def test_autoregister_fail(self):
with self.assertRaises(TypeError):
@registry.autoregister
class Extension4(object):
pass
def test_enum_registry(self):
ExtensibleEnumeration.register('c')
self.assertTrue(ExtensibleEnumeration.c in ExtensibleEnumeration)
self.assertEqual(ExtensibleEnumeration.c.value, 3)
def test_enum_registry_fail(self):
with self.assertRaises(TypeError):
@registry.extensible_enum
class NotAnEnum(object):
pass
if __name__ == '__main__':
unittest.main()
| # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import unittest
from aenum import Enum, auto
from dace import registry
@registry.make_registry
class ExtensibleClass(object):
pass
class Extension(ExtensibleClass):
pass
@registry.extensible_enum
class ExtensibleEnumeration(Enum):
a = auto()
b = auto()
class RegistryTests(unittest.TestCase):
def test_class_registry(self):
ExtensibleClass.register(Extension)
self.assertTrue(Extension in ExtensibleClass.extensions())
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister(self):
@registry.autoregister
class Extension2(ExtensibleClass):
pass
self.assertTrue(Extension2 in ExtensibleClass.extensions())
def test_class_registry_args(self):
ExtensibleClass.register(Extension, a=True, b=1, c=2)
self.assertTrue(Extension in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension], dict(a=True, b=1, c=2))
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister_args(self):
@registry.autoregister_params(a=False, b=0)
class Extension3(ExtensibleClass):
pass
self.assertTrue(Extension3 in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension3], dict(a=False, b=0))
def test_autoregister_fail(self):
with self.assertRaises(TypeError):
@registry.autoregister
class Extension4(object):
pass
def test_enum_registry(self):
ExtensibleEnumeration.register('c')
self.assertTrue(ExtensibleEnumeration.c in ExtensibleEnumeration)
self.assertEqual(ExtensibleEnumeration.c.value, 3)
def test_enum_registry_fail(self):
with self.assertRaises(TypeError):
@registry.extensible_enum
class NotAnEnum(object):
pass
if __name__ == '__main__':
unittest.main()
| en | 0.669548 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved. | 2.517492 | 3 |
tests/conftest.py | aviramha/aiologstash2 | 1 | 206 | import asyncio
import logging
from json import loads
import pytest
from aiologstash2 import create_tcp_handler
logging.getLogger().setLevel(logging.DEBUG)
class FakeTcpServer:
def __init__(self):
self.data = bytearray()
self.server = None
self.futs = set()
async def start(self):
self.server = await asyncio.start_server(self.on_connect, host="127.0.0.1")
@property
def port(self):
return self.server.sockets[0].getsockname()[1]
@property
def jsons(self):
s = self.data.decode("utf8")
return [loads(i) for i in s.split("\n") if i]
async def close(self):
if self.server is None:
return
self.server.close()
await self.server.wait_closed()
self.server = None
async def on_connect(self, reader, writer):
while True:
data = await reader.read(1024)
if not data:
break
self.data.extend(data)
for fut in self.futs:
if not fut.done():
fut.set_result(None)
async def wait(self):
fut = asyncio.get_event_loop().create_future()
self.futs.add(fut)
await fut
self.futs.remove(fut)
@pytest.fixture
async def make_tcp_server():
servers = []
async def go():
server = FakeTcpServer()
await server.start()
servers.append(server)
return server
yield go
async def finalize():
for server in servers:
await server.close()
await finalize()
@pytest.fixture
async def make_tcp_handler(make_tcp_server):
handlers = []
async def go(*args, level=logging.DEBUG, **kwargs):
server = await make_tcp_server()
handler = await create_tcp_handler("127.0.0.1", server.port, **kwargs)
handlers.append(handler)
return handler, server
yield go
async def finalize():
for handler in handlers:
handler.close()
await handler.wait_closed()
await finalize()
@pytest.fixture
async def setup_logger(make_tcp_handler):
async def go(*args, **kwargs):
handler, server = await make_tcp_handler(*args, **kwargs)
logger = logging.getLogger("aiologstash_test")
logger.addHandler(handler)
return logger, handler, server
yield go
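# Illustrative usage sketch (hypothetical test; assumes the logstash JSON payload carries a
# "message" field, which is not shown in this file):
#
# async def test_sends_json(setup_logger):
#     logger, handler, server = await setup_logger()
#     logger.info("hello")
#     await server.wait()
#     assert server.jsons[0]["message"] == "hello"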
| import asyncio
import logging
from json import loads
import pytest
from aiologstash2 import create_tcp_handler
logging.getLogger().setLevel(logging.DEBUG)
class FakeTcpServer:
def __init__(self):
self.data = bytearray()
self.server = None
self.futs = set()
async def start(self):
self.server = await asyncio.start_server(self.on_connect, host="127.0.0.1")
@property
def port(self):
return self.server.sockets[0].getsockname()[1]
@property
def jsons(self):
s = self.data.decode("utf8")
return [loads(i) for i in s.split("\n") if i]
async def close(self):
if self.server is None:
return
self.server.close()
await self.server.wait_closed()
self.server = None
async def on_connect(self, reader, writer):
while True:
data = await reader.read(1024)
if not data:
break
self.data.extend(data)
for fut in self.futs:
if not fut.done():
fut.set_result(None)
async def wait(self):
fut = asyncio.get_event_loop().create_future()
self.futs.add(fut)
await fut
self.futs.remove(fut)
@pytest.fixture
async def make_tcp_server():
servers = []
async def go():
server = FakeTcpServer()
await server.start()
servers.append(server)
return server
yield go
async def finalize():
for server in servers:
await server.close()
await finalize()
@pytest.fixture
async def make_tcp_handler(make_tcp_server):
handlers = []
async def go(*args, level=logging.DEBUG, **kwargs):
server = await make_tcp_server()
handler = await create_tcp_handler("127.0.0.1", server.port, **kwargs)
handlers.append(handler)
return handler, server
yield go
async def finalize():
for handler in handlers:
handler.close()
await handler.wait_closed()
await finalize()
@pytest.fixture
async def setup_logger(make_tcp_handler):
async def go(*args, **kwargs):
handler, server = await make_tcp_handler(*args, **kwargs)
logger = logging.getLogger("aiologstash_test")
logger.addHandler(handler)
return logger, handler, server
yield go
| none | 1 | 2.268937 | 2 |
|
tcex/playbooks/playbooks_base.py | RichieB2B/tcex | 0 | 207 | """TcEx Framework Playbook module"""
# standard library
import base64
import json
import re
from collections import OrderedDict
from collections.abc import Iterable
class PlaybooksBase:
"""TcEx Playbook Module Base Class
Args:
tcex (TcEx): Instance of TcEx class.
context (str): The Redis context (hash).
output_variables (list): The requested output variables.
"""
def __init__(self, tcex, context, output_variables):
"""Initialize the Class properties."""
self.tcex = tcex
self._context = context
self._output_variables = output_variables or []
# properties
self._output_variables_by_name = None
self._output_variables_by_type = None
self.log = tcex.log
# match full variable
self._variable_match = re.compile(fr'^{self._variable_pattern}$')
# capture variable parts (exactly a variable)
self._variable_parse = re.compile(self._variable_pattern)
# match embedded variables without quotes (#App:7979:variable_name!StringArray)
self._vars_keyvalue_embedded = re.compile(fr'(?:\"\:\s?)[^\"]?{self._variable_pattern}')
def _coerce_string_value(self, value):
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
self.log.warning(f'Coercing bool value ({value}) to a string ("{str(value).lower()}").')
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
self.log.warning(f'Coercing float/int value ({value}) to a string ("{str(value)}").')
value = str(value)
return value
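    # Minimal illustration (assumes an instance `pb` of a PlaybooksBase subclass):
    #   pb._coerce_string_value(True)   -> "true"  (bools are lowercased)
    #   pb._coerce_string_value(4.2)    -> "4.2"   (floats/ints are stringified)
    #   pb._coerce_string_value("ok")   -> "ok"    (strings pass through unchanged)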
def _create(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
if variable_type == 'Binary':
# if not isinstance(value, bytes):
# value = value.encode('utf-8')
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
value = base64.b64encode(value).decode('utf-8')
elif variable_type == 'KeyValue':
if validate and (not isinstance(value, dict) or not self._is_key_value(value)):
raise RuntimeError('Invalid data provided for KeyValue.')
elif variable_type == 'String':
# coerce string values
value = self._coerce_string_value(value)
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
elif variable_type == 'TCEntity':
if validate and (not isinstance(value, dict) or not self._is_tc_entity(value)):
raise RuntimeError('Invalid data provided for TcEntity.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
def _create_array(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
# Enhanced entity array is the wild-wild west, don't validate it
if variable_type != 'TCEnhancedEntityArray':
if validate and (not isinstance(value, Iterable) or isinstance(value, (str, dict))):
raise RuntimeError(f'Invalid data provided for {variable_type}.')
value = [
*value
] # spread the value so that we know it's a list (as opposed to an iterable)
if variable_type == 'BinaryArray':
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# if not isinstance(v, bytes):
# v = v.encode('utf-8')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
elif variable_type == 'KeyValueArray':
if validate and not self._is_key_value_array(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
elif variable_type == 'StringArray':
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
elif variable_type == 'TCEntityArray':
if validate and not self._is_tc_entity_array(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
@staticmethod
def _decode_binary(data):
"""Return decoded bytes data handling data written by java apps."""
try:
data = data.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
# for data written an upstream java App
data = data.decode('latin-1')
return data
@staticmethod
def _is_key_value(data):
"""Return True if provided data has proper structure for Key Value."""
if data is None:
return False
return all(x in data for x in ['key', 'value'])
def _is_key_value_array(self, data):
"""Return True if provided data has proper structure for Key Value Array."""
for d in data:
if not self._is_key_value(d):
return False
return True
@staticmethod
def _is_tc_entity(data):
"""Return True if provided data has proper structure for TC Entity."""
if data is None:
return False
return all(x in data for x in ['id', 'value', 'type'])
def _is_tc_entity_array(self, data):
"""Return True if provided data has proper structure for TC Entity Array."""
for d in data:
if not self._is_tc_entity(d):
return False
return True
@staticmethod
def _load_value(value):
"""Return the loaded JSON value or raise an error.
Args:
value (str): The data from key/value store.
Raises:
RuntimeError: Raise error when data can't be loaded as JSON data.
Returns:
any: The de-serialized value from the key/value store.
"""
try:
return json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to JSON load data "{value}" ({e}).')
def _parse_output_variables(self):
"""Parse the output variables provided to Playbook Class.
**Example Variable Format**::
['#App:1234:status!String', '#App:1234:status_code!String']
"""
self._output_variables_by_name = {}
self._output_variables_by_type = {}
for ov in self._output_variables:
# parse the variable to get individual parts
parsed_variable = self.parse_variable(ov)
variable_name = parsed_variable.get('name')
variable_type = parsed_variable.get('type')
# store the variables in dict by name (e.g. "status_code")
self._output_variables_by_name[variable_name] = {'variable': ov}
# store the variables in dict by name-type (e.g. "status_code-String")
self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}
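    # Minimal illustration (hypothetical variable): for ['#App:1234:status!String'] the maps
    # above end up keyed as _output_variables_by_name['status'] and
    # _output_variables_by_type['status-String'], each holding {'variable': '#App:1234:status!String'}.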
def _read(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None:
self.log.warning('The key is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'Binary':
value = self._load_value(value)
if b64decode:
value = base64.b64decode(value)
if decode:
value = self._decode_binary(value)
elif variable_type == 'KeyValue':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
value = self._load_value(value)
elif variable_type == 'String':
if embedded:
value = self._read_embedded(value)
# coerce string values
value = self._coerce_string_value(self._load_value(value))
elif variable_type == 'TCEntity':
value = self._load_value(value)
return value
def _read_array(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None: # pragma: no cover
self.log.warning('The null value for key was provided.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'BinaryArray':
value = json.loads(value, object_pairs_hook=OrderedDict)
values = []
for v in value:
if v is not None and b64decode:
v = base64.b64decode(v)
if decode:
v = self._decode_binary(v)
values.append(v)
value = values
elif variable_type == 'KeyValueArray':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
try:
value = json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed loading JSON data ({value}). Error: ({e})')
elif variable_type == 'StringArray':
if embedded:
value = self._read_embedded(value)
# convert int to str
value_coerced = []
for v in self._load_value(value):
# coerce string values
value_coerced.append(self._coerce_string_value(v))
value = value_coerced
elif variable_type in ['TCEntityArray', 'TCEnhancedEntity', 'TCEnhancedEntityArray']:
value = self._load_value(value)
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
return value
def _read_embedded(self, value):
"""Read method for "embedded" variables.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables.
Embedded variable rules:
* Only user input can have embedded variables.
* Only String and KeyValueArray variables can have embedded variables.
* Variables can only be embedded one level deep.
        This method will automatically convert variables embedded in a string with values retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
            value (str): The value to be parsed and updated from the DB.
Returns:
(str): Results retrieved from DB
"""
if value is None: # pragma: no cover
return value
for variable in (v.group(0) for v in re.finditer(self._variable_parse, str(value))):
v = self.read(variable)
self.log.trace(f'embedded variable: {variable}, value: {v}')
if isinstance(v, (dict, list)):
v = json.dumps(v)
# for KeyValueArray with nested dict/list type replace the
# quoted value to ensure the resulting data is loadable JSON
value = re.sub(f'"{variable}"', v, value)
if v is not None:
# only replace variable if a non-null value is returned from kv store
# APP-1030 need to revisit this to handle variable references in kv/kvarrays that
# are None. Would like to be able to say if value is just the variable reference,
# sub None value, else insert '' in string. That would require a kv-specific
# version of this method that gets the entire list/dict instead of just the string.
value = re.sub(variable, v, value)
return value
@property
def _variable_pattern(self):
"""Regex pattern to match and parse a playbook variable."""
variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String
variable_pattern += r':([\d]+)' # app id (:7979)
variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name)
variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)
variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)
variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type
variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom
variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom
variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)
return variable_pattern
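    # Minimal illustration (assumed input): '#App:7979:results!StringArray' matches this
    # pattern with groups ('App', '7979', 'results', 'StringArray'), i.e. the app reference,
    # app id, variable name and variable type used by parse_variable()/variable_type().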
@property
def _variable_array_types(self):
"""Return list of standard playbook array variable types."""
return [
'BinaryArray',
'KeyValueArray',
'StringArray',
'TCEntityArray',
'TCEnhancedEntityArray',
]
@property
def _variable_single_types(self):
"""Return list of standard playbook single variable types."""
return [
'Binary',
'KeyValue',
'String',
'TCEntity',
'TCEnhancedEntity',
]
@property
def _variable_types(self):
"""Return list of standard playbook variable typesd."""
return self._variable_single_types + self._variable_array_types
def _wrap_embedded_keyvalue(self, data):
"""Wrap keyvalue embedded variable in double quotes.
Args:
data (str): The data with embedded variables.
Returns:
(str): Results retrieved from DB
"""
# TODO: need to verify if core still sends improper JSON for KeyValueArrays
if data is not None: # pragma: no cover
variables = []
for v in re.finditer(self._vars_keyvalue_embedded, data):
variables.append(v.group(0))
            for var in set(variables): # iterate over the set to handle duplicates
# pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String)
variable_string = re.search(self._variable_parse, var).group(0)
# reformat to replace the correct instance only, handling the case where a variable
# is embedded multiple times in the same key value array.
data = data.replace(var, f'": "{variable_string}"')
return data
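    # Minimal illustration (assumed input): a KeyValue fragment such as
    #   '{"key": "k", "value": #App:1:embedded!String}'
    # is rewritten to
    #   '{"key": "k", "value": "#App:1:embedded!String"}'
    # so the payload is valid JSON before the embedded variable is resolved.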
def create_raw(self, key, value):
"""Create method of CRUD operation for raw data.
..important:: Raw data can only be a byte, str or int. Other data structures
(dict, list, etc) must be serialized.
Args:
key (str): The variable to write to the DB.
value (bytes|int|string): The data to write to the DB.
Returns:
(str): Result of DB write.
"""
data = None
if key is not None and value is not None:
try:
data = self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
else:
self.log.warning('The key or value field was None.')
return data
def read_raw(self, key):
"""Read method of CRUD operation for raw data.
        ..important:: Bytes input will be returned as a string as there is
        no way to determine whether data from redis originated as bytes or string.
Args:
key (str): The variable to read from the DB.
Returns:
(str): Results retrieved from DB.
"""
value = None
if key is not None:
value = self.tcex.key_value_store.read(self._context, key.strip())
else:
self.log.warning('The key field was None.')
return value
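    # Minimal round-trip illustration (assumes an instance `pb` and a requested output variable
    # '#App:1234:raw.out!Binary'):
    #   pb.create_raw('#App:1234:raw.out!Binary', b'payload')
    #   pb.read_raw('#App:1234:raw.out!Binary')  # comes back as a string from the KV store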
def parse_variable(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def read(self, key, array=False, embedded=True): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def variable_type(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
| """TcEx Framework Playbook module"""
# standard library
import base64
import json
import re
from collections import OrderedDict
from collections.abc import Iterable
class PlaybooksBase:
"""TcEx Playbook Module Base Class
Args:
tcex (TcEx): Instance of TcEx class.
context (str): The Redis context (hash).
output_variables (list): The requested output variables.
"""
def __init__(self, tcex, context, output_variables):
"""Initialize the Class properties."""
self.tcex = tcex
self._context = context
self._output_variables = output_variables or []
# properties
self._output_variables_by_name = None
self._output_variables_by_type = None
self.log = tcex.log
# match full variable
self._variable_match = re.compile(fr'^{self._variable_pattern}$')
# capture variable parts (exactly a variable)
self._variable_parse = re.compile(self._variable_pattern)
# match embedded variables without quotes (#App:7979:variable_name!StringArray)
self._vars_keyvalue_embedded = re.compile(fr'(?:\"\:\s?)[^\"]?{self._variable_pattern}')
def _coerce_string_value(self, value):
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
self.log.warning(f'Coercing bool value ({value}) to a string ("{str(value).lower()}").')
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
self.log.warning(f'Coercing float/int value ({value}) to a string ("{str(value)}").')
value = str(value)
return value
def _create(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
if variable_type == 'Binary':
# if not isinstance(value, bytes):
# value = value.encode('utf-8')
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
value = base64.b64encode(value).decode('utf-8')
elif variable_type == 'KeyValue':
if validate and (not isinstance(value, dict) or not self._is_key_value(value)):
raise RuntimeError('Invalid data provided for KeyValue.')
elif variable_type == 'String':
# coerce string values
value = self._coerce_string_value(value)
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
elif variable_type == 'TCEntity':
if validate and (not isinstance(value, dict) or not self._is_tc_entity(value)):
raise RuntimeError('Invalid data provided for TcEntity.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
def _create_array(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
# Enhanced entity array is the wild-wild west, don't validate it
if variable_type != 'TCEnhancedEntityArray':
if validate and (not isinstance(value, Iterable) or isinstance(value, (str, dict))):
raise RuntimeError(f'Invalid data provided for {variable_type}.')
value = [
*value
] # spread the value so that we know it's a list (as opposed to an iterable)
if variable_type == 'BinaryArray':
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# if not isinstance(v, bytes):
# v = v.encode('utf-8')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
elif variable_type == 'KeyValueArray':
if validate and not self._is_key_value_array(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
elif variable_type == 'StringArray':
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
elif variable_type == 'TCEntityArray':
if validate and not self._is_tc_entity_array(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
@staticmethod
def _decode_binary(data):
"""Return decoded bytes data handling data written by java apps."""
try:
data = data.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
# for data written an upstream java App
data = data.decode('latin-1')
return data
@staticmethod
def _is_key_value(data):
"""Return True if provided data has proper structure for Key Value."""
if data is None:
return False
return all(x in data for x in ['key', 'value'])
def _is_key_value_array(self, data):
"""Return True if provided data has proper structure for Key Value Array."""
for d in data:
if not self._is_key_value(d):
return False
return True
@staticmethod
def _is_tc_entity(data):
"""Return True if provided data has proper structure for TC Entity."""
if data is None:
return False
return all(x in data for x in ['id', 'value', 'type'])
def _is_tc_entity_array(self, data):
"""Return True if provided data has proper structure for TC Entity Array."""
for d in data:
if not self._is_tc_entity(d):
return False
return True
@staticmethod
def _load_value(value):
"""Return the loaded JSON value or raise an error.
Args:
value (str): The data from key/value store.
Raises:
RuntimeError: Raise error when data can't be loaded as JSON data.
Returns:
any: The de-serialized value from the key/value store.
"""
try:
return json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to JSON load data "{value}" ({e}).')
def _parse_output_variables(self):
"""Parse the output variables provided to Playbook Class.
**Example Variable Format**::
['#App:1234:status!String', '#App:1234:status_code!String']
"""
self._output_variables_by_name = {}
self._output_variables_by_type = {}
for ov in self._output_variables:
# parse the variable to get individual parts
parsed_variable = self.parse_variable(ov)
variable_name = parsed_variable.get('name')
variable_type = parsed_variable.get('type')
# store the variables in dict by name (e.g. "status_code")
self._output_variables_by_name[variable_name] = {'variable': ov}
# store the variables in dict by name-type (e.g. "status_code-String")
self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}
def _read(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None:
self.log.warning('The key is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'Binary':
value = self._load_value(value)
if b64decode:
value = base64.b64decode(value)
if decode:
value = self._decode_binary(value)
elif variable_type == 'KeyValue':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
value = self._load_value(value)
elif variable_type == 'String':
if embedded:
value = self._read_embedded(value)
# coerce string values
value = self._coerce_string_value(self._load_value(value))
elif variable_type == 'TCEntity':
value = self._load_value(value)
return value
def _read_array(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None: # pragma: no cover
self.log.warning('The null value for key was provided.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'BinaryArray':
value = json.loads(value, object_pairs_hook=OrderedDict)
values = []
for v in value:
if v is not None and b64decode:
v = base64.b64decode(v)
if decode:
v = self._decode_binary(v)
values.append(v)
value = values
elif variable_type == 'KeyValueArray':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
try:
value = json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed loading JSON data ({value}). Error: ({e})')
elif variable_type == 'StringArray':
if embedded:
value = self._read_embedded(value)
# convert int to str
value_coerced = []
for v in self._load_value(value):
# coerce string values
value_coerced.append(self._coerce_string_value(v))
value = value_coerced
elif variable_type in ['TCEntityArray', 'TCEnhancedEntity', 'TCEnhancedEntityArray']:
value = self._load_value(value)
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
return value
def _read_embedded(self, value):
"""Read method for "embedded" variables.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables.
Embedded variable rules:
* Only user input can have embedded variables.
* Only String and KeyValueArray variables can have embedded variables.
* Variables can only be embedded one level deep.
        This method will automatically convert variables embedded in a string with values retrieved
from DB. If there are no keys/variables the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
            value (str): The value to be parsed and updated from the DB.
Returns:
(str): Results retrieved from DB
"""
if value is None: # pragma: no cover
return value
for variable in (v.group(0) for v in re.finditer(self._variable_parse, str(value))):
v = self.read(variable)
self.log.trace(f'embedded variable: {variable}, value: {v}')
if isinstance(v, (dict, list)):
v = json.dumps(v)
# for KeyValueArray with nested dict/list type replace the
# quoted value to ensure the resulting data is loadable JSON
value = re.sub(f'"{variable}"', v, value)
if v is not None:
# only replace variable if a non-null value is returned from kv store
# APP-1030 need to revisit this to handle variable references in kv/kvarrays that
# are None. Would like to be able to say if value is just the variable reference,
# sub None value, else insert '' in string. That would require a kv-specific
# version of this method that gets the entire list/dict instead of just the string.
value = re.sub(variable, v, value)
return value
@property
def _variable_pattern(self):
"""Regex pattern to match and parse a playbook variable."""
variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String
variable_pattern += r':([\d]+)' # app id (:7979)
variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name)
variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)
variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)
variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type
variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom
variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom
variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)
return variable_pattern
@property
def _variable_array_types(self):
"""Return list of standard playbook array variable types."""
return [
'BinaryArray',
'KeyValueArray',
'StringArray',
'TCEntityArray',
'TCEnhancedEntityArray',
]
@property
def _variable_single_types(self):
"""Return list of standard playbook single variable types."""
return [
'Binary',
'KeyValue',
'String',
'TCEntity',
'TCEnhancedEntity',
]
@property
def _variable_types(self):
"""Return list of standard playbook variable typesd."""
return self._variable_single_types + self._variable_array_types
def _wrap_embedded_keyvalue(self, data):
"""Wrap keyvalue embedded variable in double quotes.
Args:
data (str): The data with embedded variables.
Returns:
(str): Results retrieved from DB
"""
# TODO: need to verify if core still sends improper JSON for KeyValueArrays
if data is not None: # pragma: no cover
variables = []
for v in re.finditer(self._vars_keyvalue_embedded, data):
variables.append(v.group(0))
            for var in set(variables): # iterate over the set to handle duplicates
# pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String)
variable_string = re.search(self._variable_parse, var).group(0)
# reformat to replace the correct instance only, handling the case where a variable
# is embedded multiple times in the same key value array.
data = data.replace(var, f'": "{variable_string}"')
return data
def create_raw(self, key, value):
"""Create method of CRUD operation for raw data.
..important:: Raw data can only be a byte, str or int. Other data structures
(dict, list, etc) must be serialized.
Args:
key (str): The variable to write to the DB.
value (bytes|int|string): The data to write to the DB.
Returns:
(str): Result of DB write.
"""
data = None
if key is not None and value is not None:
try:
data = self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
else:
self.log.warning('The key or value field was None.')
return data
def read_raw(self, key):
"""Read method of CRUD operation for raw data.
        ..important:: Bytes input will be returned as a string as there is
        no way to determine whether data from redis originated as bytes or string.
Args:
key (str): The variable to read from the DB.
Returns:
(str): Results retrieved from DB.
"""
value = None
if key is not None:
value = self.tcex.key_value_store.read(self._context, key.strip())
else:
self.log.warning('The key field was None.')
return value
def parse_variable(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def read(self, key, array=False, embedded=True): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def variable_type(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
| en | 0.663333 | TcEx Framework Playbook module # standard library TcEx Playbook Module Base Class Args: tcex (TcEx): Instance of TcEx class. context (str): The Redis context (hash). output_variables (list): The requested output variables. Initialize the Class properties. # properties # match full variable # capture variable parts (exactly a variable) # match embedded variables without quotes (#App:7979:variable_name!StringArray) Return a string value from an bool or int. # coerce bool before int as python says a bool is an int # coerce bool to str type # coerce int to str type Create the value in Redis if applicable. # get variable type from variable value # if not isinstance(value, bytes): # value = value.encode('utf-8') # coerce string values # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') # pragma: no cover Create the value in Redis if applicable. # get variable type from variable value # Enhanced entity array is the wild-wild west, don't validate it # spread the value so that we know it's a list (as opposed to an iterable) # if not isinstance(v, bytes): # v = v.encode('utf-8') # coerce string values # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') # pragma: no cover Return decoded bytes data handling data written by java apps. # pragma: no cover # for data written an upstream java App Return True if provided data has proper structure for Key Value. Return True if provided data has proper structure for Key Value Array. Return True if provided data has proper structure for TC Entity. Return True if provided data has proper structure for TC Entity Array. Return the loaded JSON value or raise an error. Args: value (str): The data from key/value store. Raises: RuntimeError: Raise error when data can't be loaded as JSON data. Returns: any: The de-serialized value from the key/value store. # pragma: no cover Parse the output variables provided to Playbook Class. **Example Variable Format**:: ['#App:1234:status!String', '#App:1234:status_code!String'] # parse the variable to get individual parts # store the variables in dict by name (e.g. "status_code") # store the variables in dict by name-type (e.g. "status_code-String") Create the value in Redis if applicable. # get variable type from variable value # embedded variable can be unquoted, which breaks JSON. # coerce string values Create the value in Redis if applicable. # pragma: no cover # get variable type from variable value # embedded variable can be unquoted, which breaks JSON. # pragma: no cover # convert int to str # coerce string values # self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}') Read method for "embedded" variables. .. Note:: The ``read()`` method will automatically determine if the input is a variable or needs to be searched for embedded variables. Embedded variable rules: * Only user input can have embedded variables. * Only String and KeyValueArray variables can have embedded variables. * Variables can only be embedded one level deep. This method will automatically covert variables embedded in a string with value retrieved from DB. If there are no keys/variables the raw string will be returned. 
Examples:: DB Values #App:7979:variable_name!String: "embedded \\"variable\\"" #App:7979:two!String: "two" #App:7979:variable_name!StringArray: ["one", "two", "three"] Examples 1: Input: "This input has a embedded #App:7979:variable_name!String" Examples 2: Input: ["one", #App:7979:two!String, "three"] Examples 3: Input: [{ "key": "embedded string", "value": "This input has a embedded #App:7979:variable_name!String" }, { "key": "string array", "value": #App:7979:variable_name!StringArray }, { "key": "string", "value": #App:7979:variable_name!String }] Args: value (str): The value to parsed and updated from the DB. Returns: (str): Results retrieved from DB # pragma: no cover # for KeyValueArray with nested dict/list type replace the # quoted value to ensure the resulting data is loadable JSON # only replace variable if a non-null value is returned from kv store # APP-1030 need to revisit this to handle variable references in kv/kvarrays that # are None. Would like to be able to say if value is just the variable reference, # sub None value, else insert '' in string. That would require a kv-specific # version of this method that gets the entire list/dict instead of just the string. Regex pattern to match and parse a playbook variable. # match literal (#App,#Trigger) at beginning of String # app id (:7979) # variable name (:variable_name) # variable type (array) # variable type (array) # variable type # non matching for custom # non matching for custom # variable type (custom) Return list of standard playbook array variable types. Return list of standard playbook single variable types. Return list of standard playbook variable typesd. Wrap keyvalue embedded variable in double quotes. Args: data (str): The data with embedded variables. Returns: (str): Results retrieved from DB # TODO: need to verify if core still sends improper JSON for KeyValueArrays # pragma: no cover # recursion over set to handle duplicates # pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String) # reformat to replace the correct instance only, handling the case where a variable # is embedded multiple times in the same key value array. Create method of CRUD operation for raw data. ..important:: Raw data can only be a byte, str or int. Other data structures (dict, list, etc) must be serialized. Args: key (str): The variable to write to the DB. value (bytes|int|string): The data to write to the DB. Returns: (str): Result of DB write. Read method of CRUD operation for raw data. ..important:: Bytes input will be returned a as string as there is no way to determine data from redis originated as bytes or string. Args: key (str): The variable to read from the DB. Returns: (str): Results retrieved from DB. # pragma: no cover Set placeholder for child method. # pragma: no cover Set placeholder for child method. # pragma: no cover Set placeholder for child method. | 2.691435 | 3 |
DeepLearningExamples/TensorFlow/LanguageModeling/BERT/run_classifier.py | puririshi98/benchmark | 0 | 208 | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
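# Illustrative helper (hypothetical; not called anywhere in this file): shows how the closure
# returned by file_based_input_fn_builder above would typically be handed to an Estimator.
def _example_train_with_input_fn(estimator, train_file, num_train_steps):
  """Sketch only: wires a TFRecord file into estimator.train() via the builder above."""
  train_input_fn = file_based_input_fn_builder(
      input_file=train_file,
      batch_size=FLAGS.train_batch_size,
      seq_length=FLAGS.max_seq_length,
      is_training=True,
      drop_remainder=True)
  estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)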
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
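# Illustrative note (assumed shapes): with batch size B and sequence length S, input_ids,
# input_mask and segment_ids are [B, S]; the pooled output is [B, hidden_size]; logits and
# probabilities are [B, num_labels]; per_example_loss is [B] and loss is a scalar.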
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
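        # Matthews correlation coefficient from the streaming confusion-matrix counts:
        # MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).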
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
    tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if not is_training and FLAGS.use_trt:
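      # For TF-TRT inference, import the pre-built frozen graph and map the input
      # features onto its placeholders instead of rebuilding the model.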
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
    tf.compat.v1.logging.info("***** Configuration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
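    # One optimizer step consumes global_batch_size examples, so the step count is
    # examples / global batch size, scaled by the number of epochs.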
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
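      # Split the training examples across Horovod ranks; the first `remainder`
      # ranks take one extra example each so every example is covered exactly once.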
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
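    # With time_list sorted ascending, cf_XX is the slowest batch among the fastest
    # XX%, i.e. an approximate XX-th percentile latency.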
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict and master_process:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
    tf.compat.v1.logging.info("***** Running prediction *****")
tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
predict_start_time = time.time()
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.io.gfile.GFile(output_predict_file, "w") as writer:
tf.compat.v1.logging.info("***** Predict results *****")
for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks,
yield_single_examples=False):
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
predict_time_elapsed = time.time() - predict_start_time
time_list = predict_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size
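    # Same percentile-style latency summary as in the eval path above.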
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed,
predict_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
| # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
                init_string = ", *NOT_INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
        tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
        tf.compat.v1.logging.info("***** Configuration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict and master_process:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
        tf.compat.v1.logging.info("***** Running prediction *****")
tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
predict_start_time = time.time()
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.io.gfile.GFile(output_predict_file, "w") as writer:
tf.compat.v1.logging.info("***** Predict results *****")
for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks,
yield_single_examples=False):
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
predict_time_elapsed = time.time() - predict_start_time
time_list = predict_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed,
predict_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
| en | 0.798778 | # coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. BERT finetuning runner. ## Required parameters ## Other parameters Creates an `input_fn` closure to be passed to Estimator. Decodes a record to a TensorFlow example. # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. The actual input function. # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. Creates a classification model. # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. # I.e., 0.1 dropout Returns `model_fn` closure for Estimator. # pylint: disable=unused-argument The `model_fn` for Estimator. # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite # This function is not used by this file but is still used by the Colab and # people who depend on it. Creates an `input_fn` closure to be passed to Estimator. The actual input function. # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. # Removing outliers (init/warmup) in throughput computation. # Removing outliers (init/warmup) in throughput computation. | 1.853838 | 2 |
FusionIIIT/applications/academic_information/views.py | 29rj/Fusion | 29 | 209 | import datetime
import json
import os
import xlrd
import logging
from io import BytesIO
from xlsxwriter.workbook import Workbook
from xhtml2pdf import pisa
from itertools import chain
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.template.loader import get_template
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from applications.academic_procedures.models import MinimumCredits, Register, InitialRegistration, course_registration, AssistantshipClaim,Assistantship_status
from applications.globals.models import (Designation, ExtraInfo,
HoldsDesignation, DepartmentInfo)
from .forms import AcademicTimetableForm, ExamTimetableForm, MinuteForm
from .models import (Calendar, Course, Exam_timetable, Grades, Curriculum_Instructor,Constants,
Meeting, Student, Student_attendance, Timetable,Curriculum)
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester, Programme, Discipline)
from applications.academic_procedures.views import acad_proced_global_context
from applications.programme_curriculum.models import Batch
@login_required
def user_check(request):
"""
This function is used to check the type of user.
    It checks the authentication of the user.
@param:
request - contains metadata about the requested page
@variables:
current_user - get user from request
user_details - extract details of user from database
desig_id - check for designation
acadadmin - designation for Acadadmin
final_user - final designation of request user
"""
try:
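        # The academic admin is the user holding the 'Upper Division Clerk' designation;
        # user_check() returns True when the requesting user is anyone else.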
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
except Exception as e:
acadadmin=""
final_user=""
pass
if (str(acadadmin) != str(final_user)):
return True
else:
return False
def get_context(request):
"""
    This function gets basic data from the database to send to the template
@param:
request - contains metadata about the requested page
@variables:
        acadTtForm - the form to add the academic calendar
        examTtForm - the form required to add an exam timetable
        exam_t - all the exam timetable objects
        timetable - all the academic timetable objects
        calendar - all the academic calendar objects
        context - the data to be displayed on the webpage
        this_sem_course - the data of this semester's courses
        next_sem_courses - the data of next semester's courses
courses - all the courses in curriculum
course_type - list the type of courses
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
course_list = sem_for_generate_sheet()
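    # course_list gives the semesters of the current cycle; course_list_2 is the
    # complementary odd/even set used for next-semester courses.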
if(course_list[0]==1):
course_list_2 = [2, 4, 6, 8]
else:
course_list_2 = [1, 3, 5, 7]
# examTtForm = ExamTimetableForm()
# acadTtForm = AcademicTimetableForm()
# calendar = Calendar.objects.all()
# this_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# next_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# timetable = Timetable.objects.all()
# exam_t = Exam_timetable.objects.all()
procedures_context = acad_proced_global_context()
try:
examTtForm = ExamTimetableForm()
acadTtForm = AcademicTimetableForm()
calendar = Calendar.objects.all()
this_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list).filter(floated=True)
next_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list_2).filter(floated=True)
courses = Course.objects.all()
courses_list = Courses.objects.all()
course_type = Constants.COURSE_TYPE
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
pgstudent = Student.objects.filter(programme = "M.Tech") | Student.objects.filter(programme = "PhD")
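        # Assistantship claims already cleared by the TA supervisor, thesis supervisor
        # and HoD, but still awaiting approval from the academic section.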
assistant_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(acad_approval = False)
assistant_approve_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(hod_approval = True)
assistant_list_length = len(assistant_list.filter(acad_approval = False))
assis_stat = Assistantship_status.objects.all()
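        # If several Assistantship_status rows exist, the flags below reflect the last one.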
for obj in assis_stat:
assistant_flag = obj.student_status
hod_flag = obj.hod_status
account_flag = obj.account_status
except Exception as e:
examTtForm = ""
acadTtForm = ""
calendar = ""
this_sem_courses = ""
next_sem_courses = ""
courses = ""
course_type = ""
timetable = ""
exam_t = ""
pass
context = {
'acadTtForm': acadTtForm,
'examTtForm': examTtForm,
'courses': courses,
'courses_list': courses_list,
'course_type': course_type,
'exam': exam_t,
'timetable': timetable,
'academic_calendar': calendar,
'next_sem_course': next_sem_courses,
'this_sem_course': this_sem_courses,
'curriculum': curriculum,
'pgstudent' : pgstudent,
'assistant_list' : assistant_list,
'assistant_approve_list' : assistant_approve_list,
'assistant_list_length' : assistant_list_length,
'tab_id': ['1','1'],
'context': procedures_context['context'],
'lists': procedures_context['lists'],
'date': procedures_context['date'],
'query_option1': procedures_context['query_option1'],
'query_option2': procedures_context['query_option2'],
'course_verification_date' : procedures_context['course_verification_date'],
'submitted_course_list' : procedures_context['submitted_course_list'],
'result_year' : procedures_context['result_year'],
'batch_grade_data' : procedures_context['batch_grade_data'],
'batch_branch_data' : procedures_context['batch_branch_data'],
'assistant_flag' : assistant_flag,
'hod_flag' : hod_flag,
'account_flag' : account_flag
}
return context
@login_required
def homepage(request):
"""
This function is used to set up the homepage of the application.
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
        senates - the ExtraInfo objects that hold the designation of senator
        students - all the objects in the Student class
        Convenor - the ExtraInfo objects that hold the designation of convenor
        CoConvenor - the ExtraInfo objects that hold the designation of co-convenor
        meetings - all the meeting objects of senate meetings
        minuteForm - the form to add senate meeting minutes
        acadTtForm - the form to add the academic calendar
        examTtForm - the form required to add an exam timetable
        Dean - the ExtraInfo objects that hold the designation of dean
        student - the students who are senators
        extra - all the ExtraInfo objects
        exam_t - all the exam timetable objects
        timetable - all the academic timetable objects
        calendar - all the academic calendar objects
        department - all the departments in the college
        attendance - all the attendance objects of the students
        context - the data to be displayed on the webpage
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
return render(request, "ais/ais.html", context)
# ####################################
# # curriculum #
# ####################################
@login_required
def curriculum(request):
"""
This function is used to see curriculum and edit entries in a curriculum.
    It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
curriculum - Get data about curriculum from database
courses - get courses from database
courses_type - get course types from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
context['tab_id'][0]='6'
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
request_sem = request.POST['sem']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
request_sem = ""
#for checking if the user has searched for any particular curriculum
if request_batch == "" and request_branch == "" and request_programme=="" and request_sem=="":
curriculum = None #Curriculum.objects.all()
else:
if int(request_sem) == 0:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).order_by('sem')
else:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem= request_sem)
# context={
# 'courses' : courses,
# 'course_type' : course_type,
# 'curriculum' : curriculum,
# 'tab_id' :['3','1']
# }
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
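# Render only the curriculum list fragment and return it as JSON so the page can refresh the list via AJAX instead of a full reload.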
html = render_to_string('ais/curr_list.html',{'curriculum':curriculum,'courses':courses,'course_type':course_type},request)
obj = json.dumps({'html':html})
#return render(request, "ais/ais.html", context)
return HttpResponse(obj,content_type='application/json')
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def add_curriculum(request):
"""
This function is used to add a new curriculum to the database
It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
course_name - course name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','2']
}
if request.method == 'POST':
i=0
new_curr=[]
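# The add form posts one curriculum row per index (e.g. semester_0, course_code_0, credits_0, ...);
# keep reading rows until an index is missing and collect Curriculum objects for a single bulk insert.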
while True:
if "semester_"+str(i) in request.POST:
try:
programme=request.POST['AddProgramme']
batch=request.POST['AddBatch']
branch=request.POST['AddBranch']
sem=request.POST["semester_"+str(i)]
course_code=request.POST["course_code_"+str(i)]
course_name=request.POST["course_name_"+str(i)]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits_"+str(i)]
if "optional_"+str(i) in request.POST:
optional=True
else:
optional=False
course_type=request.POST["course_type_"+str(i)]
except Exception as e:
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
ins=Curriculum(
programme=programme,
batch=batch,
branch=branch,
sem=sem,
course_code=course_code,
course_id=course_id,
credits=credits,
optional=optional,
course_type=course_type,
)
new_curr.append(ins)
else:
break
i+=1
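# Create all collected curriculum rows with one bulk query.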
Curriculum.objects.bulk_create(new_curr)
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def edit_curriculum(request):
"""
This function is used to edit a curriculum entry in the database
It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
course_name - course name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == 'POST':
try:
id=request.POST['id']
programme=request.POST['programme']
batch=request.POST['batch']
branch=request.POST['branch']
sem=request.POST["sem"]
course_code=request.POST["course_code"]
course_name=request.POST["course_id"]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits"]
if request.POST['optional'] == "on":
optional=True
else:
optional=False
course_type=request.POST["course_type"]
except Exception as e:
id=""
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
entry=Curriculum.objects.all().select_related().filter(curriculum_id=id).first()
entry.programme=programme
entry.batch=batch
entry.branch=branch
entry.sem=sem
entry.course_code=course_code
entry.course_id=course_id
entry.credits=credits
entry.optional=optional
entry.course_type=course_type
entry.save()
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_curriculum(request):
"""
This function is used to delete a curriculum entry from the database
It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
dele - data being deleted from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == "POST":
dele = Curriculum.objects.select_related().filter(curriculum_id=request.POST['id'])
dele.delete()
curriculum = Curriculum.objects.select_related().filter(branch = request.POST['branch']).filter(batch = request.POST['batch']).filter(programme= request.POST['programme'])
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
return render(request, 'ais/ais.html', context)
@login_required
def next_curriculum(request):
"""
This function is used to decide curriculum for new batch.
It checks the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
now - current date from system
year - current year
batch - batch from form
curriculum - curriculum details from database
ins - insert data into database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == 'POST':
programme = request.POST['programme']
now = datetime.datetime.now()
year = int(now.year)
batch = year-1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
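# Both options copy last year's curriculum to the new batch (batch + 1); option 2 additionally re-queries the copied entries and displays them for review.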
if request.POST['option'] == '1':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
elif request.POST['option'] == '2':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
batch=batch+1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
context= {
'curriculumm' :curriculum,
'tab_id' :['3','3']
}
return render(request, "ais/ais.html", context)
else:
context= {
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
context= {
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
@login_required
def add_timetable(request):
"""
acad-admin can upload the timetable (of any type) for the semester.
@param:
request - contains metadata about the requested page.
@variables:
acadTtForm - form for uploading the academic timetable
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','1']
}
acadTtForm = AcademicTimetableForm()
if request.method == 'POST' and request.FILES:
acadTtForm = AcademicTimetableForm(request.POST, request.FILES)
if acadTtForm.is_valid():
acadTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def add_exam_timetable(request):
"""
acad-admin can upload the exam timetable of the ongoing semester.
@param:
request - contains metadata about the requested page.
@variables:
examTtForm - form for uploading the exam timetable
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','2']
}
examTtForm = ExamTimetableForm()
if request.method == 'POST' and request.FILES:
examTtForm = ExamTimetableForm(request.POST, request.FILES)
if examTtForm.is_valid():
examTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_timetable(request):
"""
acad-admin can delete the outdated timetable from the server.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Timetable.objects.get(time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def delete_exam_timetable(request):
"""
acad-admin can delete the outdated exam timetable.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of Exam time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Exam_timetable.objects.get(exam_time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def add_calendar(request):
"""
to add an entry to the academic calendar to be uploaded
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
c - object used to save the new event to the academic calendar.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
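# Dates arrive from the form as 'YYYY-MM-DD' strings; split them and build datetime.date objects.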
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
except Exception as e:
from_date=""
to_date=""
desc=""
pass
c = Calendar(
from_date=from_date,
to_date=to_date,
description=desc)
c.save()
HttpResponse("Calendar Added")
return render(request, "ais/ais.html", context)
@login_required
def update_calendar(request):
"""
to update an existing entry in the academic calendar.
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
prev_desc - Description for the previous event which is to be updated.
get_calendar_details - the Calendar object fetched from the database using the previous description.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
prev_desc = request.POST.getlist('prev_desc')[0]
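# prev_desc identifies the existing calendar entry; its dates and description are overwritten with the new values.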
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first()
get_calendar_details.description = desc
get_calendar_details.from_date = from_date
get_calendar_details.to_date = to_date
get_calendar_details.save()
except Exception as e:
from_date=""
to_date=""
desc=""
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
#Generate Attendance Sheet
def sem_for_generate_sheet():
"""
This function returns the list of semesters (odd or even) currently running, based on the current month
@variables:
now - current datetime
month - current month
"""
now = datetime.datetime.now()
month = int(now.month)
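# July to December corresponds to the odd-semester term (1, 3, 5, 7); January to June to the even-semester term (2, 4, 6, 8).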
if month >= 7 and month <= 12:
return [1, 3, 5, 7]
else:
return [2, 4, 6, 8]
@login_required
def generatexlsheet(request):
"""
to generate Course List of Registered Students
@param:
request - contains metadata about the requested page
@variables:
batch - gets the batch
course - gets the course
curr_key - gets the curriculum from database
obj - get students' registration data from database
ans - formatted array to be converted to xlsx
k - temporary array to add data to formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
title - formatting variable of title the workbook
subtitle - formatting variable of subtitle the workbook
normaltext - formatting variable for normal text
sheet - xlsx sheet to be rendered
titletext - formatting variable of title text
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
try:
batch = request.POST['batch']
course = Courses.objects.get(id = request.POST['course'])
obj = course_registration.objects.all().filter(course_id = course)
except Exception as e:
batch=""
course=""
curr_key=""
obj=""
registered_courses = []
for i in obj:
if i.student_id.batch_id.year == int(batch):
registered_courses.append(i)
ans = []
for i in registered_courses:
k = []
k.append(i.student_id.id.id)
k.append(i.student_id.id.user.first_name)
k.append(i.student_id.id.user.last_name)
k.append(i.student_id.id.department)
ans.append(k)
ans.sort()
output = BytesIO()
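# Build the workbook in memory with xlsxwriter so it can be streamed back in the HTTP response without writing to disk.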
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
title_text = str(course.name) + " : " + str(batch)
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Signature',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',60)
sheet.set_column('D:D',15)
sheet.set_column('E:E',30)
k = 4
num = 1
for i in ans:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
z,b,c = str(i[0]),i[1],i[2]
name = str(b)+" "+str(c)
temp = str(i[3]).split()
dep = str(temp[len(temp)-1])
sheet.write_string('B'+str(k),z,normaltext)
sheet.write_string('C'+str(k),name,normaltext)
sheet.write_string('D'+str(k),dep,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
st = 'attachment; filename = ' + course.code + '.xlsx'
response['Content-Disposition'] = st
return response
@login_required
def generate_preregistration_report(request):
"""
to generate the pre-registration report after pre-registration
@param:
request - contains metadata about the requested page
@variables:
sem - get current semester from current time
now - get current time
year - get current year
batch - gets the batch from form
sem - stores the next semester
obj - All the registration details appended into one
data - formatted data for context
m - counter for Sl. No (in formatted data)
z - temporary array to add data to variable data
k -temporary array to add data to formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
title - formatting variable of title the workbook
subtitle - formatting variable of subtitle the workbook
normaltext - formatting variable for normal text
sheet - xlsx sheet to be rendered
titletext - formatting variable of title text
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
sem = request.POST.get('semester_no')
batch_id=request.POST.get('batch_branch')
batch = Batch.objects.filter(id = batch_id).first()
obj = InitialRegistration.objects.filter(student_id__batch_id=batch_id, semester_id__semester_no=sem)
registered_students = set()
unregistered_students = set()
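# Students having an InitialRegistration entry for the chosen semester are counted as registered; everyone else in the batch is reported as not registered.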
for stu in obj:
registered_students.add(stu.student_id)
students = Student.objects.filter(batch_id = batch_id)
for stu in students:
if stu not in registered_students:
unregistered_students.add(stu)
data = []
m = 1
for i in unregistered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('not registered')
data.append(z)
for i in registered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('registered')
data.append(z)
output = BytesIO()
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
title_text = ("Pre-registeration : "+ batch.name + str(" ") + batch.discipline.acronym + str(" ") + str(batch.year))
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Status',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',50)
sheet.set_column('D:D',15)
sheet.set_column('E:E',15)
k = 4
num = 1
for i in data:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
a,b,c,d,e = str(i[0]),str(i[1]),str(i[2]),str(i[3]),str(i[4])
temp = str(i[3]).split()
sheet.write_string('B'+str(k),b,normaltext)
sheet.write_string('C'+str(k),c,normaltext)
sheet.write_string('D'+str(k),d,normaltext)
sheet.write_string('E'+str(k),e,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
st = 'attachment; filename = ' + batch.name + batch.discipline.acronym + str(batch.year) + '-preresgistration.xlsx'
response['Content-Disposition'] = st
return response
@login_required
def add_new_profile (request):
"""
To add details of new incoming students to the database. User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
profiles - gets the excel file having data
excel - excel file
sheet - sheet no in excel file
roll_no - details of student from file
first_name - details of student from file
last_name - details of student from file
email - details of student from file
sex - details of student from file
title - details of student from file
dob - details of student from file
fathers_name - details of student from file
mothers_name - details of student from file
category - details of student from file
phone_no - details of student from file
address - details of student from file
department - details of student from file
specialization - details of student from file
hall_no - details of student from file
programme - details of student from file
batch - details of student from file
user - new user created in database
einfo - new extrainfo object created in database
stud_data - new student object created in database
desig - get designation object of student
holds_desig - get hold_desig object of student
currs - get curriculum details
reg - create registration object in the registration table
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['2','1']
}
if request.method == 'POST' and request.FILES:
profiles=request.FILES['profiles']
excel = xlrd.open_workbook(file_contents=profiles.read())
sheet=excel.sheet_by_index(0)
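# Each row of the uploaded sheet describes one student; columns are read positionally:
# roll no, first name, last name, email, sex, date of birth, father's name, mother's name, category, phone, address, department, specialization, hall.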
for i in range(sheet.nrows):
roll_no=int(sheet.cell(i,0).value)
first_name=str(sheet.cell(i,1).value)
last_name=str(sheet.cell(i,2).value)
email=str(sheet.cell(i,3).value)
sex=str(sheet.cell(i,4).value)
if sex == 'F':
title='Ms.'
else:
title='Mr.'
dob_tmp=sheet.cell(i,5).value
dob_tmp=sheet.cell_value(rowx=i,colx=5)
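# xlrd returns dates as serial numbers; convert using the workbook's datemode.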
dob=datetime.datetime(*xlrd.xldate_as_tuple(dob_tmp,excel.datemode))
fathers_name=str(sheet.cell(i,6).value)
mothers_name=str(sheet.cell(i,7).value)
category=str(sheet.cell(i,8).value)
phone_no=int(sheet.cell(i,9).value)
address=str(sheet.cell(i,10).value)
dept=str(sheet.cell(i,11).value)
specialization=str(sheet.cell(i,12).value)
hall_no=sheet.cell(i,13 ).value
department=DepartmentInfo.objects.all().filter(name=dept).first()
if specialization == "":
specialization="None"
if hall_no == None:
hall_no=3
else:
hall_no=int(hall_no)
programme_name=request.POST['Programme']
batch_year=request.POST['Batch']
batch = Batch.objects.all().filter(name = programme_name, discipline__acronym = dept, year = batch_year).first()
user = User.objects.create_user(
username=roll_no,
password='<PASSWORD>',
first_name=first_name,
last_name=last_name,
email=email,
)
einfo = ExtraInfo.objects.create(
id=roll_no,
user=user,
title=title,
sex=sex,
date_of_birth=dob,
address=address,
phone_no=phone_no,
user_type='student',
department=department,
)
sem=1
stud_data = Student.objects.create(
id=einfo,
programme = programme_name,
batch=batch_year,
batch_id = batch,
father_name = fathers_name,
mother_name = mothers_name,
cpi = 0,
category = category,
hall_no = hall_no,
specialization = specialization,
curr_semester_no=sem,
)
desig = Designation.objects.get(name='student')
hold_des = HoldsDesignation.objects.create(
user=user,
working=user,
designation=desig,
)
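# Register the new student for every course offered in the first-semester course slots of the batch curriculum.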
sem_id = Semester.objects.get(curriculum = batch.curriculum, semester_no = sem)
course_slots = CourseSlot.objects.all().filter(semester = sem_id)
courses = []
for course_slot in course_slots:
courses += course_slot.courses.all()
new_reg=[]
for c in courses:
reg=course_registration(
course_id = c,
semester_id=sem_id,
student_id=stud_data
)
new_reg.append(reg)
course_registration.objects.bulk_create(new_reg)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
def get_faculty_list():
"""
to get the faculty list from the database
@variables:
f1,f2,f3 - temporary variables holding the queryset for each faculty designation
faculty - combined faculty data
faculty_list - list of faculty
"""
try:
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
except Exception as e:
f1=f2=f3=""
pass
faculty = list(chain(f1,f2,f3))
faculty_list = []
for i in faculty:
faculty_list.append(i)
return faculty_list
@login_required
def float_course(request):
"""
to float courses for the next semester and store the data in the database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
if request_batch == "" and request_branch == "" and request_programme=="":
curriculum = None #Curriculum.objects.all()
else:
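# Work out which semester the selected batch is currently in from today's date, then list the curriculum for the following semester (sem + 1) so its courses can be floated.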
sem = sem_for_generate_sheet()
now = datetime.datetime.now()
year = int(now.year)
if sem[0] == 2:
sem = sem[year-int(request_batch)-1]
else:
sem = sem[year-int(request_batch)]
sem+=1
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem=sem).order_by('course_code')
faculty_list = get_faculty_list()
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'faculty_list': faculty_list,
'tab_id' :['5','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def float_course_submit(request):
"""
to float courses for the next semester and store the data in the database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == "POST":
i=1
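# The float form posts rows as indexed fields (1_ccode, 1_fac, 2_ccode, ...); for every row with faculty selected,
# mark the curriculum entry as floated and create Curriculum_Instructor records, the first listed faculty becoming the chief instructor.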
while True:
if str(i)+"_ccode" in request.POST:
if str(i)+"_fac" in request.POST:
if request.POST[str(i)+"_fac"] == "" :
logging.warning("No faculty")
else:
flot = Curriculum.objects.select_related().get(curriculum_id=request.POST[str(i)+"_ccode"])
flot.floated = True
flot.save()
new_curr_inst=[]
# use a separate loop variable so the outer row counter i is not overwritten
for c,faculty_username in enumerate(request.POST.getlist(str(i)+'_fac')):
inst = get_object_or_404(User, username = faculty_username)
inst = ExtraInfo.objects.select_related('user','department').get(user=inst)
if c==0:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=True,
)
new_curr_inst.append(ins)
else:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=False,
)
new_curr_inst.append(ins)
Curriculum_Instructor.objects.bulk_create(new_curr_inst)
else:
break
i+=1
return render(request, "ais/ais.html", context)
# # ---------------------senator------------------
# @csrf_exempt
def senator(request):
# """
# to add a new student senator
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - gets the data of current user.
# user_details - gets the details of the required user.
# desig_id - used to check the designation ID.
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of senator
# hDes - holdsDesignation object to store that the particular student is holding the senator designation
# student - the student object of the new senator
# data - data of the student to be displayed in the webpage
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
pass
#print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST':
# print(request.POST, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# rollno = request.POST.getlist('Roll Number')[0]
# # print(request.POST.get('rollno'))
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Senator')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# hDes.designation = s
# hDes.save()
# student = Student.objects.get(id=extraInfo)
# data = {
# 'name': extraInfo.user.username,
# 'rollno': extraInfo.id,
# 'programme': student.programme,
# 'branch': extraInfo.department.name
# }
# return HttpResponseRedirect('/aims/')
# # return JsonResponse(data)
# else:
# return HttpResponseRedirect('/aims/')
# @csrf_exempt
def deleteSenator(request, pk):
# """
# to remove a senator from the position
# @param:
# request - contains metadata about the requested page
# @variables:
# s - the designation object that contains senator
# student - the list students that is a senator
# hDes - the holdDesignation object that stores the
# information that the particular student is a senator
# """
pass
# if request.POST:
# s = get_object_or_404(Designation, name="Senator")
# student = get_object_or_404(ExtraInfo, id=request.POST.getlist("senate_id")[0])
# hDes = get_object_or_404( HoldsDesignation, user = student.user)
# hDes.delete()
# return HttpResponseRedirect('/aims/')
# else:
# return HttpResponseRedirect('/aims/')# ####################################################
# # ##########covenors and coconvenors##################
# @csrf_exempt
def add_convenor(request):
# """
# to add a new student convenor/coconvenor
# @param:
# request - contains metadata about the requested page
# @variables:
# rollno - rollno of the student to become the convenor/coconvenor
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of Convenor
# p - designation object of Co Convenor
# result - the data that contains where the student will become
# convenor or coconvenor
# hDes - holdsDesignation object to store that the particular student is
# holding the convenor/coconvenor designation
# student - the student object of the new convenor/coconvenor
# data - data of the student to be displayed in the webpage
# """
s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# if request.method == 'POST':
# rollno = request.POST.get('rollno_convenor')
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# result = request.POST.get('designation')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# if result == "Convenor":
# hDes.designation = s
# else:
# hDes.designation = p
# hDes.save()
# data = {
# 'name': extraInfo.user.username,
# 'rollno_convenor': extraInfo.id,
# 'designation': hDes.designation.name,
# }
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def deleteConvenor(request, pk):
# """
# to remove a convenor/coconvenor from the position
# @param:
# request - contains metadata about the requested page
# pk - the primary key of that particular student field
# @variables:
# s - the designation object that contains convenor
# c - the designation object that contains co convenor
# student - the student object with the given pk
# hDes - the holdDesignation object that stores the
# information that the particular student is a convenor/coconvenor to be deleted
# data - data of the student to be hidden in the webpage
# """
# s = get_object_or_404(Designation, name="Convenor")
c = get_object_or_404(Designation, name="Co Convenor")
# student = get_object_or_404(ExtraInfo, id=pk)
# hDes = HoldsDesignation.objects.filter(user = student.user)
# designation = []
# for des in hDes:
# if des.designation == s or des.designation == c:
# designation = des.designation.name
# des.delete()
# data = {
# 'id': pk,
# 'designation': designation,
# }
# return JsonResponse(data)# ######################################################
# # ##########Senate meeting Minute##################
# @csrf_exempt
def addMinute(request):
# """
# to add a new senate meeting minute object to the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - details of the current user.
# desig_id - to check the designation of the user.
# user_details - to get the details of the required user.
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
pass
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST' and request.FILES:
# form = MinuteForm(request.POST, request.FILES)
# if form.is_valid():
# form.save()
# return HttpResponse('sucess')
# else:
# return HttpResponse('not uploaded')
# return render(request, "ais/ais.html", {})
def deleteMinute(request):
# """
# to delete an existing senate meeting minute object from the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# data - the id of the minute object to be deleted
# t - the minute object received from id to be deleted
# """
# if request.method == "POST":
# data = request.POST['delete']
# t = Meeting.objects.get(id=data)
# t.delete()
return HttpResponseRedirect('/aims/')
# # ######################################################
# # ##########Student basic profile##################
# @csrf_exempt
def add_basic_profile(request):
# """
# It adds the basic profile information like username,password, name,
# rollno, etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# name - the name of the student
# roll - the rollno of the student
# batch - the current batch of the student
# programme - the programme the student is enrolled in
# ph - the phone number of the student
# """
if request.method == "POST":
name = request.POST.get('name')
# roll = ExtraInfo.objects.get(id=request.POST.get('rollno'))
# programme = request.POST.get('programme')
# batch = request.POST.get('batch')
# ph = request.POST.get('phoneno')
# if not Student.objects.filter(id=roll).exists():
# db = Student()
# st = ExtraInfo.objects.get(id=roll.id)
# db.name = name.upper()
# db.id = roll
# db.batch = batch
# db.programme = programme
# st.phone_no = ph
# db.save()
# st.save()
# data = {
# 'name': name,
# 'rollno': roll.id,
# 'programme': programme,
# 'phoneno': ph,
# 'batch': batch
# }
# print(data)
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def delete_basic_profile(request, pk):
# """
# Deletes the student from the database
# @param:
# request - contains metadata about the requested page
# pk - the primary key of the student's record in the database table
# @variables:
# e - the extraInfo objects of the student
# user - the User object of the student
# s - the student object of the student
# """
e = get_object_or_404(ExtraInfo, id=pk)
# user = get_object_or_404(User, username = e.user.username)
# s = get_object_or_404(Student, id=e)
# data = {
# 'rollno': pk,
# }
# s.delete()
# e.delete()
# u.delete()
# return JsonResponse(data)# #########################################################
# '''
# # view to add attendance data to database
# def curriculum(request):
# '''
def delete_advanced_profile(request):
# """
# to delete the advanced information of the student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
# acadadmin - details of the acad admin
# s - the student object from the requested rollno
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# st = request.POST['delete']
# arr = st.split("-")
# stu = arr[0]
# if Student.objects.get(id=stu):
# s = Student.objects.get(id=stu)
# s.father_name = ""
# s.mother_name = ""
# s.hall_no = 1
# s.room_no = ""
# s.save()
# else:
# return HttpResponse("Data Does Not Exist")
# return HttpResponse("Data Deleted Successfully")
def add_advanced_profile(request):
# """
# It adds the advanced profile information like hall no, room no,
# profile picture, about me etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
# acadadmin - details of the acad admin
# father - father's name of the student
# rollno - the rollno of the student required to check if the student is available
# mother - mother's name of the student
# add - student's address
# cpi - student's cpi
# hall - hall no of where the student stays
# room no - hostel room no
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print(request.POST)
# rollno=request.POST.get('roll')
# print(rollno)
# student = ExtraInfo.objects.get(id=rollno)
# print(student.address)
# if not student:
# data = {}
# return JsonResponse(data)
# else:
# father = request.POST.get('father')
# mother = request.POST.get('mother')
# add = request.POST.get('address')
# hall = request.POST.get('hall')
# room = request.POST.get('room')
# cpi = request.POST.get('cpi')
# student.address = str(hall) + " " + str(room)
# student.save()
# s = Student.objects.get(id=student)
# s.father_name=father
# s.mother_name=mother
# s.hall_no = hall
# s.room_no = room
# s.save()
# return HttpResponseRedirect('/academic-procedures/')
# return HttpResponseRedirect('/academic-procedures/')
def add_optional(request):
# """
# academic admin to update the additional courses
# @param:
# request - contains metadata about the requested page.
# @variables:
# choices - selected additional courses by the academic person.
# course - Course details which is selected by the academic admin.
# """
if request.method == "POST":
pass
# print(request.POST)
# choices = request.POST.getlist('choice')
# for i in choices:
# course = Course.objects.all().filter(course_id=i).first()
# course.acad_selection = True
# course.save()
# courses = Course.objects.all()
# for i in courses:
# if i.course_id not in choices:
# i.acad_selection = False
# i.save()
# return HttpResponseRedirect('/academic-procedures/')
def min_cred(request):
# """
# to set minimum credit for a current semester that a student must take
# @param:
# request - contains metadata about the requested page.
# @variables:
# sem_cred - credit details collected from the form and appended to an array.
# sem - the MinimumCredits object fetched from the database and then updated.
# """
if request.method=="POST":
sem_cred = []
# sem_cred.append(0)
# for i in range(1, 10):
# sem = "sem_"+"1"
# sem_cred.append(request.POST.getlist(sem)[0])
# for i in range(1, 9):
# sem = MinimumCredits.objects.all().filter(semester=i).first()
# sem.credits = sem_cred[i+1]
# sem.save()
# return HttpResponse("Worked")
def view_course(request):
# if request.method == "POST":
# programme=request.POST['programme']
# batch=request.POST['batch']
# branch=request.POST['branch']
# sem=request.POST['sem']
# curriculum_courses = Curriculum.objects.filter(branch = branch).filter(batch = batch).filter(programme= programme).filter(sem = sem)
# print(curriculum_courses)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# context= {
# 'courses': courses,
# 'course_type': course_type,
# 'curriculum_course': curriculum_courses,
# }
# return render(request, "ais/ais.html", context)
# else:
# return render(request, "ais/ais.html")
return render(request, "ais/ais.html")
def delete_grade(request):
# """
# It deletes the grade of the student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the logged-in user
# user_details - details of the current user
# desig_id - designation used to identify the acad admin
# acadadmin - details of the acad admin
# final_user - details of the user
# sem - current semester of the student
# data - tag whether to delete it or not
# course - get the course details
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# print(request.POST['delete'])
# data = request.POST['delete']
# d = data.split("-")
# id = d[0]
# course = d[2]
# sem = int(d[3])
# if request.method == "POST":
# if(Grades.objects.filter(student_id=id, sem=sem)):
# s = Grades.objects.filter(student_id=id, sem=sem)
# for p in s:
# if (str(p.course_id) == course):
# print(p.course_id)
# p.delete()
# else:
# return HttpResponse("Unable to delete data")
return HttpResponse("Data Deleted SuccessFully")
@login_required
def verify_grade(request):
"""
It verifies the grades of the student
@param:
request - contains metadata about the requested page
@variables:
curr_id - curriculum id received from the form
curr_course - curriculum entries matching curr_id
grades - grades recorded for those curriculum entries
"""
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# curr_id=request.POST['course']
# print(curr_id)
# curr_course = Curriculum.objects.filter(curriculum_id=curr_id)
# grades = Grades.objects.filter(curriculum_id=curr_course)
# context= {
# 'grades': grades,
# 'tab_id' :"2"
# }
# return render(request,"ais/ais.html", context)
# else:
# return HttpResponseRedirect('/aims/')
return HttpResponseRedirect('/aims/')
def confirm_grades(request):
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print("confirm hone wala hai")
# print(request.POST)
return HttpResponseRedirect('/aims/')
| import datetime
import json
import os
import xlrd
import logging
from io import BytesIO
from xlsxwriter.workbook import Workbook
from xhtml2pdf import pisa
from itertools import chain
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.template.loader import get_template
from django.views.decorators.csrf import csrf_exempt
from django.template.loader import render_to_string
from django.contrib.auth.decorators import login_required
from applications.academic_procedures.models import MinimumCredits, Register, InitialRegistration, course_registration, AssistantshipClaim,Assistantship_status
from applications.globals.models import (Designation, ExtraInfo,
HoldsDesignation, DepartmentInfo)
from .forms import AcademicTimetableForm, ExamTimetableForm, MinuteForm
from .models import (Calendar, Course, Exam_timetable, Grades, Curriculum_Instructor,Constants,
Meeting, Student, Student_attendance, Timetable,Curriculum)
from applications.programme_curriculum.models import (CourseSlot, Course as Courses, Batch, Semester, Programme, Discipline)
from applications.academic_procedures.views import acad_proced_global_context
from applications.programme_curriculum.models import Batch
@login_required
def user_check(request):
"""
This function is used to check the type of user.
It checkes the authentication of the user.
@param:
request - contains metadata about the requested page
@variables:
current_user - get user from request
user_details - extract details of user from database
desig_id - check for designation
acadadmin - designation for Acadadmin
final_user - final designation of request user
"""
try:
current_user = get_object_or_404(User, username=request.user.username)
user_details = ExtraInfo.objects.all().select_related('user','department').filter(user=current_user).first()
desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
acadadmin = temp.working
k = str(user_details).split()
final_user = k[2]
except Exception as e:
acadadmin=""
final_user=""
pass
if (str(acadadmin) != str(final_user)):
return True
else:
return False
def get_context(request):
"""
This function gets basic gata from database to send to template
@param:
request - contains metadata about the requested page
@variables:
acadTtForm - the form to add academic calender
examTtForm - the form required to add exam timetable
exam_t - all the exam timetable objects
timetable - all the academic timetable objects
calendar - all the academic calender objects
context - the datas to be displayed in the webpage
this_sem_course - tha data of thsi semester courses
next_sem_courses - the data of next semester courses
courses - all the courses in curriculum
course_type - list the type of courses
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
course_list = sem_for_generate_sheet()
if(course_list[0]==1):
course_list_2 = [2, 4, 6, 8]
else:
course_list_2 = [1, 3, 5, 7]
# examTtForm = ExamTimetableForm()
# acadTtForm = AcademicTimetableForm()
# calendar = Calendar.objects.all()
# this_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# next_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# timetable = Timetable.objects.all()
# exam_t = Exam_timetable.objects.all()
procedures_context = acad_proced_global_context()
try:
examTtForm = ExamTimetableForm()
acadTtForm = AcademicTimetableForm()
calendar = Calendar.objects.all()
this_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list).filter(floated=True)
next_sem_courses = Curriculum.objects.all().select_related().filter(sem__in=course_list_2).filter(floated=True)
courses = Course.objects.all()
courses_list = Courses.objects.all()
course_type = Constants.COURSE_TYPE
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
pgstudent = Student.objects.filter(programme = "M.Tech") | Student.objects.filter(programme = "PhD")
assistant_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(acad_approval = False)
assistant_approve_list = AssistantshipClaim.objects.filter(ta_supervisor_remark = True).filter(thesis_supervisor_remark = True).filter(hod_approval =True).filter(hod_approval = True)
assistant_list_length = len(assistant_list.filter(acad_approval = False))
assis_stat = Assistantship_status.objects.all()
for obj in assis_stat:
assistant_flag = obj.student_status
hod_flag = obj.hod_status
account_flag = obj.account_status
except Exception as e:
examTtForm = ""
acadTtForm = ""
calendar = ""
this_sem_courses = ""
next_sem_courses = ""
courses = ""
course_type = ""
timetable = ""
exam_t = ""
pass
context = {
'acadTtForm': acadTtForm,
'examTtForm': examTtForm,
'courses': courses,
'courses_list': courses_list,
'course_type': course_type,
'exam': exam_t,
'timetable': timetable,
'academic_calendar': calendar,
'next_sem_course': next_sem_courses,
'this_sem_course': this_sem_courses,
'curriculum': curriculum,
'pgstudent' : pgstudent,
'assistant_list' : assistant_list,
'assistant_approve_list' : assistant_approve_list,
'assistant_list_length' : assistant_list_length,
'tab_id': ['1','1'],
'context': procedures_context['context'],
'lists': procedures_context['lists'],
'date': procedures_context['date'],
'query_option1': procedures_context['query_option1'],
'query_option2': procedures_context['query_option2'],
'course_verification_date' : procedures_context['course_verification_date'],
'submitted_course_list' : procedures_context['submitted_course_list'],
'result_year' : procedures_context['result_year'],
'batch_grade_data' : procedures_context['batch_grade_data'],
'batch_branch_data' : procedures_context['batch_branch_data'],
'assistant_flag' : assistant_flag,
'hod_flag' : hod_flag,
'account_flag' : account_flag
}
return context
@login_required
def homepage(request):
"""
This function is used to set up the homepage of the application.
It checkes the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
senates - the extraInfo objects that holds the designation as a senator
students - all the objects in the Student class
Convenor - the extraInfo objects that holds the designation as a convenor
CoConvenor - the extraInfo objects that holds the designation as a coconvenor
meetings - the all meeting objects held in senator meetings
minuteForm - the form to add a senate meeting minutes
acadTtForm - the form to add academic calender
examTtForm - the form required to add exam timetable
Dean - the extraInfo objects that holds the designation as a dean
student - the students as a senator
extra - all the extraInfor objects
exam_t - all the exam timetable objects
timetable - all the academic timetable objects
calendar - all the academic calender objects
department - all the departments in the college
attendance - all the attendance objects of the students
context - the datas to be displayed in the webpage
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
return render(request, "ais/ais.html", context)
# ####################################
# # curriculum #
# ####################################
@login_required
def curriculum(request):
"""
This function is used to see curriculum and edit entries in a curriculum.
It checkes the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
curriculum - Get data about curriculum from database
courses - get courses from database
courses_type - get course types from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context = get_context(request)
context['tab_id'][0]='6'
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
request_sem = request.POST['sem']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
request_sem = ""
#for checking if the user has searched for any particular curriculum
if request_batch == "" and request_branch == "" and request_programme=="" and request_sem=="":
curriculum = None #Curriculum.objects.all()
else:
if int(request_sem) == 0:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).order_by('sem')
else:
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem= request_sem)
# context={
# 'courses' : courses,
# 'course_type' : course_type,
# 'curriculum' : curriculum,
# 'tab_id' :['3','1']
# }
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
html = render_to_string('ais/curr_list.html',{'curriculum':curriculum,'courses':courses,'course_type':course_type},request)
obj = json.dumps({'html':html})
#return render(request, "ais/ais.html", context)
return HttpResponse(obj,content_type='application/json')
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def add_curriculum(request):
"""
This function is used to add new curriculum in database
It checkes the authentication of the user and also fetches the available
data from the databases to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
course_name - course-name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','2']
}
if request.method == 'POST':
i=0
new_curr=[]
while True:
if "semester_"+str(i) in request.POST:
try:
programme=request.POST['AddProgramme']
batch=request.POST['AddBatch']
branch=request.POST['AddBranch']
sem=request.POST["semester_"+str(i)]
course_code=request.POST["course_code_"+str(i)]
course_name=request.POST["course_name_"+str(i)]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits_"+str(i)]
if "optional_"+str(i) in request.POST:
optional=True
else:
optional=False
course_type=request.POST["course_type_"+str(i)]
except Exception as e:
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
ins=Curriculum(
programme=programme,
batch=batch,
branch=branch,
sem=sem,
course_code=course_code,
course_id=course_id,
credits=credits,
optional=optional,
course_type=course_type,
)
new_curr.append(ins)
else:
break
i+=1
Curriculum.objects.bulk_create(new_curr)
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
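# A minimal sketch (not part of the original views) of the bulk-insert pattern used
# in add_curriculum() above: build the Curriculum instances in memory first, then
# write them with a single bulk_create() call instead of saving row by row.
# The `rows` argument and the _example_ prefix are hypothetical.
def _example_bulk_add_curriculum(rows):
    pending = []
    for row in rows:  # rows: iterable of dicts carrying the Curriculum field values
        pending.append(Curriculum(
            programme=row['programme'],
            batch=row['batch'],
            branch=row['branch'],
            sem=row['sem'],
            course_code=row['course_code'],
            course_id=row['course_id'],
            credits=row['credits'],
            optional=row.get('optional', False),
            course_type=row['course_type'],
        ))
    # One INSERT for the whole batch; no per-row save() overhead.
    Curriculum.objects.bulk_create(pending)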
@login_required
def edit_curriculum(request):
"""
This function is used to edit curriculum in database
    It checks the authentication of the user and also fetches the available
    data from the database to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
batch - batch from form.REQUEST
branch - branch from form.REQUEST
sem - semester from form.REQUEST
course_code - course_code from form.REQUEST
        course_name - course name from form.REQUEST
course_id - course_id from database
credits - credits from form.REQUEST
optional - optional from form.REQUEST
course_type - course_type from form.REQUEST
ins - data is stored in database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == 'POST':
try:
id=request.POST['id']
programme=request.POST['programme']
batch=request.POST['batch']
branch=request.POST['branch']
sem=request.POST["sem"]
course_code=request.POST["course_code"]
course_name=request.POST["course_id"]
course_id=Course.objects.get(course_name=course_name)
credits=request.POST["credits"]
if request.POST['optional'] == "on":
optional=True
else:
optional=False
course_type=request.POST["course_type"]
except Exception as e:
id=""
programme=""
batch=""
branch=""
sem=""
course_code=""
course_name=""
course_id=""
credits=""
optional=""
course_type=""
pass
entry=Curriculum.objects.all().select_related().filter(curriculum_id=id).first()
entry.programme=programme
entry.batch=batch
entry.branch=branch
entry.sem=sem
entry.course_code=course_code
entry.course_id=course_id
entry.credits=credits
entry.optional=optional
entry.course_type=course_type
entry.save()
curriculum = Curriculum.objects.select_related().filter(branch = branch).filter(batch = batch).filter(programme= programme)
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_curriculum(request):
"""
This function is used to delete curriculum entry in database
    It checks the authentication of the user and also fetches the available
    data from the database to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
dele - data being deleted from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context={
'tab_id' :['3','1']
}
if request.method == "POST":
dele = Curriculum.objects.select_related().filter(curriculum_id=request.POST['id'])
dele.delete()
curriculum = Curriculum.objects.select_related().filter(branch = request.POST['branch']).filter(batch = request.POST['batch']).filter(programme= request.POST['programme'])
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
return render(request, 'ais/ais.html', context)
@login_required
def next_curriculum(request):
"""
This function is used to decide curriculum for new batch.
    It checks the authentication of the user and also fetches the available
    data from the database to display it on the page.
@param:
request - contains metadata about the requested page
@variables:
programme - programme from form.REQUEST
now - current date from system
year - current year
        batch - batch from form
        curriculum - curriculum details from database
        ins - insert data into database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == 'POST':
programme = request.POST['programme']
now = datetime.datetime.now()
year = int(now.year)
batch = year-1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
if request.POST['option'] == '1':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
elif request.POST['option'] == '2':
new_curriculum=[]
for i in curriculum:
ins=Curriculum(
programme=i.programme,
batch=i.batch+1,
branch=i.branch,
sem=i.sem,
course_code=i.course_code,
course_id=i.course_id,
credits=i.credits,
optional=i.optional,
course_type=i.course_type,
)
new_curriculum.append(ins)
Curriculum.objects.bulk_create(new_curriculum)
batch=batch+1
curriculum = Curriculum.objects.all().select_related().filter(batch = batch).filter(programme = programme)
context= {
'curriculumm' :curriculum,
'tab_id' :['3','3']
}
return render(request, "ais/ais.html", context)
else:
context= {
'tab_id' :['3','2']
}
return render(request, "ais/ais.html", context)
context= {
'tab_id' :['3','1']
}
return render(request, "ais/ais.html", context)
@login_required
def add_timetable(request):
"""
    acad-admin can upload the time table (of any type) for the semester.
@param:
request - contains metadata about the requested page.
@variables:
        acadTtForm - form used to upload the academic timetable
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','1']
}
acadTtForm = AcademicTimetableForm()
if request.method == 'POST' and request.FILES:
acadTtForm = AcademicTimetableForm(request.POST, request.FILES)
if acadTtForm.is_valid():
acadTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
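# A minimal sketch (not part of the original views) of the upload handling used in
# add_timetable() and add_exam_timetable() above: bind POST data and FILES to the
# form, validate, and save. AcademicTimetableForm is assumed to be a ModelForm with
# a file field, as the calls above suggest; the helper itself is hypothetical.
def _example_handle_timetable_upload(request):
    form = AcademicTimetableForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()   # stores the uploaded file through the model's file field
        return True
    return False      # caller re-renders the page with the existing context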
@login_required
def add_exam_timetable(request):
"""
    acad-admin can upload the exam timetable of the ongoing semester.
@param:
request - contains metadata about the requested page.
@variables:
        examTtForm - form used to upload the exam timetable
timetable - all timetable from database
exam_t - all exam timetable from database
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
timetable = Timetable.objects.all()
exam_t = Exam_timetable.objects.all()
context= {
'exam': exam_t,
'timetable': timetable,
'tab_id' :['10','2']
}
examTtForm = ExamTimetableForm()
if request.method == 'POST' and request.FILES:
examTtForm = ExamTimetableForm(request.POST, request.FILES)
if examTtForm.is_valid():
examTtForm.save()
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def delete_timetable(request):
"""
acad-admin can delete the outdated timetable from the server.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Timetable.objects.get(time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def delete_exam_timetable(request):
"""
acad-admin can delete the outdated exam timetable.
@param:
request - contains metadata about the requested page.
@variables:
data - data of delete dictionary in post request
t - Object of Exam time table to be deleted
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
data = request.POST['delete']
t = Exam_timetable.objects.get(exam_time_table=data)
t.delete()
return HttpResponse("TimeTable Deleted")
@login_required
def add_calendar(request):
"""
to add an entry to the academic calendar to be uploaded
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
        to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
        c - object to save a new event to the academic calendar.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
except Exception as e:
from_date=""
to_date=""
desc=""
pass
c = Calendar(
from_date=from_date,
to_date=to_date,
description=desc)
c.save()
HttpResponse("Calendar Added")
return render(request, "ais/ais.html", context)
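# A minimal sketch (not part of the original views) of the date handling in
# add_calendar()/update_calendar() above: the posted value is an ISO 'YYYY-MM-DD'
# string, which the views split manually; datetime.strptime gives the same result.
# The helper name is hypothetical.
def _example_parse_iso_date(value):
    # Manual approach used above: '2023-07-15' -> [2023, 7, 15] -> date(2023, 7, 15)
    parts = [int(p) for p in value.split('-')]
    manual = datetime.datetime(*parts).date()
    # Equivalent single call:
    parsed = datetime.datetime.strptime(value, '%Y-%m-%d').date()
    assert manual == parsed
    return parsed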
@login_required
def update_calendar(request):
"""
    to update an existing entry in the academic calendar.
@param:
request - contains metadata about the requested page.
@variables:
from_date - The starting date for the academic calendar event.
        to_date - The ending date for the academic calendar event.
desc - Description for the academic calendar event.
prev_desc - Description for the previous event which is to be updated.
        get_calendar_details - the calendar instance fetched from the database using the previous description.
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
calendar = Calendar.objects.all()
context= {
'academic_calendar' :calendar,
'tab_id' :['4','1']
}
if request.method == "POST":
try:
from_date = request.POST.getlist('from_date')
to_date = request.POST.getlist('to_date')
desc = request.POST.getlist('description')[0]
prev_desc = request.POST.getlist('prev_desc')[0]
from_date = from_date[0].split('-')
from_date = [int(i) for i in from_date]
from_date = datetime.datetime(*from_date).date()
to_date = to_date[0].split('-')
to_date = [int(i) for i in to_date]
to_date = datetime.datetime(*to_date).date()
get_calendar_details = Calendar.objects.all().filter(description=prev_desc).first()
get_calendar_details.description = desc
get_calendar_details.from_date = from_date
get_calendar_details.to_date = to_date
get_calendar_details.save()
except Exception as e:
from_date=""
to_date=""
desc=""
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
#Generate Attendance Sheet
def sem_for_generate_sheet():
"""
    Returns the list of semester numbers running in the current academic session
    (odd semesters from July to December, even semesters otherwise).
@variables:
now - current datetime
month - current month
"""
now = datetime.datetime.now()
month = int(now.month)
if month >= 7 and month <= 12:
return [1, 3, 5, 7]
else:
return [2, 4, 6, 8]
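# A hypothetical helper (not part of the original views) showing how the list from
# sem_for_generate_sheet() is indexed by the views below (e.g. float_course()):
# a batch admitted in `batch_year` is `now.year - batch_year` academic years old,
# and in the January-June session the index is shifted back by one.
def _example_current_semester(batch_year, now=None):
    now = now or datetime.datetime.now()
    sems = sem_for_generate_sheet()
    offset = now.year - int(batch_year)
    if sems[0] == 2:      # even (January-June) session
        offset -= 1
    return sems[offset]   # e.g. batch 2020 in September 2020 -> sems[0] == 1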
@login_required
def generatexlsheet(request):
"""
to generate Course List of Registered Students
@param:
request - contains metadata about the requested page
@variables:
batch - gets the batch
course - gets the course
curr_key - gets the curriculum from database
        obj - get students' registration data from database
        ans - formatted array to be converted to xlsx
        k - temporary array to add data to the formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
        title - format for the workbook title
        subtitle - format for the workbook subtitle
        normaltext - format for normal text
        sheet - xlsx sheet to be rendered
        title_text - title string of the workbook
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
try:
batch = request.POST['batch']
course = Courses.objects.get(id = request.POST['course'])
obj = course_registration.objects.all().filter(course_id = course)
except Exception as e:
batch=""
course=""
curr_key=""
obj=""
registered_courses = []
for i in obj:
if i.student_id.batch_id.year == int(batch):
registered_courses.append(i)
ans = []
for i in registered_courses:
k = []
k.append(i.student_id.id.id)
k.append(i.student_id.id.user.first_name)
k.append(i.student_id.id.user.last_name)
k.append(i.student_id.id.department)
ans.append(k)
ans.sort()
output = BytesIO()
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
    title_text = str(course.name) + " : " + str(batch)
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Signature',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',60)
sheet.set_column('D:D',15)
sheet.set_column('E:E',30)
k = 4
num = 1
for i in ans:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
z,b,c = str(i[0]),i[1],i[2]
name = str(b)+" "+str(c)
temp = str(i[3]).split()
dep = str(temp[len(temp)-1])
sheet.write_string('B'+str(k),z,normaltext)
sheet.write_string('C'+str(k),name,normaltext)
sheet.write_string('D'+str(k),dep,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
st = 'attachment; filename = ' + course.code + '.xlsx'
response['Content-Disposition'] = st
return response
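# A minimal sketch (not part of the original views) of the in-memory XlsxWriter
# pattern used by generatexlsheet() above: write the workbook into a BytesIO
# buffer, close it, and stream the bytes back as an attachment. The row layout
# and helper name are hypothetical.
def _example_xlsx_response(rows, filename='report.xlsx'):
    from io import BytesIO
    from xlsxwriter.workbook import Workbook
    output = BytesIO()
    book = Workbook(output, {'in_memory': True})
    sheet = book.add_worksheet()
    for r, row in enumerate(rows):
        for c, value in enumerate(row):
            sheet.write(r, c, value)   # write() infers the cell type
    book.close()                       # must be closed before reading the buffer
    output.seek(0)
    response = HttpResponse(output.read(), content_type='application/vnd.ms-excel')
    response['Content-Disposition'] = 'attachment; filename = ' + filename
    return response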
@login_required
def generate_preregistration_report(request):
"""
    to generate pre-registration report after pre-registration
@param:
request - contains metadata about the requested page
@variables:
        sem - semester number from the form
        now - get current time
        year - get current year
        batch - the batch object fetched using the batch id from the form
        obj - all the initial registration records for the batch and semester
        data - formatted data for context
        m - counter for Sl. No (in formatted data)
        z - temporary array to add data to variable data
        k - temporary array to add data to the formatted array/variable
output - io Bytes object to write to xlsx file
book - workbook of xlsx file
        title - format for the workbook title
        subtitle - format for the workbook subtitle
        normaltext - format for normal text
        sheet - xlsx sheet to be rendered
        title_text - title string of the workbook
dep - temporary variables
z - temporary variables for final output
b - temporary variables for final output
c - temporary variables for final output
st - temporary variables for final output
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
if request.method == "POST":
sem = request.POST.get('semester_no')
batch_id=request.POST.get('batch_branch')
batch = Batch.objects.filter(id = batch_id).first()
obj = InitialRegistration.objects.filter(student_id__batch_id=batch_id, semester_id__semester_no=sem)
registered_students = set()
unregistered_students = set()
for stu in obj:
registered_students.add(stu.student_id)
students = Student.objects.filter(batch_id = batch_id)
for stu in students:
if stu not in registered_students:
unregistered_students.add(stu)
data = []
m = 1
for i in unregistered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('not registered')
data.append(z)
for i in registered_students:
z = []
z.append(m)
m += 1
z.append(i.id.user.username)
z.append(str(i.id.user.first_name)+" "+str(i.id.user.last_name))
z.append(i.id.department.name)
z.append('registered')
data.append(z)
output = BytesIO()
book = Workbook(output,{'in_memory':True})
title = book.add_format({'bold': True,
'font_size': 22,
'align': 'center',
'valign': 'vcenter'})
subtitle = book.add_format({'bold': True,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
normaltext = book.add_format({'bold': False,
'font_size': 15,
'align': 'center',
'valign': 'vcenter'})
sheet = book.add_worksheet()
title_text = ("Pre-registeration : "+ batch.name + str(" ") + batch.discipline.acronym + str(" ") + str(batch.year))
sheet.set_default_row(25)
sheet.merge_range('A2:E2', title_text, title)
sheet.write_string('A3',"Sl. No",subtitle)
sheet.write_string('B3',"Roll No",subtitle)
sheet.write_string('C3',"Name",subtitle)
sheet.write_string('D3',"Discipline",subtitle)
sheet.write_string('E3','Status',subtitle)
sheet.set_column('A:A',20)
sheet.set_column('B:B',20)
sheet.set_column('C:C',50)
sheet.set_column('D:D',15)
sheet.set_column('E:E',15)
k = 4
num = 1
for i in data:
sheet.write_number('A'+str(k),num,normaltext)
num+=1
            a,b,c,d,e = str(i[0]),str(i[1]),str(i[2]),str(i[3]),str(i[4])
sheet.write_string('B'+str(k),b,normaltext)
sheet.write_string('C'+str(k),c,normaltext)
sheet.write_string('D'+str(k),d,normaltext)
sheet.write_string('E'+str(k),e,normaltext)
k+=1
book.close()
output.seek(0)
response = HttpResponse(output.read(),content_type = 'application/vnd.ms-excel')
st = 'attachment; filename = ' + batch.name + batch.discipline.acronym + str(batch.year) + '-preresgistration.xlsx'
response['Content-Disposition'] = st
return response
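# A minimal sketch (not part of the original views) of the registered/unregistered
# split computed in generate_preregistration_report() above: collect the students
# who appear in InitialRegistration for the semester, then take the set difference
# against all students of the batch. The helper name is hypothetical.
def _example_split_registration(batch_id, sem_no):
    registered = set(
        reg.student_id
        for reg in InitialRegistration.objects.filter(
            student_id__batch_id=batch_id, semester_id__semester_no=sem_no)
    )
    unregistered = set(Student.objects.filter(batch_id=batch_id)) - registered
    return registered, unregistered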
@login_required
def add_new_profile (request):
"""
    To add details of new upcoming students in the database. User must be logged in and must be acadadmin.
@param:
request - contains metadata about the requested page.
@variables:
profiles - gets the excel file having data
excel - excel file
sheet - sheet no in excel file
roll_no - details of student from file
first_name - details of student from file
last_name - details of student from file
email - details of student from file
sex - details of student from file
title - details of student from file
dob - details of student from file
fathers_name - details of student from file
mothers_name - details of student from file
category - details of student from file
phone_no - details of student from file
address - details of student from file
department - details of student from file
specialization - details of student from file
hall_no - details of student from file
programme - details of student from file
batch - details of student from file
user - new user created in database
einfo - new extrainfo object created in database
stud_data - new student object created in database
desig - get designation object of student
holds_desig - get hold_desig object of student
currs - get curriculum details
        reg - create registration object in registration table
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['2','1']
}
if request.method == 'POST' and request.FILES:
profiles=request.FILES['profiles']
excel = xlrd.open_workbook(file_contents=profiles.read())
sheet=excel.sheet_by_index(0)
for i in range(sheet.nrows):
roll_no=int(sheet.cell(i,0).value)
first_name=str(sheet.cell(i,1).value)
last_name=str(sheet.cell(i,2).value)
email=str(sheet.cell(i,3).value)
sex=str(sheet.cell(i,4).value)
if sex == 'F':
title='Ms.'
else:
title='Mr.'
            dob_tmp=sheet.cell_value(rowx=i,colx=5)
dob=datetime.datetime(*xlrd.xldate_as_tuple(dob_tmp,excel.datemode))
fathers_name=str(sheet.cell(i,6).value)
mothers_name=str(sheet.cell(i,7).value)
category=str(sheet.cell(i,8).value)
phone_no=int(sheet.cell(i,9).value)
address=str(sheet.cell(i,10).value)
dept=str(sheet.cell(i,11).value)
specialization=str(sheet.cell(i,12).value)
hall_no=sheet.cell(i,13 ).value
department=DepartmentInfo.objects.all().filter(name=dept).first()
if specialization == "":
specialization="None"
            if hall_no == None or hall_no == "":
hall_no=3
else:
hall_no=int(hall_no)
programme_name=request.POST['Programme']
batch_year=request.POST['Batch']
batch = Batch.objects.all().filter(name = programme_name, discipline__acronym = dept, year = batch_year).first()
user = User.objects.create_user(
username=roll_no,
password='<PASSWORD>',
first_name=first_name,
last_name=last_name,
email=email,
)
einfo = ExtraInfo.objects.create(
id=roll_no,
user=user,
title=title,
sex=sex,
date_of_birth=dob,
address=address,
phone_no=phone_no,
user_type='student',
department=department,
)
sem=1
stud_data = Student.objects.create(
id=einfo,
programme = programme_name,
batch=batch_year,
batch_id = batch,
father_name = fathers_name,
mother_name = mothers_name,
cpi = 0,
category = category,
hall_no = hall_no,
specialization = specialization,
curr_semester_no=sem,
)
desig = Designation.objects.get(name='student')
hold_des = HoldsDesignation.objects.create(
user=user,
working=user,
designation=desig,
)
sem_id = Semester.objects.get(curriculum = batch.curriculum, semester_no = sem)
course_slots = CourseSlot.objects.all().filter(semester = sem_id)
courses = []
for course_slot in course_slots:
courses += course_slot.courses.all()
new_reg=[]
for c in courses:
reg=course_registration(
course_id = c,
semester_id=sem_id,
student_id=stud_data
)
new_reg.append(reg)
course_registration.objects.bulk_create(new_reg)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
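# A minimal sketch (not part of the original views) of the xlrd pattern used in
# add_new_profile() above: open the uploaded workbook from memory, walk its rows,
# and decode Excel date serials with xldate_as_tuple(). The column layout and
# helper name are hypothetical.
def _example_read_profiles(uploaded_file):
    excel = xlrd.open_workbook(file_contents=uploaded_file.read())
    sheet = excel.sheet_by_index(0)
    rows = []
    for i in range(sheet.nrows):
        roll_no = int(sheet.cell(i, 0).value)
        name = str(sheet.cell(i, 1).value)
        # Excel stores dates as float serials; datemode is needed to decode them.
        dob = datetime.datetime(*xlrd.xldate_as_tuple(sheet.cell(i, 5).value,
                                                      excel.datemode))
        rows.append((roll_no, name, dob))
    return rows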
def get_faculty_list():
"""
to get faculty list from database
    @variables:
        f1,f2,f3 - temporary variables
        faculty - details of faculty from the database
faculty_list - list of faculty
"""
try:
f1 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Assistant Professor"))
f2 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Professor"))
f3 = HoldsDesignation.objects.select_related().filter(designation=Designation.objects.get(name = "Associate Professor"))
except Exception as e:
f1=f2=f3=""
pass
faculty = list(chain(f1,f2,f3))
faculty_list = []
for i in faculty:
faculty_list.append(i)
return faculty_list
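# A minimal sketch (not part of the original views) of the queryset-combining
# pattern used by get_faculty_list() above: chain() concatenates the individual
# querysets into a single iterable without building an intermediate list per
# designation. The helper name is hypothetical.
def _example_designation_holders(names):
    from itertools import chain
    querysets = [
        HoldsDesignation.objects.select_related().filter(
            designation=Designation.objects.get(name=n))
        for n in names
    ]
    return list(chain(*querysets))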
@login_required
def float_course(request):
"""
    to float courses for the next sem and store data in database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == 'POST':
try:
request_batch = request.POST['batch']
request_branch = request.POST['branch']
request_programme = request.POST['programme']
except Exception as e:
request_batch = ""
request_branch = ""
request_programme = ""
if request_batch == "" and request_branch == "" and request_programme=="":
curriculum = None #Curriculum.objects.all()
else:
sem = sem_for_generate_sheet()
now = datetime.datetime.now()
year = int(now.year)
if sem[0] == 2:
sem = sem[year-int(request_batch)-1]
else:
sem = sem[year-int(request_batch)]
sem+=1
curriculum = Curriculum.objects.select_related().filter(branch = request_branch).filter(batch = request_batch).filter(programme= request_programme).filter(sem=sem).order_by('course_code')
faculty_list = get_faculty_list()
courses = Course.objects.all()
course_type = Constants.COURSE_TYPE
context= {
'courses': courses,
'course_type': course_type,
'curriculum': curriculum,
'faculty_list': faculty_list,
'tab_id' :['5','1']
}
return render(request, "ais/ais.html", context)
else:
return render(request, "ais/ais.html", context)
return render(request, "ais/ais.html", context)
@login_required
def float_course_submit(request):
"""
    to float courses for the next sem and store data in database.
User must be logged in and must be acadadmin
@param:
request - contains metadata about the requested page.
@variables:
request_batch - Batch from form
request_branch - Branch from form
request_programme - Programme from form
request_sem - Semester from form
"""
if user_check(request):
return HttpResponseRedirect('/academic-procedures/')
context= {
'tab_id' :['5','1']
}
if request.method == "POST":
i=1
while True:
if str(i)+"_ccode" in request.POST:
if str(i)+"_fac" in request.POST:
if request.POST[str(i)+"_fac"] == "" :
logging.warning("No faculty")
else:
flot = Curriculum.objects.select_related().get(curriculum_id=request.POST[str(i)+"_ccode"])
flot.floated = True
flot.save()
new_curr_inst=[]
                        for c, fac in enumerate(request.POST.getlist(str(i)+'_fac')):
                            inst = get_object_or_404(User, username = fac)
inst = ExtraInfo.objects.select_related('user','department').get(user=inst)
if c==0:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=True,
)
new_curr_inst.append(ins)
else:
ins=Curriculum_Instructor(
curriculum_id=flot,
instructor_id=inst,
chief_inst=False,
)
new_curr_inst.append(ins)
Curriculum_Instructor.objects.bulk_create(new_curr_inst)
else:
break
i+=1
return render(request, "ais/ais.html", context)
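# A minimal sketch (not part of the original views) of the indexed-field loop used
# in add_curriculum() and float_course_submit() above: the template posts fields
# named "<row>_ccode", "<row>_fac", ... and the view walks the indices until one
# is missing. The helper name is hypothetical; `post_data` is request.POST.
def _example_collect_indexed_rows(post_data):
    rows = []
    i = 1
    while str(i) + "_ccode" in post_data:
        rows.append({
            'curriculum_id': post_data[str(i) + "_ccode"],
            'faculty': post_data.getlist(str(i) + "_fac"),
        })
        i += 1
    return rows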
# # ---------------------senator------------------
# @csrf_exempt
def senator(request):
# """
# to add a new student senator
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - gets the data of current user.
# user_details - gets the details of the required user.
# desig_id - used to check the designation ID.
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of senator
    # hDes - holdsDesignation object to store that the particular student is holding the senator designation
    # student - the student object of the new senator
    # data - data of the student to be displayed in the webpage
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
    # temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
    pass
#print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST':
# print(request.POST, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
# rollno = request.POST.getlist('Roll Number')[0]
# # print(request.POST.get('rollno'))
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Senator')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# hDes.designation = s
# hDes.save()
# student = Student.objects.get(id=extraInfo)
# data = {
# 'name': extraInfo.user.username,
# 'rollno': extraInfo.id,
# 'programme': student.programme,
# 'branch': extraInfo.department.name
# }
# return HttpResponseRedirect('/aims/')
# # return JsonResponse(data)
# else:
# return HttpResponseRedirect('/aims/')
# @csrf_exempt
def deleteSenator(request, pk):
# """
# to remove a senator from the position
# @param:
# request - contains metadata about the requested page
# @variables:
# s - the designation object that contains senator
# student - the list students that is a senator
# hDes - the holdDesignation object that stores the
# information that the particular student is a senator
# """
pass
# if request.POST:
# s = get_object_or_404(Designation, name="Senator")
# student = get_object_or_404(ExtraInfo, id=request.POST.getlist("senate_id")[0])
# hDes = get_object_or_404( HoldsDesignation, user = student.user)
# hDes.delete()
# return HttpResponseRedirect('/aims/')
# else:
# return HttpResponseRedirect('/aims/')# ####################################################
# # ##########covenors and coconvenors##################
# @csrf_exempt
def add_convenor(request):
# """
# to add a new student convenor/coconvenor
# @param:
# request - contains metadata about the requested page
# @variables:
# rollno - rollno of the student to become the convenor/coconvenor
# extraInfo - extraInfo object of the student with that rollno
# s - designation object of Convenor
# p - designation object of Co Convenor
# result - the data that contains where the student will become
# convenor or coconvenor
    # hDes - holdsDesignation object to store that the particular student is
# holding the convenor/coconvenor designation
# student - the student object of the new convenor/coconvenor
# data - data of the student to be displayed in the webpage
# """
s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# if request.method == 'POST':
# rollno = request.POST.get('rollno_convenor')
# extraInfo = ExtraInfo.objects.get(id=rollno)
# s = Designation.objects.get(name='Convenor')
# p = Designation.objects.get(name='Co Convenor')
# result = request.POST.get('designation')
# hDes = HoldsDesignation()
# hDes.user = extraInfo.user
# hDes.working = extraInfo.user
# if result == "Convenor":
# hDes.designation = s
# else:
# hDes.designation = p
# hDes.save()
# data = {
# 'name': extraInfo.user.username,
# 'rollno_convenor': extraInfo.id,
# 'designation': hDes.designation.name,
# }
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def deleteConvenor(request, pk):
# """
# to remove a convenor/coconvenor from the position
# @param:
# request - contains metadata about the requested page
# pk - the primary key of that particular student field
# @variables:
# s - the designation object that contains convenor
# c - the designation object that contains co convenor
# student - the student object with the given pk
# hDes - the holdDesignation object that stores the
# information that the particular student is a convenor/coconvenor to be deleted
# data - data of the student to be hidden in the webpage
# """
# s = get_object_or_404(Designation, name="Convenor")
c = get_object_or_404(Designation, name="Co Convenor")
# student = get_object_or_404(ExtraInfo, id=pk)
# hDes = HoldsDesignation.objects.filter(user = student.user)
# designation = []
# for des in hDes:
# if des.designation == s or des.designation == c:
# designation = des.designation.name
# des.delete()
# data = {
# 'id': pk,
# 'designation': designation,
# }
# return JsonResponse(data)# ######################################################
# # ##########Senate meeting Minute##################
# @csrf_exempt
def addMinute(request):
# """
# to add a new senate meeting minute object to the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - details of the current user.
# desig_id - to check the designation of the user.
# user_details - to get the details of the required user.
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
    # temp = HoldsDesignation.objects.all().select_related().filter(designation = desig_id).first()
    pass
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == 'POST' and request.FILES:
# form = MinuteForm(request.POST, request.FILES)
# if form.is_valid():
# form.save()
# return HttpResponse('sucess')
# else:
# return HttpResponse('not uploaded')
# return render(request, "ais/ais.html", {})
def deleteMinute(request):
# """
# to delete an existing senate meeting minute object from the database.
# @param:
# request - contains metadata about the requested page
# @variables:
# data - the id of the minute object to be deleted
# t - the minute object received from id to be deleted
# """
# if request.method == "POST":
# data = request.POST['delete']
# t = Meeting.objects.get(id=data)
# t.delete()
return HttpResponseRedirect('/aims/')
# # ######################################################
# # ##########Student basic profile##################
# @csrf_exempt
def add_basic_profile(request):
# """
# It adds the basic profile information like username,password, name,
# rollno, etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# name - the name of the student
# roll - the rollno of the student
# batch - the current batch of the student
# programme - the programme the student is enrolled in
# ph - the phone number of the student
# """
if request.method == "POST":
name = request.POST.get('name')
# roll = ExtraInfo.objects.get(id=request.POST.get('rollno'))
# programme = request.POST.get('programme')
# batch = request.POST.get('batch')
# ph = request.POST.get('phoneno')
# if not Student.objects.filter(id=roll).exists():
# db = Student()
# st = ExtraInfo.objects.get(id=roll.id)
# db.name = name.upper()
# db.id = roll
# db.batch = batch
# db.programme = programme
# st.phone_no = ph
# db.save()
# st.save()
# data = {
# 'name': name,
# 'rollno': roll.id,
# 'programme': programme,
# 'phoneno': ph,
# 'batch': batch
# }
# print(data)
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# else:
# data = {}
# return JsonResponse(data)
# @csrf_exempt
def delete_basic_profile(request, pk):
# """
# Deletes the student from the database
# @param:
# request - contains metadata about the requested page
# pk - the primary key of the student's record in the database table
# @variables:
# e - the extraInfo objects of the student
# user - the User object of the student
# s - the student object of the student
# """
e = get_object_or_404(ExtraInfo, id=pk)
# user = get_object_or_404(User, username = e.user.username)
# s = get_object_or_404(Student, id=e)
# data = {
# 'rollno': pk,
# }
# s.delete()
# e.delete()
# u.delete()
# return JsonResponse(data)# #########################################################
# '''
# # view to add attendance data to database
# def curriculum(request):
# '''
def delete_advanced_profile(request):
# """
# to delete the advance information of the student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
    # acadadmin - details of the acad admin
# s - the student object from the requested rollno
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# st = request.POST['delete']
# arr = st.split("-")
# stu = arr[0]
# if Student.objects.get(id=stu):
# s = Student.objects.get(id=stu)
# s.father_name = ""
# s.mother_name = ""
# s.hall_no = 1
# s.room_no = ""
# s.save()
# else:
# return HttpResponse("Data Does Not Exist")
# return HttpResponse("Data Deleted Successfully")
def add_advanced_profile(request):
# """
# It adds the advance profile information like hall no, room no,
# profile picture, about me etc of a student
# @param:
# request - contains metadata about the requested page
# @variables:
# current_user - the username of the logged in user
# user_details - the details of the current user
# desig_id - checking the designation of the current user
    # acadadmin - details of the acad admin
# father - father's name of the student
# rollno - the rollno of the student required to check if the student is available
# mother - mother's name of the student
# add - student's address
# cpi - student's cpi
# hall - hall no of where the student stays
# room no - hostel room no
# """
current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print(request.POST)
# rollno=request.POST.get('roll')
# print(rollno)
# student = ExtraInfo.objects.get(id=rollno)
# print(student.address)
# if not student:
# data = {}
# return JsonResponse(data)
# else:
# father = request.POST.get('father')
# mother = request.POST.get('mother')
# add = request.POST.get('address')
# hall = request.POST.get('hall')
# room = request.POST.get('room')
# cpi = request.POST.get('cpi')
# student.address = str(hall) + " " + str(room)
# student.save()
# s = Student.objects.get(id=student)
# s.father_name=father
# s.mother_name=mother
# s.hall_no = hall
# s.room_no = room
# s.save()
# return HttpResponseRedirect('/academic-procedures/')
# return HttpResponseRedirect('/academic-procedures/')
def add_optional(request):
# """
    # academic admin to update the additional courses
# @param:
# request - contains metadata about the requested page.
# @variables:
    # choices - additional courses selected by the academic admin.
# course - Course details which is selected by the academic admin.
# """
if request.method == "POST":
pass
# print(request.POST)
# choices = request.POST.getlist('choice')
# for i in choices:
# course = Course.objects.all().filter(course_id=i).first()
# course.acad_selection = True
# course.save()
# courses = Course.objects.all()
# for i in courses:
# if i.course_id not in choices:
# i.acad_selection = False
# i.save()
# return HttpResponseRedirect('/academic-procedures/')
def min_cred(request):
# """
# to set minimum credit for a current semester that a student must take
# @param:
# request - contains metadata about the requested page.
# @variables:
# sem_cred = Get credit details from forms and the append it to an array.
# sem - Get the object for the minimum credits from the database and the update it.
# """
if request.method=="POST":
sem_cred = []
# sem_cred.append(0)
# for i in range(1, 10):
# sem = "sem_"+"1"
# sem_cred.append(request.POST.getlist(sem)[0])
# for i in range(1, 9):
# sem = MinimumCredits.objects.all().filter(semester=i).first()
# sem.credits = sem_cred[i+1]
# sem.save()
# return HttpResponse("Worked")
def view_course(request):
# if request.method == "POST":
# programme=request.POST['programme']
# batch=request.POST['batch']
# branch=request.POST['branch']
# sem=request.POST['sem']
# curriculum_courses = Curriculum.objects.filter(branch = branch).filter(batch = batch).filter(programme= programme).filter(sem = sem)
# print(curriculum_courses)
# courses = Course.objects.all()
# course_type = Constants.COURSE_TYPE
# context= {
# 'courses': courses,
# 'course_type': course_type,
# 'curriculum_course': curriculum_courses,
# }
# return render(request, "ais/ais.html", context)
# else:
# return render(request, "ais/ais.html")
return render(request, "ais/ais.html")
def delete_grade(request):
# """
# It deletes the grade of the student
# @param:
# request - contains metadata about the requested page
# @variables:
    # current_user - the logged-in user
    # user_details - details of the current user
    # desig_id - designation used to check whether the user is acad admin
    # acadadmin - the acad admin user
# final_user - details of the user
# sem - current semester of the student
# data - tag whether to delete it or not
# course - get the course details
# """
# current_user = get_object_or_404(User, username=request.user.username)
# user_details = ExtraInfo.objects.all().filter(user=current_user).first()
# desig_id = Designation.objects.all().filter(name='Upper Division Clerk')
# temp = HoldsDesignation.objects.all().filter(designation = desig_id).first()
# print (temp)
# print (current_user)
# acadadmin = temp.working
# k = str(user_details).split()
# print(k)
# final_user = k[2]
# if (str(acadadmin) != str(final_user)):
# return HttpResponseRedirect('/academic-procedures/')
# print(request.POST['delete'])
# data = request.POST['delete']
# d = data.split("-")
# id = d[0]
# course = d[2]
# sem = int(d[3])
# if request.method == "POST":
# if(Grades.objects.filter(student_id=id, sem=sem)):
# s = Grades.objects.filter(student_id=id, sem=sem)
# for p in s:
# if (str(p.course_id) == course):
# print(p.course_id)
# p.delete()
# else:
# return HttpResponse("Unable to delete data")
return HttpResponse("Data Deleted SuccessFully")
@login_required
def verify_grade(request):
"""
    It verifies the grades of the student
    @param:
        request - contains metadata about the requested page
    @variables:
        current_user - the logged-in user
        user_details - details of the current user
        desig_id - designation used to check whether the user is acad admin
        acadadmin - the acad admin user
        subject - subject whose grades have to be verified
        sem - semester of the student
        grade - grade to be verified
        course - course for which the grade is verified
"""
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# curr_id=request.POST['course']
# print(curr_id)
# curr_course = Curriculum.objects.filter(curriculum_id=curr_id)
# grades = Grades.objects.filter(curriculum_id=curr_course)
# context= {
# 'grades': grades,
# 'tab_id' :"2"
# }
# return render(request,"ais/ais.html", context)
# else:
# return HttpResponseRedirect('/aims/')
return HttpResponseRedirect('/aims/')
def confirm_grades(request):
# if user_check(request):
# return HttpResponseRedirect('/academic-procedures/')
# if request.method == "POST":
# print("confirm hone wala hai")
# print(request.POST)
return HttpResponseRedirect('/aims/') | en | 0.670988 | This function is used to check the type of user. It checkes the authentication of the user. @param: request - contains metadata about the requested page @variables: current_user - get user from request user_details - extract details of user from database desig_id - check for designation acadadmin - designation for Acadadmin final_user - final designation of request user This function gets basic gata from database to send to template @param: request - contains metadata about the requested page @variables: acadTtForm - the form to add academic calender examTtForm - the form required to add exam timetable exam_t - all the exam timetable objects timetable - all the academic timetable objects calendar - all the academic calender objects context - the datas to be displayed in the webpage this_sem_course - tha data of thsi semester courses next_sem_courses - the data of next semester courses courses - all the courses in curriculum course_type - list the type of courses # examTtForm = ExamTimetableForm() # acadTtForm = AcademicTimetableForm() # calendar = Calendar.objects.all() # this_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True) # next_sem_courses = Curriculum.objects.all().filter(sem__in=course_list).filter(floated=True) # courses = Course.objects.all() # course_type = Constants.COURSE_TYPE # timetable = Timetable.objects.all() # exam_t = Exam_timetable.objects.all() This function is used to set up the homepage of the application. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: senates - the extraInfo objects that holds the designation as a senator students - all the objects in the Student class Convenor - the extraInfo objects that holds the designation as a convenor CoConvenor - the extraInfo objects that holds the designation as a coconvenor meetings - the all meeting objects held in senator meetings minuteForm - the form to add a senate meeting minutes acadTtForm - the form to add academic calender examTtForm - the form required to add exam timetable Dean - the extraInfo objects that holds the designation as a dean student - the students as a senator extra - all the extraInfor objects exam_t - all the exam timetable objects timetable - all the academic timetable objects calendar - all the academic calender objects department - all the departments in the college attendance - all the attendance objects of the students context - the datas to be displayed in the webpage # #################################### # # curriculum # # #################################### This function is used to see curriculum and edit entries in a curriculum. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. 
@param: request - contains metadata about the requested page @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form curriculum - Get data about curriculum from database courses - get courses from database courses_type - get course types from database #for checking if the user has searched for any particular curriculum #Curriculum.objects.all() # context={ # 'courses' : courses, # 'course_type' : course_type, # 'curriculum' : curriculum, # 'tab_id' :['3','1'] # } #return render(request, "ais/ais.html", context) This function is used to add new curriculum in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST batch - batch from form.REQUEST branch - branch from form.REQUEST sem - semester from form.REQUEST course_code - course_code from form.REQUEST course_name - course-name from form.REQUEST course_id - course_id from database credits - credits from form.REQUEST optional - optional from form.REQUEST course_type - course_type from form.REQUEST ins - data is stored in database This function is used to edit curriculum in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST batch - batch from form.REQUEST branch - branch from form.REQUEST sem - semester from form.REQUEST course_code - course_code from form.REQUEST course_name - course-name from form.REQUEST course_id - course_id from database credits - credits from form.REQUEST optional - optional from form.REQUEST course_type - course_type from form.REQUEST ins - data is stored in database This function is used to delete curriculum entry in database It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: dele - data being deleted from database This function is used to decide curriculum for new batch. It checkes the authentication of the user and also fetches the available data from the databases to display it on the page. @param: request - contains metadata about the requested page @variables: programme - programme from form.REQUEST now - current date from system year - current year batch - batch form form curriculum - curriculum details form database ins - Inster data in database acad-admin can upload the time table(any type of) of the semester. @param: request - contains metadata about the requested page. @variables: acadTtForm - data of delete dictionary in post request timetable - all timetable from database exam_t - all exam timetable from database acad-admin can upload the exam timtable of the ongoing semester. @param: request - contains metadata about the requested page. @variables: examTtForm - data of delete dictionary in post request timetable - all timetable from database exam_t - all exam timetable from database acad-admin can delete the outdated timetable from the server. @param: request - contains metadata about the requested page. @variables: data - data of delete dictionary in post request t - Object of time table to be deleted acad-admin can delete the outdated exam timetable. 
@param: request - contains metadata about the requested page. @variables: data - data of delete dictionary in post request t - Object of Exam time table to be deleted to add an entry to the academic calendar to be uploaded @param: request - contains metadata about the requested page. @variables: from_date - The starting date for the academic calendar event. to_date - The ending date for the academic caldendar event. desc - Description for the academic calendar event. c = object to save new event to the academic calendar. to update an entry to the academic calendar to be updated. @param: request - contains metadata about the requested page. @variables: from_date - The starting date for the academic calendar event. to_date - The ending date for the academic caldendar event. desc - Description for the academic calendar event. prev_desc - Description for the previous event which is to be updated. get_calendar_details = Get the object of the calendar instance from the database for the previous Description. #Generate Attendance Sheet This function generates semester grade sheet @variables: now - current datetime month - current month to generate Course List of Registered Students @param: request - contains metadata about the requested page @variables: batch - gets the batch course - gets the course curr_key - gets the curriculum from database obj - get stdents data from database ans - Formatted Array to be converted to xlsx k -temporary array to add data to formatted array/variable output - io Bytes object to write to xlsx file book - workbook of xlsx file title - formatting variable of title the workbook subtitle - formatting variable of subtitle the workbook normaltext - formatting variable for normal text sheet - xlsx sheet to be rendered titletext - formatting variable of title text dep - temporary variables z - temporary variables for final output b - temporary variables for final output c - temporary variables for final output st - temporary variables for final output to generate preresgistration report after pre-registration @param: request - contains metadata about the requested page @variables: sem - get current semester from current time now - get current time year - getcurrent year batch - gets the batch from form sem - stores the next semester obj - All the registration details appended into one data - Formated data for context m - counter for Sl. No (in formated data) z - temporary array to add data to variable data k -temporary array to add data to formatted array/variable output - io Bytes object to write to xlsx file book - workbook of xlsx file title - formatting variable of title the workbook subtitle - formatting variable of subtitle the workbook normaltext - formatting variable for normal text sheet - xlsx sheet to be rendered titletext - formatting variable of title text dep - temporary variables z - temporary variables for final output b - temporary variables for final output c - temporary variables for final output st - temporary variables for final output To add details of new upcoming students in the database.User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. 
@variables: profiles - gets the excel file having data excel - excel file sheet - sheet no in excel file roll_no - details of student from file first_name - details of student from file last_name - details of student from file email - details of student from file sex - details of student from file title - details of student from file dob - details of student from file fathers_name - details of student from file mothers_name - details of student from file category - details of student from file phone_no - details of student from file address - details of student from file department - details of student from file specialization - details of student from file hall_no - details of student from file programme - details of student from file batch - details of student from file user - new user created in database einfo - new extrainfo object created in database stud_data - new student object created in database desig - get designation object of student holds_desig - get hold_desig object of student currs - get curriculum details reg - create registeration object in registeration table to get faculty list from database @param: request - contains metadata about the requested page. @variables: f1,f2,f3 - temporary varibles faculty - details of faculty of data faculty_list - list of faculty to float courses for the next sem and store data in databsae. User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form #Curriculum.objects.all() to float courses for the next sem and store data in databsae. User must be logged in and must be acadadmin @param: request - contains metadata about the requested page. @variables: request_batch - Batch from form request_branch - Branch from form request_programme - Programme from form request_sem - Semester from form # # ---------------------senator------------------ # @csrf_exempt # """ # to add a new student senator # @param: # request - contains metadata about the requested page # @variables: # current_user - gets the data of current user. # user_details - gets the details of the required user. # desig_id - used to check the designation ID. 
# extraInfo - extraInfo object of the student with that rollno # s - designation object of senator # hDes - holdsDesignation object to store that the particualr student is holding the senator designation # student - the student object of the new senator # data - data of the student to be displayed in teh webpage # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') #print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == 'POST': # print(request.POST, ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") # rollno = request.POST.getlist('Roll Number')[0] # # print(request.POST.get('rollno')) # extraInfo = ExtraInfo.objects.get(id=rollno) # s = Designation.objects.get(name='Senator') # hDes = HoldsDesignation() # hDes.user = extraInfo.user # hDes.working = extraInfo.user # hDes.designation = s # hDes.save() # student = Student.objects.get(id=extraInfo) # data = { # 'name': extraInfo.user.username, # 'rollno': extraInfo.id, # 'programme': student.programme, # 'branch': extraInfo.department.name # } # return HttpResponseRedirect('/aims/') # # return JsonResponse(data) # else: # return HttpResponseRedirect('/aims/') # @csrf_exempt # """ # to remove a senator from the position # @param: # request - contains metadata about the requested page # @variables: # s - the designation object that contains senator # student - the list students that is a senator # hDes - the holdDesignation object that stores the # information that the particular student is a senator # """ # if request.POST: # s = get_object_or_404(Designation, name="Senator") # student = get_object_or_404(ExtraInfo, id=request.POST.getlist("senate_id")[0]) # hDes = get_object_or_404( HoldsDesignation, user = student.user) # hDes.delete() # return HttpResponseRedirect('/aims/') # else: # return HttpResponseRedirect('/aims/')# #################################################### # # ##########covenors and coconvenors################## # @csrf_exempt # """ # to add a new student convenor/coconvenor # @param: # request - contains metadata about the requested page # @variables: # rollno - rollno of the student to become the convenor/coconvenor # extraInfo - extraInfo object of the student with that rollno # s - designation object of Convenor # p - designation object of Co Convenor # result - the data that contains where the student will become # convenor or coconvenor # hDes - holdsDesignation object to store that the particualr student is # holding the convenor/coconvenor designation # student - the student object of the new convenor/coconvenor # data - data of the student to be displayed in the webpage # """ # p = Designation.objects.get(name='Co Convenor') # if request.method == 'POST': # rollno = request.POST.get('rollno_convenor') # extraInfo = ExtraInfo.objects.get(id=rollno) # s = Designation.objects.get(name='Convenor') # p = Designation.objects.get(name='Co Convenor') # result = request.POST.get('designation') # hDes = HoldsDesignation() # hDes.user = extraInfo.user # hDes.working = extraInfo.user # if result == "Convenor": # hDes.designation = s # else: # hDes.designation = p # hDes.save() # data = { # 'name': extraInfo.user.username, # 'rollno_convenor': extraInfo.id, 
# 'designation': hDes.designation.name, # } # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # @csrf_exempt # """ # to remove a convenor/coconvenor from the position # @param: # request - contains metadata about the requested page # pk - the primary key of that particular student field # @variables: # s - the designation object that contains convenor # c - the designation object that contains co convenor # student - the student object with the given pk # hDes - the holdDesignation object that stores the # information that the particular student is a convenor/coconvenor to be deleted # data - data of the student to be hidden in the webpage # """ # s = get_object_or_404(Designation, name="Convenor") # student = get_object_or_404(ExtraInfo, id=pk) # hDes = HoldsDesignation.objects.filter(user = student.user) # designation = [] # for des in hDes: # if des.designation == s or des.designation == c: # designation = des.designation.name # des.delete() # data = { # 'id': pk, # 'designation': designation, # } # return JsonResponse(data)# ###################################################### # # ##########Senate meeting Minute################## # @csrf_exempt # """ # to add a new senate meeting minute object to the database. # @param: # request - contains metadata about the requested page # @variables: # current_user - details of the current user. # desig_id - to check the designation of the user. # user_details - to get the details of the required user. # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == 'POST' and request.FILES: # form = MinuteForm(request.POST, request.FILES) # if form.is_valid(): # form.save() # return HttpResponse('sucess') # else: # return HttpResponse('not uploaded') # return render(request, "ais/ais.html", {}) # """ # to delete an existing senate meeting minute object from the database. 
# @param: # request - contains metadata about the requested page # @variables: # data - the id of the minute object to be deleted # t - the minute object received from id to be deleted # """ # if request.method == "POST": # data = request.POST['delete'] # t = Meeting.objects.get(id=data) # t.delete() # # ###################################################### # # ##########Student basic profile################## # @csrf_exempt # """ # It adds the basic profile information like username,password, name, # rollno, etc of a student # @param: # request - contains metadata about the requested page # @variables: # name - the name of the student # roll - the rollno of the student # batch - the current batch of the student # programme - the programme the student is enrolled in # ph - the phone number of the student # """ # roll = ExtraInfo.objects.get(id=request.POST.get('rollno')) # programme = request.POST.get('programme') # batch = request.POST.get('batch') # ph = request.POST.get('phoneno') # if not Student.objects.filter(id=roll).exists(): # db = Student() # st = ExtraInfo.objects.get(id=roll.id) # db.name = name.upper() # db.id = roll # db.batch = batch # db.programme = programme # st.phone_no = ph # db.save() # st.save() # data = { # 'name': name, # 'rollno': roll.id, # 'programme': programme, # 'phoneno': ph, # 'batch': batch # } # print(data) # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # else: # data = {} # return JsonResponse(data) # @csrf_exempt # """ # Deletes the student from the database # @param: # request - contains metadata about the requested page # pk - the primary key of the student's record in the database table # @variables: # e - the extraInfo objects of the student # user - the User object of the student # s - the student object of the student # """ # user = get_object_or_404(User, username = e.user.username) # s = get_object_or_404(Student, id=e) # data = { # 'rollno': pk, # } # s.delete() # e.delete() # u.delete() # return JsonResponse(data)# ######################################################### # ''' # # view to add attendance data to database # def curriculum(request): # ''' # """ # to delete the advance information of the student # @param: # request - contains metadata about the requested page # @variables: # current_user - the username of the logged in user # user_details - the details of the current user # desig_id - checking the designation of the current user # acadadmin - deatils of the acad admin # s - the student object from the requested rollno # """ # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # st = request.POST['delete'] # arr = st.split("-") # stu = arr[0] # if Student.objects.get(id=stu): # s = Student.objects.get(id=stu) # s.father_name = "" # s.mother_name = "" # s.hall_no = 1 # s.room_no = "" # s.save() # else: # return HttpResponse("Data Does Not Exist") # return HttpResponse("Data Deleted Successfully") # """ # It adds the advance profile information like hall no, room no, # profile picture, about me etc of a student # @param: # request - contains metadata about the requested page # 
@variables: # current_user - the username of the logged in user # user_details - the details of the current user # desig_id - checking the designation of the current user # acadadmin - deatils of the acad admin # father - father's name of the student # rollno - the rollno of the student required to check if the student is available # mother - mother's name of the student # add - student's address # cpi - student's cpi # hall - hall no of where the student stays # room no - hostel room no # """ # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # print(request.POST) # rollno=request.POST.get('roll') # print(rollno) # student = ExtraInfo.objects.get(id=rollno) # print(student.address) # if not student: # data = {} # return JsonResponse(data) # else: # father = request.POST.get('father') # mother = request.POST.get('mother') # add = request.POST.get('address') # hall = request.POST.get('hall') # room = request.POST.get('room') # cpi = request.POST.get('cpi') # student.address = str(hall) + " " + str(room) # student.save() # s = Student.objects.get(id=student) # s.father_name=father # s.mother_name=mother # s.hall_no = hall # s.room_no = room # s.save() # return HttpResponseRedirect('/academic-procedures/') # return HttpResponseRedirect('/academic-procedures/') # """ # acadmic admin to update the additional courses # @param: # request - contains metadata about the requested page. # @variables: # choices - selected addtional courses by the academic person. # course - Course details which is selected by the academic admin. # """ # print(request.POST) # choices = request.POST.getlist('choice') # for i in choices: # course = Course.objects.all().filter(course_id=i).first() # course.acad_selection = True # course.save() # courses = Course.objects.all() # for i in courses: # if i.course_id not in choices: # i.acad_selection = False # i.save() # return HttpResponseRedirect('/academic-procedures/') # """ # to set minimum credit for a current semester that a student must take # @param: # request - contains metadata about the requested page. # @variables: # sem_cred = Get credit details from forms and the append it to an array. # sem - Get the object for the minimum credits from the database and the update it. 
# """ # sem_cred.append(0) # for i in range(1, 10): # sem = "sem_"+"1" # sem_cred.append(request.POST.getlist(sem)[0]) # for i in range(1, 9): # sem = MinimumCredits.objects.all().filter(semester=i).first() # sem.credits = sem_cred[i+1] # sem.save() # return HttpResponse("Worked") # if request.method == "POST": # programme=request.POST['programme'] # batch=request.POST['batch'] # branch=request.POST['branch'] # sem=request.POST['sem'] # curriculum_courses = Curriculum.objects.filter(branch = branch).filter(batch = batch).filter(programme= programme).filter(sem = sem) # print(curriculum_courses) # courses = Course.objects.all() # course_type = Constants.COURSE_TYPE # context= { # 'courses': courses, # 'course_type': course_type, # 'curriculum_course': curriculum_courses, # } # return render(request, "ais/ais.html", context) # else: # return render(request, "ais/ais.html") # """ # It deletes the grade of the student # @param: # request - contains metadata about the requested page # @variables: # current_user - father's name of the student # user_details - the rollno of the student required to check if the student is available # desig_id - mother 's name of the student # acadadmin - student's address # final_user - details of the user # sem - current semester of the student # data - tag whether to delete it or not # course - get the course details # """ # current_user = get_object_or_404(User, username=request.user.username) # user_details = ExtraInfo.objects.all().filter(user=current_user).first() # desig_id = Designation.objects.all().filter(name='Upper Division Clerk') # temp = HoldsDesignation.objects.all().filter(designation = desig_id).first() # print (temp) # print (current_user) # acadadmin = temp.working # k = str(user_details).split() # print(k) # final_user = k[2] # if (str(acadadmin) != str(final_user)): # return HttpResponseRedirect('/academic-procedures/') # print(request.POST['delete']) # data = request.POST['delete'] # d = data.split("-") # id = d[0] # course = d[2] # sem = int(d[3]) # if request.method == "POST": # if(Grades.objects.filter(student_id=id, sem=sem)): # s = Grades.objects.filter(student_id=id, sem=sem) # for p in s: # if (str(p.course_id) == course): # print(p.course_id) # p.delete() # else: # return HttpResponse("Unable to delete data") It verify the grades of the student @param: request - contains metadata about the requested page @variables: current_user - father's name of the student user_details - the rollno of the student required to check if the student is available desig_id - mother's name of the student acadadmin - student's address subject - subject of which the grade has to be added sem - semester of the student grade - grade to be added in the student course - course ofwhich the grade is added # if user_check(request): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # curr_id=request.POST['course'] # print(curr_id) # curr_course = Curriculum.objects.filter(curriculum_id=curr_id) # grades = Grades.objects.filter(curriculum_id=curr_course) # context= { # 'grades': grades, # 'tab_id' :"2" # } # return render(request,"ais/ais.html", context) # else: # return HttpResponseRedirect('/aims/') # if user_check(request): # return HttpResponseRedirect('/academic-procedures/') # if request.method == "POST": # print("confirm hone wala hai") # print(request.POST) | 2.037758 | 2 |
subject/tests/functional/test_glance_replicator.py | laoyigrace/subject | 0 | 210 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test cases for subject-replicator"""
import sys
from subject.tests import functional
from subject.tests.utils import execute
class TestGlanceReplicator(functional.FunctionalTest):
"""Functional tests for subject-replicator"""
def test_compare(self):
# Test for issue: https://bugs.launchpad.net/glance/+bug/1598928
cmd = ('%s -m subject.cmd.replicator '
'compare az1:9292 az2:9292 --debug' %
(sys.executable,))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertIn(
'Request: GET http://az1:9292/v1/subjects/detail?is_public=None',
err
)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test cases for subject-replicator"""
import sys
from subject.tests import functional
from subject.tests.utils import execute
class TestGlanceReplicator(functional.FunctionalTest):
"""Functional tests for subject-replicator"""
def test_compare(self):
# Test for issue: https://bugs.launchpad.net/glance/+bug/1598928
cmd = ('%s -m subject.cmd.replicator '
'compare az1:9292 az2:9292 --debug' %
(sys.executable,))
exitcode, out, err = execute(cmd, raise_error=False)
self.assertIn(
'Request: GET http://az1:9292/v1/subjects/detail?is_public=None',
err
)
| en | 0.825754 | # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Functional test cases for subject-replicator Functional tests for subject-replicator # Test for issue: https://bugs.launchpad.net/glance/+bug/1598928 | 1.981368 | 2 |
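A quick aside on the functional test above: the command it assembles can be reproduced by hand to inspect the replicator's debug output. A minimal sketch using the standard library's subprocess module instead of the test helper; the az1/az2 endpoints are the same placeholders the test uses and are not expected to resolve.

import sys
import subprocess

# Same command the test builds, split into argv form
cmd = [sys.executable, '-m', 'subject.cmd.replicator',
       'compare', 'az1:9292', 'az2:9292', '--debug']
proc = subprocess.run(cmd, capture_output=True, text=True)
print(proc.returncode)
print(proc.stderr)  # the debug log should include the GET .../v1/subjects/detail request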
test/countries/test_united_states.py | OmoMicheal/marketanalysis | 2 | 211 | <reponame>OmoMicheal/marketanalysis<gh_stars>1-10
# -*- coding: utf-8 -*-
# marketanalysis
# ----------------
# A fast, efficient Python library for generating country, province and state
# specific sets of market holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: MichealOmojola <<EMAIL>>
# Website: https://github.com/OmoMicheal/trading_days
# License: MIT (see LICENSE file)
# Version: 0.1 (April 7, 2021)
import unittest
from datetime import date
from dateutil.relativedelta import relativedelta
# import sys
# sys.path.insert(0, 'C:/Users/momojola/projects/marketanalysis/marketanalysis/')
from marketanalysis import marketholidays
from marketanalysis import markettradingdays
class TestUS(unittest.TestCase):
def setUp(self):
self.marketholidayss = marketholidays.USA(observed=False)
self.markettradingdayss = markettradingdays.USA()
def test_new_years(self):
self.assertNotIn(date(2010, 12, 31), self.marketholidayss)
self.assertNotIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 12, 31), self.marketholidayss)
self.assertIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = False
for year in range(1900, 2100):
dt = date(year, 1, 1)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_martin_luther(self):
for dt in [
date(1986, 1, 20),
date(1999, 1, 18),
date(2000, 1, 17),
date(2012, 1, 16),
date(2013, 1, 21),
date(2014, 1, 20),
date(2015, 1, 19),
date(2016, 1, 18),
date(2020, 1, 20),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_washingtons_birthday(self):
de_marketholidayss = marketholidays.US()
for dt in [
date(1969, 2, 22),
date(1970, 2, 22),
date(1971, 2, 15),
date(1997, 2, 17),
date(1999, 2, 15),
date(2000, 2, 21),
date(2012, 2, 20),
date(2013, 2, 18),
date(2014, 2, 17),
date(2015, 2, 16),
date(2016, 2, 15),
date(2020, 2, 17),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertIn(dt, de_marketholidayss)
self.assertEqual(marketholidays.US().get("2015-02-16"), "Presidents' Day")
def test_good_friday(self):
marketholidayss_US = marketholidays.US()
for dt in [
date(1900, 4, 13),
date(1901, 4, 5),
date(1902, 3, 28),
date(1999, 4, 2),
date(2000, 4, 21),
date(2010, 4, 2),
date(2018, 3, 30),
date(2019, 4, 19),
date(2020, 4, 10),
]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, marketholidayss_US)
def test_memorial_day(self):
for dt in [
date(1969, 5, 30),
date(1970, 5, 30),
date(1971, 5, 31),
date(1997, 5, 26),
date(1999, 5, 31),
date(2000, 5, 29),
date(2012, 5, 28),
date(2013, 5, 27),
date(2014, 5, 26),
date(2015, 5, 25),
date(2016, 5, 30),
date(2020, 5, 25),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_independence_day(self):
for year in range(1900, 2100):
dt = date(year, 7, 4)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertNotIn(date(2010, 7, 5), self.marketholidayss)
self.assertNotIn(date(2020, 7, 3), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 7, 5), self.marketholidayss)
self.assertIn(date(2020, 7, 3), self.marketholidayss)
def test_labor_day(self):
for dt in [
date(1997, 9, 1),
date(1999, 9, 6),
date(2000, 9, 4),
date(2012, 9, 3),
date(2013, 9, 2),
date(2014, 9, 1),
date(2015, 9, 7),
date(2016, 9, 5),
date(2020, 9, 7),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_thanksgiving_day(self):
for dt in [
date(1997, 11, 27),
date(1999, 11, 25),
date(2000, 11, 23),
date(2012, 11, 22),
date(2013, 11, 28),
date(2014, 11, 27),
date(2015, 11, 26),
date(2016, 11, 24),
date(2020, 11, 26),
]:
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_christmas_eve(self):
as_marketholidayss = marketholidays.US()
self.marketholidayss.observed = False
for year in range(1900, 2050):
self.assertNotIn(date(year, 12, 24), self.marketholidayss)
# self.assertIn(date(year, 12, 24), as_marketholidayss)
self.assertNotIn(date(2016, 12, 23), as_marketholidayss)
self.assertNotIn(
"Christmas Eve (Observed)",
as_marketholidayss.get_list(date(2017, 12, 22)),
)
def test_christmas_day(self):
for year in range(1900, 2100):
dt = date(year, 12, 25)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertNotIn(date(2010, 12, 24), self.marketholidayss)
self.assertNotIn(date(2016, 12, 26), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 12, 24), self.marketholidayss)
self.assertIn(date(2016, 12, 26), self.marketholidayss)
def test_day_after_christmas(self):
nc_marketholidayss = marketholidays.US(observed=False)
self.assertNotIn(date(2015, 12, 28), nc_marketholidayss)
self.assertNotIn(date(2016, 12, 27), nc_marketholidayss)
nc_marketholidayss.observed = True
def test_new_years_eve(self):
ky_marketholidayss = marketholidays.US()
self.assertNotIn(date(2012, 12, 31), ky_marketholidayss)
for dt in [date(2013, 12, 31), date(2016, 12, 30)]:
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt, ky_marketholidayss)
def test_future_list(self):
current_date = '2021-04-13'
lookup_step = 10
self.assertIn(date(2021, 4, 16), self.markettradingdayss.future_list(current_date, lookup_step))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.future_list(current_date, lookup_step))
def test_prevDays(self):
current_date = '2021-04-13'
lookback_step = 4
self.assertIn(date(2021, 4, 9), self.markettradingdayss.prevDays(current_date, lookback_step))
self.assertNotIn(date(2021, 4, 11), self.markettradingdayss.prevDays(current_date, lookback_step))
def test_BtwDates(self):
current_date = '2021-04-13'
future_date = '2021-04-20'
self.assertIn(date(2021, 4, 15), self.markettradingdayss.BtwDates(current_date, future_date))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.BtwDates(current_date, future_date))
# if __name__ == "__main__":
# unittest.main() | # -*- coding: utf-8 -*-
# marketanalysis
# ----------------
# A fast, efficient Python library for generating country, province and state
# specific sets of market holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: MichealOmojola <<EMAIL>>
# Website: https://github.com/OmoMicheal/trading_days
# License: MIT (see LICENSE file)
# Version: 0.1 (April 7, 2021)
import unittest
from datetime import date
from dateutil.relativedelta import relativedelta
# import sys
# sys.path.insert(0, 'C:/Users/momojola/projects/marketanalysis/marketanalysis/')
from marketanalysis import marketholidays
from marketanalysis import markettradingdays
class TestUS(unittest.TestCase):
def setUp(self):
self.marketholidayss = marketholidays.USA(observed=False)
self.markettradingdayss = markettradingdays.USA()
def test_new_years(self):
self.assertNotIn(date(2010, 12, 31), self.marketholidayss)
self.assertNotIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 12, 31), self.marketholidayss)
self.assertIn(date(2017, 1, 2), self.marketholidayss)
self.marketholidayss.observed = False
for year in range(1900, 2100):
dt = date(year, 1, 1)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_martin_luther(self):
for dt in [
date(1986, 1, 20),
date(1999, 1, 18),
date(2000, 1, 17),
date(2012, 1, 16),
date(2013, 1, 21),
date(2014, 1, 20),
date(2015, 1, 19),
date(2016, 1, 18),
date(2020, 1, 20),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_washingtons_birthday(self):
de_marketholidayss = marketholidays.US()
for dt in [
date(1969, 2, 22),
date(1970, 2, 22),
date(1971, 2, 15),
date(1997, 2, 17),
date(1999, 2, 15),
date(2000, 2, 21),
date(2012, 2, 20),
date(2013, 2, 18),
date(2014, 2, 17),
date(2015, 2, 16),
date(2016, 2, 15),
date(2020, 2, 17),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertIn(dt, de_marketholidayss)
self.assertEqual(marketholidays.US().get("2015-02-16"), "Presidents' Day")
def test_good_friday(self):
marketholidayss_US = marketholidays.US()
for dt in [
date(1900, 4, 13),
date(1901, 4, 5),
date(1902, 3, 28),
date(1999, 4, 2),
date(2000, 4, 21),
date(2010, 4, 2),
date(2018, 3, 30),
date(2019, 4, 19),
date(2020, 4, 10),
]:
self.assertIn(dt, self.marketholidayss)
self.assertIn(dt, marketholidayss_US)
def test_memorial_day(self):
for dt in [
date(1969, 5, 30),
date(1970, 5, 30),
date(1971, 5, 31),
date(1997, 5, 26),
date(1999, 5, 31),
date(2000, 5, 29),
date(2012, 5, 28),
date(2013, 5, 27),
date(2014, 5, 26),
date(2015, 5, 25),
date(2016, 5, 30),
date(2020, 5, 25),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_independence_day(self):
for year in range(1900, 2100):
dt = date(year, 7, 4)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertNotIn(date(2010, 7, 5), self.marketholidayss)
self.assertNotIn(date(2020, 7, 3), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 7, 5), self.marketholidayss)
self.assertIn(date(2020, 7, 3), self.marketholidayss)
def test_labor_day(self):
for dt in [
date(1997, 9, 1),
date(1999, 9, 6),
date(2000, 9, 4),
date(2012, 9, 3),
date(2013, 9, 2),
date(2014, 9, 1),
date(2015, 9, 7),
date(2016, 9, 5),
date(2020, 9, 7),
]:
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_thanksgiving_day(self):
for dt in [
date(1997, 11, 27),
date(1999, 11, 25),
date(2000, 11, 23),
date(2012, 11, 22),
date(2013, 11, 28),
date(2014, 11, 27),
date(2015, 11, 26),
date(2016, 11, 24),
date(2020, 11, 26),
]:
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
def test_christmas_eve(self):
as_marketholidayss = marketholidays.US()
self.marketholidayss.observed = False
for year in range(1900, 2050):
self.assertNotIn(date(year, 12, 24), self.marketholidayss)
# self.assertIn(date(year, 12, 24), as_marketholidayss)
self.assertNotIn(date(2016, 12, 23), as_marketholidayss)
self.assertNotIn(
"Christmas Eve (Observed)",
as_marketholidayss.get_list(date(2017, 12, 22)),
)
def test_christmas_day(self):
for year in range(1900, 2100):
dt = date(year, 12, 25)
self.assertIn(dt, self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=-1), self.marketholidayss)
self.assertNotIn(dt + relativedelta(days=+1), self.marketholidayss)
self.assertNotIn(date(2010, 12, 24), self.marketholidayss)
self.assertNotIn(date(2016, 12, 26), self.marketholidayss)
self.marketholidayss.observed = True
self.assertIn(date(2010, 12, 24), self.marketholidayss)
self.assertIn(date(2016, 12, 26), self.marketholidayss)
def test_day_after_christmas(self):
nc_marketholidayss = marketholidays.US(observed=False)
self.assertNotIn(date(2015, 12, 28), nc_marketholidayss)
self.assertNotIn(date(2016, 12, 27), nc_marketholidayss)
nc_marketholidayss.observed = True
def test_new_years_eve(self):
ky_marketholidayss = marketholidays.US()
self.assertNotIn(date(2012, 12, 31), ky_marketholidayss)
for dt in [date(2013, 12, 31), date(2016, 12, 30)]:
self.assertNotIn(dt, self.marketholidayss)
self.assertNotIn(dt, ky_marketholidayss)
def test_future_list(self):
current_date = '2021-04-13'
lookup_step = 10
self.assertIn(date(2021, 4, 16), self.markettradingdayss.future_list(current_date, lookup_step))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.future_list(current_date, lookup_step))
def test_prevDays(self):
current_date = '2021-04-13'
lookback_step = 4
self.assertIn(date(2021, 4, 9), self.markettradingdayss.prevDays(current_date, lookback_step))
self.assertNotIn(date(2021, 4, 11), self.markettradingdayss.prevDays(current_date, lookback_step))
def test_BtwDates(self):
current_date = '2021-04-13'
future_date = '2021-04-20'
self.assertIn(date(2021, 4, 15), self.markettradingdayss.BtwDates(current_date, future_date))
self.assertNotIn(date(2021, 4, 18), self.markettradingdayss.BtwDates(current_date, future_date))
# if __name__ == "__main__":
# unittest.main() | en | 0.714732 | # -*- coding: utf-8 -*- # marketanalysis # ---------------- # A fast, efficient Python library for generating country, province and state # specific sets of marketmarketholidayss on the fly. It aims to make determining whether a # specific date is a holiday as fast and flexible as possible. # # Author: MichealOmojola <<EMAIL>> # Website: https://github.com/OmoMicheal/trading_days # License: MIT (see LICENSE file) # Version: 0.1 (April 7, 2021) # import sys # sys.path.insert(0, 'C:/Users/momojola/projects/marketanalysis/marketanalysis/') # self.assertIn(date(year, 12, 24), as_marketholidayss) # if __name__ == "__main__": # unittest.main() | 2.610884 | 3 |
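The header comment above pitches marketanalysis as a library for building market-holiday sets and querying trading days. A minimal usage sketch, assuming the package is installed and sticking to the calls exercised by the tests (holiday membership, get(), future_list, prevDays, BtwDates):

from datetime import date
from marketanalysis import marketholidays, markettradingdays

us_holidays = marketholidays.USA(observed=False)
us_trading = markettradingdays.USA()

print(date(2020, 1, 1) in us_holidays)                   # New Year's Day is a market holiday
print(marketholidays.US().get("2015-02-16"))             # "Presidents' Day", as asserted in the tests
print(us_trading.future_list('2021-04-13', 10))          # upcoming trading days from a date
print(us_trading.prevDays('2021-04-13', 4))              # trading days looking back
print(us_trading.BtwDates('2021-04-13', '2021-04-20'))   # trading days between two dates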
src/ros_comm/rosmsg/setup.py | jungleni/ros_code_reading | 742 | 212 | <reponame>jungleni/ros_code_reading
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rosmsg'],
package_dir={'': 'src'},
scripts=['scripts/rosmsg', 'scripts/rosmsg-proto', 'scripts/rossrv'],
requires=['genmsg', 'rosbag', 'roslib', 'rospkg']
)
setup(**d)
| #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=['rosmsg'],
package_dir={'': 'src'},
scripts=['scripts/rosmsg', 'scripts/rosmsg-proto', 'scripts/rossrv'],
requires=['genmsg', 'rosbag', 'roslib', 'rospkg']
)
setup(**d) | ru | 0.26433 | #!/usr/bin/env python | 1.327455 | 1 |
unsorted/linked_list.py | AlgoArt/algoart | 1 | 213 | <filename>unsorted/linked_list.py
#!/usr/bin/env python
# linked_list.py - Linked list implementation in Python by Sergey 2015
"""
Linked list implementation in Python
"""
# Standard modules
import unittest
import sys
import os
import argparse
import re
import random
import subprocess
import getpass
import shutil
# Additional modules
###############################################################################
# Linked_list Class
###############################################################################
class Node:
def __init__(self, value, tail):
self.value = value
self.next = tail
class Linked_list:
""" Linked_list representation """
def __init__(self):
""" Default constructor """
self.list = None
def insert(self, value):
self.list = Node(value, self.list)
def start_iter(self):
return self.list
def next_iter(self, iter):
if iter is not None:
return iter.next
else:
return iter
def tolist(self):
result = []
iter = self.start_iter()
while True:
result.append(iter.value)
iter = self.next_iter(iter)
if not iter:
break
return result
def run(self, test=False):
""" Main execution function """
if test:
return
###############################################################################
# Executable code
###############################################################################
def main():
# Sandbox
sb = Linked_list()
sb.run()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_Linked_list_class__basic_functionality(self):
""" Linked_list class basic testing """
d = Linked_list()
self.assertEqual(d.list, None)
d.insert(1)
self.assertEqual(d.list.value, 1)
d.insert(2)
self.assertEqual(d.list.next.value, 1)
iter = d.start_iter()
self.assertEqual(iter.value, 2)
iter = d.next_iter(iter)
self.assertEqual(iter.value, 1)
self.assertEqual(d.tolist(), [2, 1])
if __name__ == "__main__":
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
main()
| <filename>unsorted/linked_list.py
#!/usr/bin/env python
# linked_list.py - Linked list implementation in Python by Sergey 2015
"""
Linked list implementation in Python
"""
# Standard modules
import unittest
import sys
import os
import argparse
import re
import random
import subprocess
import getpass
import shutil
# Additional modules
###############################################################################
# Linked_list Class
###############################################################################
class Node:
def __init__(self, value, tail):
self.value = value
self.next = tail
class Linked_list:
""" Linked_list representation """
def __init__(self):
""" Default constructor """
self.list = None
def insert(self, value):
self.list = Node(value, self.list)
def start_iter(self):
return self.list
def next_iter(self, iter):
if iter is not None:
return iter.next
else:
return iter
def tolist(self):
result = []
iter = self.start_iter()
while True:
result.append(iter.value)
iter = self.next_iter(iter)
if not iter:
break
return result
def run(self, test=False):
""" Main execution function """
if test:
return
###############################################################################
# Executable code
###############################################################################
def main():
# Sandbox
sb = Linked_list()
sb.run()
###############################################################################
# Unit Tests
###############################################################################
class unitTests(unittest.TestCase):
def test_Linked_list_class__basic_functionality(self):
""" Linked_list class basic testing """
d = Linked_list()
self.assertEqual(d.list, None)
d.insert(1)
self.assertEqual(d.list.value, 1)
d.insert(2)
self.assertEqual(d.list.next.value, 1)
iter = d.start_iter()
self.assertEqual(iter.value, 2)
iter = d.next_iter(iter)
self.assertEqual(iter.value, 1)
self.assertEqual(d.tolist(), [2, 1])
if __name__ == "__main__":
if sys.argv[-1] == "-ut":
unittest.main(argv=[" "])
main()
| de | 0.624338 | #!/usr/bin/env python # linked_list.py - Linked list implementation in Python by Sergey 2015 Linked list implementation in Python # Standard modules # Additional modules ############################################################################### # Linked_list Class ############################################################################### Linked_list representation Default constructor Main execution function ############################################################################### # Executable code ############################################################################### # Sandbox ############################################################################### # Unit Tests ############################################################################### Linked_list class basic testing | 3.956057 | 4 |
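A short usage sketch for the Linked_list class above, using only the methods it defines (insert prepends, so traversal yields values in reverse insertion order, matching the unit test):

ll = Linked_list()
for value in (1, 2, 3):
    ll.insert(value)          # each insert becomes the new head

print(ll.tolist())            # [3, 2, 1]

node = ll.start_iter()        # manual traversal via the iterator helpers
while node:
    print(node.value)
    node = ll.next_iter(node)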
examples/seismic/viscoacoustic/wavesolver.py | speglich/devito | 1 | 214 | from devito import VectorTimeFunction, TimeFunction, NODE
from devito.tools import memoized_meth
from examples.seismic import PointSource
from examples.seismic.viscoacoustic.operators import (ForwardOperator, AdjointOperator)
class ViscoacousticWaveSolver(object):
"""
Solver object that provides operators for seismic inversion problems
and encapsulates the time and space discretization for a given problem
setup.
Parameters
----------
model : Model
Physical model with domain parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Order of the spatial stencil discretisation. Defaults to 4.
kernel : selects a visco-acoustic equation from the options below:
'sls' (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
'ren' - Ren et al. (2014) viscoacoustic equation
'deng_mcmechan' - Deng and McMechan (2007) viscoacoustic equation
Defaults to 'sls' 2nd order.
"""
def __init__(self, model, geometry, space_order=4, kernel='sls', time_order=2,
**kwargs):
self.model = model
self.model._initialize_bcs(bcs="mask")
self.geometry = geometry
self.space_order = space_order
self.kernel = kernel
self.time_order = time_order
self._kwargs = kwargs
@property
def dt(self):
return self.model.critical_dt
@memoized_meth
def op_fwd(self, save=None):
"""Cached operator for forward runs with buffered wavefield"""
return ForwardOperator(self.model, save=save, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
@memoized_meth
def op_adj(self):
"""Cached operator for adjoint runs"""
return AdjointOperator(self.model, save=None, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
def forward(self, src=None, rec=None, v=None, r=None, p=None, qp=None, b=None,
vp=None, save=None, **kwargs):
"""
Forward modelling function that creates the necessary
data objects for running a forward modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
v : VectorTimeFunction, optional
The computed particle velocity.
r : TimeFunction, optional
The computed memory variable.
p : TimeFunction, optional
Stores the computed wavefield.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
vp : Function or float, optional
The time-constant velocity.
save : bool, optional
Whether or not to save the entire (unrolled) wavefield.
Returns
-------
Receiver, wavefield and performance summary
"""
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create all the fields v, p, r
save_t = src.nt if save else None
if self.time_order == 1:
v = v or VectorTimeFunction(name="v", grid=self.model.grid, save=save_t,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in v})
# Create the forward wavefield if not provided
p = p or TimeFunction(name="p", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Pick physical parameters from model unless explicitly provided
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, r=r,
p=p, b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
else:
# Execute operator and return wavefield and receiver data
# Without Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, p=p,
b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return rec, p, v, summary
def adjoint(self, rec, srca=None, va=None, pa=None, vp=None, qp=None, b=None, r=None,
**kwargs):
"""
Adjoint modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
rec : SparseTimeFunction or array-like
The receiver data. Please note that
these act as the source term in the adjoint run.
srca : SparseTimeFunction or array-like
The resulting data for the interpolated at the
original source location.
va : VectorTimeFunction, optional
The computed particle velocity.
pa : TimeFunction, optional
Stores the computed wavefield.
vp : Function or float, optional
The time-constant velocity.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
r : TimeFunction, optional
The computed memory variable.
Returns
-------
Adjoint source, wavefield and performance summary.
"""
# Create a new adjoint source and receiver symbol
srca = srca or PointSource(name='srca', grid=self.model.grid,
time_range=self.geometry.time_axis,
coordinates=self.geometry.src_positions)
if self.time_order == 1:
va = va or VectorTimeFunction(name="va", grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in va})
pa = pa or TimeFunction(name="pa", grid=self.model.grid,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, time_order=self.time_order,
space_order=self.space_order, staggered=NODE)
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
# Execute operator and return wavefield and receiver data
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, r=r, b=b, vp=vp,
qp=qp, dt=kwargs.pop('dt', self.dt), **kwargs)
else:
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, vp=vp, b=b, qp=qp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return srca, pa, va, summary
| from devito import VectorTimeFunction, TimeFunction, NODE
from devito.tools import memoized_meth
from examples.seismic import PointSource
from examples.seismic.viscoacoustic.operators import (ForwardOperator, AdjointOperator)
class ViscoacousticWaveSolver(object):
"""
Solver object that provides operators for seismic inversion problems
and encapsulates the time and space discretization for a given problem
setup.
Parameters
----------
model : Model
Physical model with domain parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Order of the spatial stencil discretisation. Defaults to 4.
kernel : selects a visco-acoustic equation from the options below:
'sls' (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
'ren' - Ren et al. (2014) viscoacoustic equation
'deng_mcmechan' - Deng and McMechan (2007) viscoacoustic equation
Defaults to 'sls' 2nd order.
"""
def __init__(self, model, geometry, space_order=4, kernel='sls', time_order=2,
**kwargs):
self.model = model
self.model._initialize_bcs(bcs="mask")
self.geometry = geometry
self.space_order = space_order
self.kernel = kernel
self.time_order = time_order
self._kwargs = kwargs
@property
def dt(self):
return self.model.critical_dt
@memoized_meth
def op_fwd(self, save=None):
"""Cached operator for forward runs with buffered wavefield"""
return ForwardOperator(self.model, save=save, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
@memoized_meth
def op_adj(self):
"""Cached operator for adjoint runs"""
return AdjointOperator(self.model, save=None, geometry=self.geometry,
space_order=self.space_order, kernel=self.kernel,
time_order=self.time_order, **self._kwargs)
def forward(self, src=None, rec=None, v=None, r=None, p=None, qp=None, b=None,
vp=None, save=None, **kwargs):
"""
Forward modelling function that creates the necessary
data objects for running a forward modelling operator.
Parameters
----------
src : SparseTimeFunction or array_like, optional
Time series data for the injected source term.
rec : SparseTimeFunction or array_like, optional
The interpolated receiver data.
v : VectorTimeFunction, optional
The computed particle velocity.
r : TimeFunction, optional
The computed memory variable.
p : TimeFunction, optional
Stores the computed wavefield.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
vp : Function or float, optional
The time-constant velocity.
save : bool, optional
Whether or not to save the entire (unrolled) wavefield.
Returns
-------
Receiver, wavefield and performance summary
"""
# Source term is read-only, so re-use the default
src = src or self.geometry.src
# Create a new receiver object to store the result
rec = rec or self.geometry.rec
# Create all the fields v, p, r
save_t = src.nt if save else None
if self.time_order == 1:
v = v or VectorTimeFunction(name="v", grid=self.model.grid, save=save_t,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in v})
# Create the forward wavefield if not provided
p = p or TimeFunction(name="p", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, save=save_t,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Pick physical parameters from model unless explicitly provided
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, r=r,
p=p, b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
else:
# Execute operator and return wavefield and receiver data
# Without Memory variable
summary = self.op_fwd(save).apply(src=src, rec=rec, qp=qp, p=p,
b=b, vp=vp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return rec, p, v, summary
def adjoint(self, rec, srca=None, va=None, pa=None, vp=None, qp=None, b=None, r=None,
**kwargs):
"""
Adjoint modelling function that creates the necessary
data objects for running an adjoint modelling operator.
Parameters
----------
rec : SparseTimeFunction or array-like
The receiver data. Please note that
these act as the source term in the adjoint run.
srca : SparseTimeFunction or array-like
The resulting data for the interpolated at the
original source location.
va : VectorTimeFunction, optional
The computed particle velocity.
pa : TimeFunction, optional
Stores the computed wavefield.
vp : Function or float, optional
The time-constant velocity.
qp : Function, optional
The P-wave quality factor.
b : Function, optional
The time-constant inverse density.
r : TimeFunction, optional
The computed memory variable.
Returns
-------
Adjoint source, wavefield and performance summary.
"""
# Create a new adjoint source and receiver symbol
srca = srca or PointSource(name='srca', grid=self.model.grid,
time_range=self.geometry.time_axis,
coordinates=self.geometry.src_positions)
if self.time_order == 1:
va = va or VectorTimeFunction(name="va", grid=self.model.grid,
time_order=self.time_order,
space_order=self.space_order)
kwargs.update({k.name: k for k in va})
pa = pa or TimeFunction(name="pa", grid=self.model.grid,
time_order=self.time_order, space_order=self.space_order,
staggered=NODE)
# Memory variable:
r = r or TimeFunction(name="r", grid=self.model.grid, time_order=self.time_order,
space_order=self.space_order, staggered=NODE)
b = b or self.model.b
qp = qp or self.model.qp
# Pick vp from model unless explicitly provided
vp = vp or self.model.vp
# Execute operator and return wavefield and receiver data
if self.kernel == 'sls':
# Execute operator and return wavefield and receiver data
# With Memory variable
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, r=r, b=b, vp=vp,
qp=qp, dt=kwargs.pop('dt', self.dt), **kwargs)
else:
summary = self.op_adj().apply(src=srca, rec=rec, pa=pa, vp=vp, b=b, qp=qp,
dt=kwargs.pop('dt', self.dt), **kwargs)
return srca, pa, va, summary
| en | 0.698954 | Solver object that provides operators for seismic inversion problems and encapsulates the time and space discretization for a given problem setup. Parameters ---------- model : Model Physical model with domain parameters. geometry : AcquisitionGeometry Geometry object that contains the source (SparseTimeFunction) and receivers (SparseTimeFunction) and their position. space_order : int, optional Order of the spatial stencil discretisation. Defaults to 4. kernel : selects a visco-acoustic equation from the options below: 'sls' (Standard Linear Solid) : 1st order - Blanch and Symes (1995) / Dutta and Schuster (2014) viscoacoustic equation 2nd order - Bai et al. (2014) viscoacoustic equation 'ren' - Ren et al. (2014) viscoacoustic equation 'deng_mcmechan' - Deng and McMechan (2007) viscoacoustic equation Defaults to 'sls' 2nd order. Cached operator for forward runs with buffered wavefield Cached operator for adjoint runs Forward modelling function that creates the necessary data objects for running a forward modelling operator. Parameters ---------- src : SparseTimeFunction or array_like, optional Time series data for the injected source term. rec : SparseTimeFunction or array_like, optional The interpolated receiver data. v : VectorTimeFunction, optional The computed particle velocity. r : TimeFunction, optional The computed memory variable. p : TimeFunction, optional Stores the computed wavefield. qp : Function, optional The P-wave quality factor. b : Function, optional The time-constant inverse density. vp : Function or float, optional The time-constant velocity. save : bool, optional Whether or not to save the entire (unrolled) wavefield. Returns ------- Receiver, wavefield and performance summary # Source term is read-only, so re-use the default # Create a new receiver object to store the result # Create all the fields v, p, r # Create the forward wavefield if not provided # Memory variable: # Pick physical parameters from model unless explicitly provided # Pick vp from model unless explicitly provided # Execute operator and return wavefield and receiver data # With Memory variable # Execute operator and return wavefield and receiver data # Without Memory variable Adjoint modelling function that creates the necessary data objects for running an adjoint modelling operator. Parameters ---------- rec : SparseTimeFunction or array-like The receiver data. Please note that these act as the source term in the adjoint run. srca : SparseTimeFunction or array-like The resulting data for the interpolated at the original source location. va : VectorTimeFunction, optional The computed particle velocity. pa : TimeFunction, optional Stores the computed wavefield. vp : Function or float, optional The time-constant velocity. qp : Function, optional The P-wave quality factor. b : Function, optional The time-constant inverse density. r : TimeFunction, optional The computed memory variable. Returns ------- Adjoint source, wavefield and performance summary. # Create a new adjoint source and receiver symbol # Memory variable: # Pick vp from model unless explicitly provided # Execute operator and return wavefield and receiver data # Execute operator and return wavefield and receiver data # With Memory variable | 2.545285 | 3 |
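A hedged call-pattern sketch for ViscoacousticWaveSolver above. Building the Model and AcquisitionGeometry is outside this file, so they are taken as inputs here rather than constructed; only the constructor arguments and the forward/adjoint return values documented above are used.

def run_viscoacoustic(model, geometry):
    # model/geometry are assumed to be a devito seismic Model and AcquisitionGeometry
    solver = ViscoacousticWaveSolver(model, geometry, space_order=4,
                                     kernel='sls', time_order=2)
    # Forward run: receiver data, wavefield p, particle velocity v, performance summary
    rec, p, v, summary = solver.forward(save=False)
    # Adjoint run driven by the recorded data
    srca, pa, va, adj_summary = solver.adjoint(rec)
    return rec, srca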
StaticProcess/apriori.py | NIL-zhuang/NJU-Data-Integration | 0 | 215 | import pandas as pd
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
dataPath = "data/static"
itemSetList = []
def loadDataSet():
with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
cates = line.split(' ')
itemSetList.append(list(map(int, cates)))
def myApriori():
te = TransactionEncoder()
te_ary = te.fit(itemSetList).transform(itemSetList)
df = pd.DataFrame(te_ary, columns=te.columns_)
return df
def dataInit():
if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
return
df = pd.read_csv("data/static/static.csv")
user_category = defaultdict(set)
for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="category data generate"):
user_category[row['USER_ID']].add(row['CATEGORY_ID'])
with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
for k, v in tqdm(user_category.items()):
f.write(' '.join(sorted(list(map(str, v))))+'\n')
if __name__ == '__main__':
dataInit()
loadDataSet()
df = myApriori()
frequent_itemsets = apriori(df, min_support=0.0035, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
print(frequent_itemsets[(frequent_itemsets['length'] >= 2)])
| import pandas as pd
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
dataPath = "data/static"
itemSetList = []
def loadDataSet():
with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
cates = line.split(' ')
itemSetList.append(list(map(int, cates)))
def myApriori():
te = TransactionEncoder()
te_ary = te.fit(itemSetList).transform(itemSetList)
df = pd.DataFrame(te_ary, columns=te.columns_)
return df
def dataInit():
if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
return
df = pd.read_csv("data/static/static.csv")
user_category = defaultdict(set)
for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="category data generate"):
user_category[row['USER_ID']].add(row['CATEGORY_ID'])
with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
for k, v in tqdm(user_category.items()):
f.write(' '.join(sorted(list(map(str, v))))+'\n')
if __name__ == '__main__':
dataInit()
loadDataSet()
df = myApriori()
frequent_itemsets = apriori(df, min_support=0.0035, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
print(frequent_itemsets[(frequent_itemsets['length'] >= 2)])
| none | 1 | 2.44751 | 2 |
|
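A possible follow-up to the apriori script above: mlxtend can turn the frequent itemsets into association rules. This sketch assumes it is appended to the __main__ block so frequent_itemsets is in scope; the thresholds are illustrative.

from mlxtend.frequent_patterns import association_rules

# Rank rules derived from the frequent itemsets by lift
rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)
print(rules.sort_values("lift", ascending=False).head())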
gcp-python-fn/main.py | FuriKuri/faas-playground | 1 | 216 | <reponame>FuriKuri/faas-playground
def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers)
| def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers) | none | 1 | 2.946684 | 3 |
|
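The Cloud Function above only needs an object exposing get_json(), so it can be exercised locally without GCP. The tiny test double below is purely illustrative.

class FakeRequest:
    def __init__(self, payload):
        self._payload = payload

    def get_json(self):
        return self._payload

body, status, headers = hello_world(FakeRequest({'name': 'Furi'}))
print(status, body)                                # 200 Hello Furi! From GCP + Python
print(headers['Access-Control-Allow-Origin'])      # https://furikuri.net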
tonclient/test/helpers.py | move-ton/ton-client-py | 28 | 217 | <filename>tonclient/test/helpers.py<gh_stars>10-100
import os
from tonclient.client import TonClient
from tonclient.types import Abi, CallSet, Signer, ClientConfig, \
ParamsOfEncodeMessage, ParamsOfProcessMessage
BASE_DIR = os.path.dirname(__file__)
SAMPLES_DIR = os.path.join(BASE_DIR, 'samples')
GIVER_ADDRESS = '0:f5c2510bfe407363cb1db6b9d7bc1184a05f8b343aeaa828189c580e8569ee23'
client_config = ClientConfig()
client_config.network.endpoints = ['https://tonos.freeton.surf']
async_core_client = TonClient(config=client_config)
sync_core_client = TonClient(config=client_config, is_core_async=False)
def send_grams(address: str):
giver_abi = Abi.from_path(
path=os.path.join(SAMPLES_DIR, 'Giver.abi.json'))
call_set = CallSet(
function_name='grant', input={'dest': address})
encode_params = ParamsOfEncodeMessage(
abi=giver_abi, signer=Signer.NoSigner(), address=GIVER_ADDRESS,
call_set=call_set)
process_params = ParamsOfProcessMessage(
message_encode_params=encode_params, send_events=False)
async_core_client.processing.process_message(params=process_params)
def tonos_punch():
send_grams(
address='0:b5e9240fc2d2f1ff8cbb1d1dee7fb7cae155e5f6320e585fcc685698994a19a5')
| none | 1 | 1.903629 | 2 |
|
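A hedged sketch of how a test module could use these helpers; the recipient address below is a placeholder, not a real account:

# Hedged sketch: funding a test address with the giver helper above.
from tonclient.test.helpers import send_grams

TEST_ADDRESS = '0:0000000000000000000000000000000000000000000000000000000000000000'  # placeholder

def test_fund_account():
    # should complete without raising; the giver contract covers the transfer
    send_grams(address=TEST_ADDRESS)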
tests/test_i18n.py | vthriller/flask-kajiki | 0 | 218 | from kajiki import i18n
from flask import request
from flask_kajiki import render_template
# N. B. setting i18n.gettext would affect tests from all modules,
# so we test for a request path that only functions from this module could set
def gettext(s):
if request.path == '/test_i18n':
return s.upper()
return s
i18n.gettext = gettext
def test_does_translations(app):
"""Callback interface is able to inject Translator filter"""
with app.test_request_context(path='/test_i18n'):
rendered = render_template('i18n.html')
# TODO DOCTYPE; see also render_args
expected = '<p>HELLO!</p>'
assert rendered == expected
| en | 0.868347 | # N. B. settting i18n.gettext would affect tests from all modules, # so we test for request path that only functions from this module could set Callback interface is able to inject Translator filter # TODO DOCTYPE; see also render_args | 2.451841 | 2 |
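The test above assumes an `app` fixture and an i18n.html template defined elsewhere in the repository; a minimal sketch of what they could look like follows (the template location and fixture wiring are assumptions; only plain Flask/pytest APIs are used):

# Hypothetical conftest.py sketch for the `app` fixture used by the test above.
import pytest
from flask import Flask

@pytest.fixture
def app():
    app = Flask(__name__)
    app.config['TESTING'] = True
    # templates/i18n.html is assumed to contain:  <p>Hello!</p>
    # so the uppercasing gettext above renders:   <p>HELLO!</p>
    return app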
mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | jstzwjr/mmdetection | 1 | 219 | <reponame>jstzwjr/mmdetection<gh_stars>1-10
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_upsample_layer
from mmcv.ops import Conv2d
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(nn.Module):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
super(FCNMaskHead, self).__init__()
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = Conv2d(logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
else:
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum() * 0
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
img_shape (Tensor): shape (3, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape: original image size
Returns:
list[list]: encoded masks
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid()
else:
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
bboxes = det_bboxes[:, :4]
labels = det_labels
if rescale:
img_h, img_w = ori_shape[:2]
else:
if isinstance(scale_factor, float):
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(
np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(
np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor
N = len(mask_pred)
        # The actual implementation splits the input into chunks,
# and paste them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when they are pasted one by one with
# skip_empty=True, so that it performs minimal number of
# operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].cpu().numpy())
return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks acoording to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(
y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
img_x = torch.arange(
x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
return img_masks[:, 0], () | en | 0.854116 | # TODO: This memory limit may be too much or too little. It would be better to # determine it based on available resources. # 1 GB memory limit # WARN: roi_feat_size is reserved and not used # suppress warnings Get segmentation masks from mask_pred and bboxes. Args: mask_pred (Tensor or ndarray): shape (n, #class, h, w). For single-scale testing, mask_pred is the direct output of model, whose type is Tensor, while for multi-scale testing, it will be converted to numpy array outside of this method. det_bboxes (Tensor): shape (n, 4/5) det_labels (Tensor): shape (n, ) img_shape (Tensor): shape (3, ) rcnn_test_cfg (dict): rcnn testing config ori_shape: original image size Returns: list[list]: encoded masks # BG is not included in num_classes # The actual implementation split the input into chunks, # and paste them chunk by chunk. # CPU is most efficient when they are pasted one by one with # skip_empty=True, so that it performs minimal number of # operations. # GPU benefits from parallelism for larger chunks, # but may have memory issue # for visualization and debugging Paste instance masks acoording to boxes. This implementation is modified from https://github.com/facebookresearch/detectron2/ Args: masks (Tensor): N, 1, H, W boxes (Tensor): N, 4 img_h (int): Height of the image to be pasted. img_w (int): Width of the image to be pasted. skip_empty (bool): Only paste masks within the region that tightly bound all boxes, and returns the results this region only. An important optimization for CPU. Returns: tuple: (Tensor, tuple). The first item is mask tensor, the second one is the slice object. If skip_empty == False, the whole image will be pasted. It will return a mask of shape (N, img_h, img_w) and an empty tuple. If skip_empty == True, only area around the mask will be pasted. A mask of shape (N, h', w') and its start and end coordinates in the original image will be returned. # On GPU, paste all masks together (up to chunk size) # by using the entire image to sample the masks # Compared to pasting them one by one, # this has more operations but is faster on COCO-scale dataset. # each is Nx1 # img_x, img_y have shapes (N, w), (N, h) | 1.974462 | 2 |
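The chunked pasting in get_seg_masks is easiest to see with concrete numbers; the sketch below reproduces the num_chunks arithmetic for a hypothetical detection count and image size:

# Worked example of the GPU chunking arithmetic in get_seg_masks above.
# The detection count and image size are hypothetical.
import math

BYTES_PER_FLOAT = 4
GPU_MEM_LIMIT = 1024 ** 3            # 1 GB, as in the module above

N, img_h, img_w = 1000, 800, 1333    # 1000 masks pasted onto an 800x1333 image
pasted_bytes = N * img_h * img_w * BYTES_PER_FLOAT
num_chunks = int(math.ceil(pasted_bytes / GPU_MEM_LIMIT))

print(pasted_bytes / 1024 ** 3)      # ~3.97 GB if pasted in one shot
print(num_chunks)                    # 4 -> the masks are pasted in 4 chunks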
PaddleOCR/deploy/hubserving/ocr_det/params.py | TangJiamin/Ultra_light_OCR_No.23 | 0 | 220 | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(object):
pass
def read_params():
cfg = Config()
#params for text detector
cfg.det_algorithm = "DB"
cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/"
cfg.det_limit_side_len = 960
cfg.det_limit_type = 'max'
    #DB params
cfg.det_db_thresh = 0.3
cfg.det_db_box_thresh = 0.5
cfg.det_db_unclip_ratio = 1.6
cfg.use_dilation = False
    # #EAST params
# cfg.det_east_score_thresh = 0.8
# cfg.det_east_cover_thresh = 0.1
# cfg.det_east_nms_thresh = 0.2
cfg.use_pdserving = False
cfg.use_tensorrt = False
return cfg
| en | 0.279061 | # -*- coding:utf-8 -*- #params for text detector #DB parmas # #EAST parmas # cfg.det_east_score_thresh = 0.8 # cfg.det_east_cover_thresh = 0.1 # cfg.det_east_nms_thresh = 0.2 | 1.983244 | 2 |
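A hedged sketch of consuming this config; the overrides are illustrative, not recommended values:

# Hedged sketch: read the detector config defined above and tweak it before use.
from params import read_params  # module name assumed from the path above

cfg = read_params()
print(cfg.det_algorithm, cfg.det_model_dir)   # DB ./inference/ch_ppocr_mobile_v2.0_det_infer/

# fields are plain attributes, so callers can override them before handing cfg
# to the text detector, e.g. to trade recall for precision:
cfg.det_db_box_thresh = 0.6
cfg.det_limit_side_len = 1280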
networks/larflow/models/larflow_uresnet.py | LArbys/ublarcvserver | 2 | 221 | <filename>networks/larflow/models/larflow_uresnet.py
import torch.nn as nn
import torch as torch
import math
import torch.utils.model_zoo as model_zoo
###########################################################
#
# U-ResNet
# U-net witih ResNet modules
#
# Semantic segmentation network used by MicroBooNE
# to label track/shower pixels
#
# resnet implementation from pytorch.torchvision module
# U-net from (cite)
#
# meant to be copy of caffe version
#
###########################################################
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.stride = stride
self.bypass = None
if inplanes!=planes or stride>1:
self.bypass = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.bypass is not None:
outbp = self.bypass(x)
out += outbp
else:
out += x
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1 ):
super(Bottleneck, self).__init__()
# residual path
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
# if stride >1, then we need to subsamble the input
if stride>1:
self.shortcut = nn.Conv2d(inplanes,planes,kernel_size=1,stride=stride,bias=False)
else:
self.shortcut = None
def forward(self, x):
if self.shortcut is None:
bypass = x
else:
bypass = self.shortcut(x)
residual = self.conv1(x)
residual = self.bn1(residual)
residual = self.relu(residual)
residual = self.conv2(residual)
residual = self.bn2(residual)
residual = self.relu(residual)
residual = self.conv3(residual)
residual = self.bn3(residual)
out = bypass+residual
out = self.relu(out)
return out
class PreactivationBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1 ):
        super(PreactivationBlock, self).__init__()
# residual path
self.bn1 = nn.BatchNorm2d(inplanes)
self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
# if stride >1, then we need to subsamble the input
if stride>1:
self.shortcut = nn.Conv2d(inplanes,planes,kernel_size=1,stride=stride,bias=False)
else:
self.shortcut = None
    def forward(self, x):
        if self.shortcut is None:
            bypass = x
        else:
            bypass = self.shortcut(x)
        # pre-activation residual path: BN -> ReLU -> conv, twice
        residual = self.conv1(self.relu1(self.bn1(x)))
        residual = self.conv2(self.relu2(self.bn2(residual)))
        return bypass + residual
class DoubleResNet(nn.Module):
def __init__(self,Block,inplanes,planes,stride=1):
super(DoubleResNet,self).__init__()
self.res1 = Block(inplanes,planes,stride)
self.res2 = Block( planes,planes, 1)
def forward(self, x):
out = self.res1(x)
out = self.res2(out)
return out
class ConvTransposeLayer(nn.Module):
def __init__(self,deconv_inplanes,skip_inplanes,deconv_outplanes,res_outplanes):
super(ConvTransposeLayer,self).__init__()
self.deconv = nn.ConvTranspose2d( deconv_inplanes, deconv_outplanes, kernel_size=4, stride=2, padding=1, bias=False )
self.res = DoubleResNet(BasicBlock,deconv_outplanes+skip_inplanes,res_outplanes,stride=1)
def forward(self,x,skip_x):
out = self.deconv(x,output_size=skip_x.size())
# concat skip connections
out = torch.cat( [out,skip_x], 1 )
out = self.res(out)
return out
class LArFlowUResNet(nn.Module):
def __init__(self, num_classes=3, input_channels=3, inplanes=16, showsizes=False, use_visi=True):
self.inplanes =inplanes
super(LArFlowUResNet, self).__init__()
self._showsizes = showsizes # print size at each layer
self.use_visi = use_visi
# Encoder
# stem
# one big stem
self.conv1 = nn.Conv2d(input_channels, self.inplanes, kernel_size=7, stride=1, padding=3, bias=True) # initial conv layer
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu1 = nn.ReLU(inplace=True)
self.pool1 = nn.MaxPool2d( 3, stride=2, padding=1 )
self.enc_layer1 = self._make_encoding_layer( self.inplanes*1, self.inplanes*2, stride=1) # 16->32
self.enc_layer2 = self._make_encoding_layer( self.inplanes*2, self.inplanes*4, stride=2) # 32->64
self.enc_layer3 = self._make_encoding_layer( self.inplanes*4, self.inplanes*8, stride=2) # 64->128
self.enc_layer4 = self._make_encoding_layer( self.inplanes*8, self.inplanes*16, stride=2) # 128->256
self.enc_layer5 = self._make_encoding_layer( self.inplanes*16, self.inplanes*32, stride=2) # 256->512
# decoding flow
#self.num_final_flow_features = self.inplanes
self.num_final_flow_features = self.inplanes
self.flow_dec_layer5 = self._make_decoding_layer( self.inplanes*32*2, self.inplanes*16, self.inplanes*16, self.inplanes*16 ) # 512->256
self.flow_dec_layer4 = self._make_decoding_layer( self.inplanes*16, self.inplanes*8, self.inplanes*8, self.inplanes*8 ) # 256->128
self.flow_dec_layer3 = self._make_decoding_layer( self.inplanes*8, self.inplanes*4, self.inplanes*4, self.inplanes*4 ) # 128->64
self.flow_dec_layer2 = self._make_decoding_layer( self.inplanes*4, self.inplanes*2, self.inplanes*2, self.inplanes*2 ) # 64->32
#self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes ) # 32->16
self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes, self.num_final_flow_features ) # 32->200
# decoding matchability
if self.use_visi:
self.visi_dec_layer5 = self._make_decoding_layer( self.inplanes*32*2, self.inplanes*16, self.inplanes*16, self.inplanes*16 ) # 512->256
self.visi_dec_layer4 = self._make_decoding_layer( self.inplanes*16, self.inplanes*8, self.inplanes*8, self.inplanes*8 ) # 256->128
self.visi_dec_layer3 = self._make_decoding_layer( self.inplanes*8, self.inplanes*4, self.inplanes*4, self.inplanes*4 ) # 128->64
self.visi_dec_layer2 = self._make_decoding_layer( self.inplanes*4, self.inplanes*2, self.inplanes*2, self.inplanes*2 ) # 64->32
self.visi_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes, self.inplanes ) # 32->16
# 1x1 conv for flow
self.flow_conv = nn.Conv2d( self.num_final_flow_features, 1, kernel_size=1, stride=1, padding=0, bias=True )
# 1x1 conv for mathability
if self.use_visi:
self.visi_conv = nn.Conv2d( self.inplanes, 2, kernel_size=1, stride=1, padding=0, bias=True ) # 2 classes, 0=not vis, 1=vis
self.visi_softmax = nn.LogSoftmax(dim=1)
# initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m,nn.ConvTranspose2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_encoding_layer(self, inplanes, planes, stride=2):
return DoubleResNet(BasicBlock,inplanes,planes,stride=stride)
def _make_decoding_layer(self, inplanes, skipplanes, deconvplanes, resnetplanes ):
return ConvTransposeLayer( inplanes, skipplanes, deconvplanes, resnetplanes )
def encode(self,x):
# stem
x = self.conv1(x)
x = self.bn1(x)
x0 = self.relu1(x)
x = self.pool1(x0)
x1 = self.enc_layer1(x)
x2 = self.enc_layer2(x1)
x3 = self.enc_layer3(x2)
x4 = self.enc_layer4(x3)
x5 = self.enc_layer5(x4)
        if self._showsizes:
            print("after encoding: ")
            print(" x1: ", x1.size())
            print(" x2: ", x2.size())
            print(" x3: ", x3.size())
            print(" x4: ", x4.size())
            print(" x5: ", x5.size())
return x5,x0,x1,x2,x3,x4
def flow(self,merged_encode,x0,x1,x2,x3,x4):
""" decoding to flow prediction """
x = self.flow_dec_layer5(merged_encode,x4)
        if self._showsizes:
            print("after decoding:")
            print(" dec5: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer4(x, x3)
        if self._showsizes:
            print(" dec4: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer3(x, x2)
        if self._showsizes:
            print(" dec3: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer2(x, x1)
        if self._showsizes:
            print(" dec2: ", x.size(), " iscuda=", x.is_cuda)
        x = self.flow_dec_layer1(x, x0)
        if self._showsizes:
            print(" dec1: ", x.size(), " iscuda=", x.is_cuda)
return x
def visibility(self,merged_encode,x0,x1,x2,x3,x4):
""" decoding to flow prediction """
x = self.visi_dec_layer5(merged_encode,x4)
if self._showsizes:
print "after decoding:"
print " dec5: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer4(x,x3)
if self._showsizes:
print " dec4: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer3(x,x2)
if self._showsizes:
print " dec3: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer2(x,x1)
if self._showsizes:
print " dec2: ",x.size()," iscuda=",x.is_cuda
x = self.visi_dec_layer1(x,x0)
if self._showsizes:
print " dec1: ",x.size()," iscuda=",x.is_cuda
return x
def forward(self, src, target):
        if self._showsizes:
            print("input: ", src.size(), " is_cuda=", src.is_cuda)
src_encode, s0, s1, s2, s3, s4 = self.encode(src)
target_encode, t0, t1, t2, t3, t4 = self.encode(target)
merged_encode = torch.cat( [target_encode,src_encode], 1 )
flowout = self.flow( merged_encode, s0, s1, s2, s3, s4 )
if self.use_visi:
visiout = self.visibility( merged_encode, t0, t1, t2, t3, t4 )
flow_predict = self.flow_conv( flowout )
if self.use_visi:
visi_predict = self.visi_conv( visiout )
visi_predict = self.visi_softmax(visi_predict)
else:
visi_predict = None
        if self._showsizes:
            if visi_predict is not None:
                print(" softmax: ", visi_predict.size())
return flow_predict,visi_predict
| en | 0.388715 | ########################################################### # # U-ResNet # U-net witih ResNet modules # # Semantic segmentation network used by MicroBooNE # to label track/shower pixels # # resnet implementation from pytorch.torchvision module # U-net from (cite) # # meant to be copy of caffe version # ########################################################### 3x3 convolution with padding # residual path # if stride >1, then we need to subsamble the input # residual path # if stride >1, then we need to subsamble the input # concat skip connections # print size at each layer # Encoder # stem # one big stem # initial conv layer # 16->32 # 32->64 # 64->128 # 128->256 # 256->512 # decoding flow #self.num_final_flow_features = self.inplanes # 512->256 # 256->128 # 128->64 # 64->32 #self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes ) # 32->16 # 32->200 # decoding matchability # 512->256 # 256->128 # 128->64 # 64->32 # 32->16 # 1x1 conv for flow # 1x1 conv for mathability # 2 classes, 0=not vis, 1=vis # initialization # stem decoding to flow prediction decoding to flow prediction | 2.536323 | 3 |
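A hedged smoke-test sketch for the network above: random source/target images go through the model and only the output shapes are checked (batch size and resolution are arbitrary, but the spatial size must be divisible by 32 so the encoder/decoder stages line up):

# Hedged smoke test for LArFlowUResNet: random inputs, shape checks only.
import torch
# from larflow_uresnet import LArFlowUResNet   # module name assumed from the path above

model = LArFlowUResNet(input_channels=3, inplanes=16, use_visi=True)
model.eval()

src = torch.randn(1, 3, 256, 256)
target = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    flow_pred, visi_pred = model(src, target)

print(flow_pred.size())   # expected: [1, 1, 256, 256] - one flow value per pixel
print(visi_pred.size())   # expected: [1, 2, 256, 256] - visible / not-visible log-probs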
examples/machine_reading_comprehension/DuReader-robust/run_du.py | wzzju/PaddleNLP | 3 | 222 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
import json
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from args import parse_args
import paddlenlp as ppnlp
from paddlenlp.data import Pad, Stack, Tuple, Dict
from paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer
from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer
from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer
from paddlenlp.transformers import RobertaForQuestionAnswering, RobertaTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics.squad import squad_evaluate, compute_prediction
from paddlenlp.datasets import load_dataset
MODEL_CLASSES = {
"bert": (BertForQuestionAnswering, BertTokenizer),
"ernie": (ErnieForQuestionAnswering, ErnieTokenizer),
"ernie_gram": (ErnieGramForQuestionAnswering, ErnieGramTokenizer),
"roberta": (RobertaForQuestionAnswering, RobertaTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
@paddle.no_grad()
def evaluate(model, data_loader, args):
model.eval()
all_start_logits = []
all_end_logits = []
tic_eval = time.time()
for batch in data_loader:
input_ids, token_type_ids = batch
start_logits_tensor, end_logits_tensor = model(input_ids,
token_type_ids)
for idx in range(start_logits_tensor.shape[0]):
if len(all_start_logits) % 1000 == 0 and len(all_start_logits):
print("Processing example: %d" % len(all_start_logits))
print('time per 1000:', time.time() - tic_eval)
tic_eval = time.time()
all_start_logits.append(start_logits_tensor.numpy()[idx])
all_end_logits.append(end_logits_tensor.numpy()[idx])
all_predictions, _, _ = compute_prediction(
data_loader.dataset.data, data_loader.dataset.new_data,
(all_start_logits, all_end_logits), False, args.n_best_size,
args.max_answer_length)
# Can also write all_nbest_json and scores_diff_json files if needed
with open('prediction.json', "w", encoding='utf-8') as writer:
writer.write(
json.dumps(
all_predictions, ensure_ascii=False, indent=4) + "\n")
squad_evaluate(
examples=data_loader.dataset.data,
preds=all_predictions,
is_whitespace_splited=False)
model.train()
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
def __init__(self):
super(CrossEntropyLossForSQuAD, self).__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.cross_entropy(
input=start_logits, label=start_position)
end_loss = paddle.nn.functional.cross_entropy(
input=end_logits, label=end_position)
loss = (start_loss + end_loss) / 2
return loss
def run(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
rank = paddle.distributed.get_rank()
task_name = args.task_name.lower()
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
set_seed(args)
if rank == 0:
if os.path.exists(args.model_name_or_path):
print("init checkpoint from %s" % args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
def prepare_train_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
# NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses ArrowTable as the basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# Let's label those examples!
for i, tokenized_example in enumerate(tokenized_examples):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_example["input_ids"]
cls_index = input_ids.index(tokenizer.cls_token_id)
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offsets = tokenized_example['offset_mapping']
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
answers = examples[sample_index]['answers']
answer_starts = examples[sample_index]['answer_starts']
# Start/end character index of the answer in the text.
start_char = answer_starts[0]
end_char = start_char + len(answers[0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Minus one more to reach actual text
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and
offsets[token_end_index][1] >= end_char):
tokenized_examples[i]["start_positions"] = cls_index
tokenized_examples[i]["end_positions"] = cls_index
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[
token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples[i]["start_positions"] = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples[i]["end_positions"] = token_end_index + 1
return tokenized_examples
if args.do_train:
if args.train_file:
train_ds = load_dataset(task_name, data_files=args.train_file)
else:
train_ds = load_dataset(task_name, splits='train')
train_ds.map(prepare_train_features, batched=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
"start_positions": Stack(dtype="int64"),
"end_positions": Stack(dtype="int64")
}): fn(samples)
train_data_loader = DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
return_list=True)
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
num_train_epochs = math.ceil(num_training_steps /
len(train_data_loader))
lr_scheduler = LinearDecayWithWarmup(
args.learning_rate, num_training_steps, args.warmup_proportion)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
criterion = CrossEntropyLossForSQuAD()
global_step = 0
tic_train = time.time()
for epoch in range(num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, token_type_ids, start_positions, end_positions = batch
logits = model(
input_ids=input_ids, token_type_ids=token_type_ids)
loss = criterion(logits, (start_positions, end_positions))
if global_step % args.logging_steps == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch + 1, step + 1, loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
if rank == 0:
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print('Saving checkpoint to:', output_dir)
if global_step == num_training_steps:
break
def prepare_validation_features(examples):
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
# NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses ArrowTable as the basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# For validation, there is no need to compute start and end positions
for i, tokenized_example in enumerate(tokenized_examples):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_example['token_type_ids']
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
tokenized_examples[i]["example_id"] = examples[sample_index]['id']
# Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
# whether a token position is part of the context or not.
tokenized_examples[i]["offset_mapping"] = [
(o if sequence_ids[k] == 1 else None)
for k, o in enumerate(tokenized_example["offset_mapping"])
]
return tokenized_examples
if args.do_predict and rank == 0:
if args.predict_file:
dev_ds = load_dataset(task_name, data_files=args.predict_file)
else:
dev_ds = load_dataset(task_name, splits='dev')
dev_ds.map(prepare_validation_features, batched=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=args.batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
}): fn(samples)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
return_list=True)
evaluate(model, dev_data_loader, args)
if __name__ == "__main__":
args = parse_args()
run(args)
| # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
import json
import math
from functools import partial
import numpy as np
import paddle
from paddle.io import DataLoader
from args import parse_args
import paddlenlp as ppnlp
from paddlenlp.data import Pad, Stack, Tuple, Dict
from paddlenlp.transformers import BertForQuestionAnswering, BertTokenizer
from paddlenlp.transformers import ErnieForQuestionAnswering, ErnieTokenizer
from paddlenlp.transformers import ErnieGramForQuestionAnswering, ErnieGramTokenizer
from paddlenlp.transformers import RobertaForQuestionAnswering, RobertaTokenizer
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.metrics.squad import squad_evaluate, compute_prediction
from paddlenlp.datasets import load_dataset
MODEL_CLASSES = {
"bert": (BertForQuestionAnswering, BertTokenizer),
"ernie": (ErnieForQuestionAnswering, ErnieTokenizer),
"ernie_gram": (ErnieGramForQuestionAnswering, ErnieGramTokenizer),
"roberta": (RobertaForQuestionAnswering, RobertaTokenizer)
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
paddle.seed(args.seed)
@paddle.no_grad()
def evaluate(model, data_loader, args):
model.eval()
all_start_logits = []
all_end_logits = []
tic_eval = time.time()
for batch in data_loader:
input_ids, token_type_ids = batch
start_logits_tensor, end_logits_tensor = model(input_ids,
token_type_ids)
for idx in range(start_logits_tensor.shape[0]):
if len(all_start_logits) % 1000 == 0 and len(all_start_logits):
print("Processing example: %d" % len(all_start_logits))
print('time per 1000:', time.time() - tic_eval)
tic_eval = time.time()
all_start_logits.append(start_logits_tensor.numpy()[idx])
all_end_logits.append(end_logits_tensor.numpy()[idx])
all_predictions, _, _ = compute_prediction(
data_loader.dataset.data, data_loader.dataset.new_data,
(all_start_logits, all_end_logits), False, args.n_best_size,
args.max_answer_length)
# Can also write all_nbest_json and scores_diff_json files if needed
with open('prediction.json', "w", encoding='utf-8') as writer:
writer.write(
json.dumps(
all_predictions, ensure_ascii=False, indent=4) + "\n")
squad_evaluate(
examples=data_loader.dataset.data,
preds=all_predictions,
is_whitespace_splited=False)
model.train()
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
def __init__(self):
super(CrossEntropyLossForSQuAD, self).__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.cross_entropy(
input=start_logits, label=start_position)
end_loss = paddle.nn.functional.cross_entropy(
input=end_logits, label=end_position)
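        # The total loss is the average of the start- and end-position cross-entropy losses,
        # the usual objective for extractive (SQuAD-style) question answering.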
loss = (start_loss + end_loss) / 2
return loss
def run(args):
paddle.set_device(args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
rank = paddle.distributed.get_rank()
task_name = args.task_name.lower()
args.model_type = args.model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
set_seed(args)
if rank == 0:
if os.path.exists(args.model_name_or_path):
print("init checkpoint from %s" % args.model_name_or_path)
model = model_class.from_pretrained(args.model_name_or_path)
if paddle.distributed.get_world_size() > 1:
model = paddle.DataParallel(model)
def prepare_train_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
        # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses an ArrowTable as its basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# Let's label those examples!
for i, tokenized_example in enumerate(tokenized_examples):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_example["input_ids"]
cls_index = input_ids.index(tokenizer.cls_token_id)
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offsets = tokenized_example['offset_mapping']
            # Grab the sequence corresponding to that example (to know which part is the context and which is the question).
sequence_ids = tokenized_example['token_type_ids']
            # One example can give several spans; this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
answers = examples[sample_index]['answers']
answer_starts = examples[sample_index]['answer_starts']
# Start/end character index of the answer in the text.
start_char = answer_starts[0]
end_char = start_char + len(answers[0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != 1:
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != 1:
token_end_index -= 1
# Minus one more to reach actual text
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (offsets[token_start_index][0] <= start_char and
offsets[token_end_index][1] >= end_char):
tokenized_examples[i]["start_positions"] = cls_index
tokenized_examples[i]["end_positions"] = cls_index
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while token_start_index < len(offsets) and offsets[
token_start_index][0] <= start_char:
token_start_index += 1
tokenized_examples[i]["start_positions"] = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples[i]["end_positions"] = token_end_index + 1
return tokenized_examples
if args.do_train:
if args.train_file:
train_ds = load_dataset(task_name, data_files=args.train_file)
else:
train_ds = load_dataset(task_name, splits='train')
train_ds.map(prepare_train_features, batched=True)
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=args.batch_size, shuffle=True)
train_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id),
"start_positions": Stack(dtype="int64"),
"end_positions": Stack(dtype="int64")
}): fn(samples)
train_data_loader = DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=train_batchify_fn,
return_list=True)
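        # Note: args.max_steps, when set, takes precedence over the epoch-based count; num_train_epochs is
        # recomputed below so the training loop runs for exactly num_training_steps optimizer steps.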
num_training_steps = args.max_steps if args.max_steps > 0 else len(
train_data_loader) * args.num_train_epochs
num_train_epochs = math.ceil(num_training_steps /
len(train_data_loader))
lr_scheduler = LinearDecayWithWarmup(
args.learning_rate, num_training_steps, args.warmup_proportion)
# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
p.name for n, p in model.named_parameters()
if not any(nd in n for nd in ["bias", "norm"])
]
optimizer = paddle.optimizer.AdamW(
learning_rate=lr_scheduler,
epsilon=args.adam_epsilon,
parameters=model.parameters(),
weight_decay=args.weight_decay,
apply_decay_param_fun=lambda x: x in decay_params)
criterion = CrossEntropyLossForSQuAD()
global_step = 0
tic_train = time.time()
for epoch in range(num_train_epochs):
for step, batch in enumerate(train_data_loader):
global_step += 1
input_ids, token_type_ids, start_positions, end_positions = batch
logits = model(
input_ids=input_ids, token_type_ids=token_type_ids)
loss = criterion(logits, (start_positions, end_positions))
if global_step % args.logging_steps == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.2f step/s"
% (global_step, epoch + 1, step + 1, loss,
args.logging_steps / (time.time() - tic_train)))
tic_train = time.time()
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.clear_grad()
if global_step % args.save_steps == 0 or global_step == num_training_steps:
if rank == 0:
output_dir = os.path.join(args.output_dir,
"model_%d" % global_step)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# need better way to get inner model of DataParallel
model_to_save = model._layers if isinstance(
model, paddle.DataParallel) else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print('Saving checkpoint to:', output_dir)
if global_step == num_training_steps:
break
def prepare_validation_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
        # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is
        # that HuggingFace uses an ArrowTable as its basic data structure, while we use a list of dictionaries instead.
contexts = [examples[i]['context'] for i in range(len(examples))]
questions = [examples[i]['question'] for i in range(len(examples))]
tokenized_examples = tokenizer(
questions,
contexts,
stride=args.doc_stride,
max_seq_len=args.max_seq_length)
# For validation, there is no need to compute start and end positions
for i, tokenized_example in enumerate(tokenized_examples):
            # Grab the sequence corresponding to that example (to know which part is the context and which is the question).
sequence_ids = tokenized_example['token_type_ids']
            # One example can give several spans; this is the index of the example containing this span of text.
sample_index = tokenized_example['overflow_to_sample']
tokenized_examples[i]["example_id"] = examples[sample_index]['id']
            # Set the offset_mapping entries that are not part of the context to None, so it is easy to determine
            # whether a token position is part of the context or not.
tokenized_examples[i]["offset_mapping"] = [
(o if sequence_ids[k] == 1 else None)
for k, o in enumerate(tokenized_example["offset_mapping"])
]
return tokenized_examples
if args.do_predict and rank == 0:
if args.predict_file:
dev_ds = load_dataset(task_name, data_files=args.predict_file)
else:
dev_ds = load_dataset(task_name, splits='dev')
dev_ds.map(prepare_validation_features, batched=True)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=args.batch_size, shuffle=False)
dev_batchify_fn = lambda samples, fn=Dict({
"input_ids": Pad(axis=0, pad_val=tokenizer.pad_token_id),
"token_type_ids": Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
}): fn(samples)
dev_data_loader = DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=dev_batchify_fn,
return_list=True)
evaluate(model, dev_data_loader, args)
if __name__ == "__main__":
args = parse_args()
run(args)
| en | 0.883001 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Can also write all_nbest_json and scores_diff_json files if needed # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead. # Let's label those examples! # We will label impossible answers with the index of the CLS token. # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. # Grab the sequence corresponding to that example (to know what is the context and what is the question). # One example can give several spans, this is the index of the example containing this span of text. # Start/end character index of the answer in the text. # Start token index of the current span in the text. # End token index of the current span in the text. # Minus one more to reach actual text # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). # Generate parameter names needed to perform weight decay. # All bias and LayerNorm parameters are excluded. # need better way to get inner model of DataParallel # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. # NOTE: Almost the same functionality as HuggingFace's prepare_train_features function. The main difference is # that HugggingFace uses ArrowTable as basic data structure, while we use list of dictionary instead. # For validation, there is no need to compute start and end positions # Grab the sequence corresponding to that example (to know what is the context and what is the question). # One example can give several spans, this is the index of the example containing this span of text. # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. | 1.85833 | 2 |
Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 0 | 223 | # Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Initialize the weight matrix W uniformly in the range [0, 0.01] (small random initialization, like scaling by *0.01).
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Squared-error (sum-of-squares) loss between predicted and target Q-values
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
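            # TD target for the taken action: r + y * max_a' Q(s', a'); the other action values keep their current estimates.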
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
| # Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Initialize the weight matrix W uniformly in the range [0, 0.01] (small random initialization, like scaling by *0.01).
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Squared-error (sum-of-squares) loss between predicted and target Q-values
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
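            # TD target for the taken action: r + y * max_a' Q(s', a'); the other action values keep their current estimates.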
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
| en | 0.690065 | # Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0 # NEURAL NETWORK IMPLEMENTATION # Feature vector for current state representation # tf.Variable(<initial-value>, name=<optional-name>) # tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None) # Weighting W vector in range 0 - 0.01 (like the way Andrew Ng did with *0.01 # Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state # Greedy action at a state # Feature vector for next state representation # Entropy loss # TRAIN THE NETWORK # Set learning parameters # List to store total rewards and steps per episode #{} is running!".format(i)) # First state # Q network # or While not d: # Choose action by epsilon (e) greedy # print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1]) # s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] # s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]] # Identity [s:s+1] is a one-hot vector # Therefore W is the actual Q value # Obtain next state Q value by feeding the new state throughout the network # Train our network using target and predicted Q values # ------------------------------------------------------------------------- # TABULAR IMPLEMENTATION # # # Set learning parameters # lr = 0.8 # y = 0.95 # number_episodes = 20000 # # # Initial table with all zeros # Q = np.zeros([env.observation_space.n, env.action_space.n]) # # # List of reward and steps per episode # rList = [] # for i in range (number_episodes): # print("Episode #{} is running!".format(i)) # s = env.reset() # rAll = 0 # d = False # j = 0 # while j < 99: # j += 1 # # Choose an action by greedily (with noise) picking from Q table # # Because of the noise, it is epsilon-greedy with epsilon decreasing over time # a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1))) # s1, r, d, _ = env.step(a) # # env.render() # # # Update Q table with new knowledge # Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a]) # rAll += r # s = s1 # if d: # break # rList.append(rAll) | 2.813866 | 3 |
testing/cross_language/util/supported_key_types.py | chanced/tink | 0 | 224 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['<KEY>'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['<KEY>'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
| en | 0.845781 | # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. All KeyTypes and which languages support them. # Placeholder for import for type annotations # All languages supported by cross-language tests. # All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.') # All languages that are supported by a KeyType # For each KeyType, a list of all KeyTemplate Names that must be supported. # KeyTemplate (as Protobuf) for each KeyTemplate name. | 1.533725 | 2 |
signer.py | chapeltech/remote-signer | 39 | 225 | #!/usr/bin/env python3
#########################################################
# Written by <NAME>, <EMAIL>
# Copyright (c) 2018 Blockscale LLC
# released under the MIT license
#########################################################
from flask import Flask, request, Response, json, jsonify
from src.remote_signer import RemoteSigner
from os import path
import logging
logging.basicConfig(filename='./remote-signer.log', format='%(asctime)s %(message)s', level=logging.INFO)
app = Flask(__name__)
# sample config used for testing
config = {
'hsm_username': 'resigner',
'hsm_slot': 1,
'hsm_lib': '/opt/cloudhsm/lib/libcloudhsm_pkcs11.so',
'node_addr': 'http://node.internal:8732',
'keys': {
'<KEY>': {
'public_key': '<KEY>',
'private_handle': 7,
'public_handle': 9
}
}
}
logging.info('Opening keys.json')
if path.isfile('keys.json'):
logging.info('Found keys.json')
with open('keys.json', 'r') as myfile:
json_blob = myfile.read().replace('\n', '')
logging.info('Parsed keys.json successfully as JSON')
config = json.loads(json_blob)
logging.info('Config contains: {}'.format(json.dumps(config, indent=2)))
@app.route('/keys/<key_hash>', methods=['POST'])
def sign(key_hash):
response = None
try:
data = request.get_json(force=True)
if key_hash in config['keys']:
logging.info('Found key_hash {} in config'.format(key_hash))
key = config['keys'][key_hash]
logging.info('Attempting to sign {}'.format(data))
rs = RemoteSigner(config, data)
response = jsonify({
'signature': rs.sign(key['private_handle'])
})
logging.info('Response is {}'.format(response))
else:
logging.warning("Couldn't find key {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
@app.route('/keys/<key_hash>', methods=['GET'])
def get_public_key(key_hash):
response = None
try:
if key_hash in config['keys']:
key = config['keys'][key_hash]
response = jsonify({
'public_key': key['public_key']
})
logging.info('Found public key {} for key hash {}'.format(key['public_key'], key_hash))
else:
            logging.warning("Couldn't find public key for key hash {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
@app.route('/authorized_keys', methods=['GET'])
def authorized_keys():
return app.response_class(
response=json.dumps({}),
status=200,
mimetype='application/json'
)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
| #!/usr/bin/env python3
#########################################################
# Written by <NAME>, <EMAIL>
# Copyright (c) 2018 Blockscale LLC
# released under the MIT license
#########################################################
from flask import Flask, request, Response, json, jsonify
from src.remote_signer import RemoteSigner
from os import path
import logging
logging.basicConfig(filename='./remote-signer.log', format='%(asctime)s %(message)s', level=logging.INFO)
app = Flask(__name__)
# sample config used for testing
config = {
'hsm_username': 'resigner',
'hsm_slot': 1,
'hsm_lib': '/opt/cloudhsm/lib/libcloudhsm_pkcs11.so',
'node_addr': 'http://node.internal:8732',
'keys': {
'<KEY>': {
'public_key': '<KEY>',
'private_handle': 7,
'public_handle': 9
}
}
}
logging.info('Opening keys.json')
if path.isfile('keys.json'):
logging.info('Found keys.json')
with open('keys.json', 'r') as myfile:
json_blob = myfile.read().replace('\n', '')
logging.info('Parsed keys.json successfully as JSON')
config = json.loads(json_blob)
logging.info('Config contains: {}'.format(json.dumps(config, indent=2)))
@app.route('/keys/<key_hash>', methods=['POST'])
def sign(key_hash):
response = None
try:
data = request.get_json(force=True)
if key_hash in config['keys']:
logging.info('Found key_hash {} in config'.format(key_hash))
key = config['keys'][key_hash]
logging.info('Attempting to sign {}'.format(data))
rs = RemoteSigner(config, data)
response = jsonify({
'signature': rs.sign(key['private_handle'])
})
logging.info('Response is {}'.format(response))
else:
logging.warning("Couldn't find key {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
@app.route('/keys/<key_hash>', methods=['GET'])
def get_public_key(key_hash):
response = None
try:
if key_hash in config['keys']:
key = config['keys'][key_hash]
response = jsonify({
'public_key': key['public_key']
})
logging.info('Found public key {} for key hash {}'.format(key['public_key'], key_hash))
else:
            logging.warning("Couldn't find public key for key hash {}".format(key_hash))
response = Response('Key not found', status=404)
except Exception as e:
data = {'error': str(e)}
logging.error('Exception thrown during request: {}'.format(str(e)))
response = app.response_class(
response=json.dumps(data),
status=500,
mimetype='application/json'
)
logging.info('Returning flask response {}'.format(response))
return response
@app.route('/authorized_keys', methods=['GET'])
def authorized_keys():
return app.response_class(
response=json.dumps({}),
status=200,
mimetype='application/json'
)
if __name__ == '__main__':
app.run(host='127.0.0.1', port=5000, debug=True)
| de | 0.451115 | #!/usr/bin/env python3 ######################################################### # Written by <NAME>, <EMAIL> # Copyright (c) 2018 Blockscale LLC # released under the MIT license ######################################################### # sample config used for testing | 1.909194 | 2 |
unwind.py | 0x1F9F1/binja-msvc | 9 | 226 | from binaryninja import log
from .utils import BinjaStruct, read_pe_header, split_bits, update_percentage
# https://msdn.microsoft.com/en-us/library/ft9x1kdx.aspx
RUNTIME_FUNCTION_t = BinjaStruct('<III', names = ('BeginAddress', 'EndAddress', 'UnwindData'))
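# The three RUNTIME_FUNCTION fields are stored as image-relative addresses (RVAs);
# read_runtime_function rebases them against view.start below.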
def read_runtime_function(view, address):
runtime_function, address = RUNTIME_FUNCTION_t.read(view, address, 4)
if runtime_function is not None:
runtime_function['BeginAddress'] += view.start
runtime_function['EndAddress'] += view.start
runtime_function['UnwindData'] += view.start
return runtime_function, address
UNWIND_INFO_t = BinjaStruct('<BBBB', names = ('VersionAndFlags', 'SizeOfProlog', 'CountOfCodes', 'FrameRegisterAndOffset'))
UNW_FLAG_NHANDLER = 0x0
UNW_FLAG_EHANDLER = 0x1
UNW_FLAG_UHANDLER = 0x2
UNW_FLAG_FHANDLER = 0x3
UNW_FLAG_CHAININFO = 0x4
def read_unwind_info(view, address):
unwind_info, address = UNWIND_INFO_t.read(view, address)
if unwind_info is not None:
split_bits(unwind_info, 'VersionAndFlags', [
('Version', 0, 3),
('Flags', 3, 5)
])
split_bits(unwind_info, 'FrameRegisterAndOffset', [
('FrameRegister', 0, 4),
('FrameOffset', 4, 4)
])
if unwind_info['Version'] == 1:
unwind_codes = [ ]
for i in range(unwind_info['CountOfCodes']):
unwind_code, address = read_unwind_code(view, address)
unwind_codes.append(unwind_code)
unwind_info['UnwindCodes'] = unwind_codes
if unwind_info['Flags'] & UNW_FLAG_CHAININFO:
unwind_info['FunctionEntry'], address = read_runtime_function(view, address)
return unwind_info, address
UNWIND_CODE_t = BinjaStruct('<BB', names = ('CodeOffset', 'UnwindOpAndInfo'))
def read_unwind_code(view, address):
unwind_code, address = UNWIND_CODE_t.read(view, address)
if unwind_code is not None:
split_bits(unwind_code, 'UnwindOpAndInfo', [
('UnwindOp', 0, 4),
('OpInfo', 4, 4)
])
return unwind_code, address
def parse_unwind_info(thread, view):
base_address = view.start
pe = read_pe_header(view)
unwind_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[3]
unwind_entrys = base_address + unwind_directory.VirtualAddress
unwind_entrys_end = unwind_entrys + unwind_directory.Size
funcs = set()
log.log_info('Exception Data @ 0x{0:X} => 0x{1:X}'.format(unwind_entrys, unwind_entrys_end))
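    # Each RUNTIME_FUNCTION entry is 12 bytes (three 32-bit fields), hence the stride of 12 below.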
for runtime_address in range(unwind_entrys, unwind_entrys_end, 12):
if thread.cancelled:
break
update_percentage(thread, unwind_entrys, unwind_entrys_end, runtime_address, 'Parsing Unwind Info - Found {0} functions'.format(len(funcs)))
runtime_function, _ = read_runtime_function(view, runtime_address)
if runtime_function is None:
continue
start_address = runtime_function['BeginAddress']
if not view.is_offset_executable(start_address):
continue
if view.get_functions_containing(start_address):
continue
info_address = runtime_function['UnwindData']
unwind_info, _ = read_unwind_info(view, info_address)
if unwind_info is None:
continue
if 'FunctionEntry' in unwind_info:
continue
funcs.add(start_address)
if not thread.cancelled:
        thread.progress = 'Creating {0} Functions'.format(len(funcs))
log.log_info('Found {0} functions'.format(len(funcs)))
for func in funcs:
view.create_user_function(func)
| from binaryninja import log
from .utils import BinjaStruct, read_pe_header, split_bits, update_percentage
# https://msdn.microsoft.com/en-us/library/ft9x1kdx.aspx
RUNTIME_FUNCTION_t = BinjaStruct('<III', names = ('BeginAddress', 'EndAddress', 'UnwindData'))
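# The three RUNTIME_FUNCTION fields are stored as image-relative addresses (RVAs);
# read_runtime_function rebases them against view.start below.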
def read_runtime_function(view, address):
runtime_function, address = RUNTIME_FUNCTION_t.read(view, address, 4)
if runtime_function is not None:
runtime_function['BeginAddress'] += view.start
runtime_function['EndAddress'] += view.start
runtime_function['UnwindData'] += view.start
return runtime_function, address
UNWIND_INFO_t = BinjaStruct('<BBBB', names = ('VersionAndFlags', 'SizeOfProlog', 'CountOfCodes', 'FrameRegisterAndOffset'))
UNW_FLAG_NHANDLER = 0x0
UNW_FLAG_EHANDLER = 0x1
UNW_FLAG_UHANDLER = 0x2
UNW_FLAG_FHANDLER = 0x3
UNW_FLAG_CHAININFO = 0x4
def read_unwind_info(view, address):
unwind_info, address = UNWIND_INFO_t.read(view, address)
if unwind_info is not None:
split_bits(unwind_info, 'VersionAndFlags', [
('Version', 0, 3),
('Flags', 3, 5)
])
split_bits(unwind_info, 'FrameRegisterAndOffset', [
('FrameRegister', 0, 4),
('FrameOffset', 4, 4)
])
if unwind_info['Version'] == 1:
unwind_codes = [ ]
for i in range(unwind_info['CountOfCodes']):
unwind_code, address = read_unwind_code(view, address)
unwind_codes.append(unwind_code)
unwind_info['UnwindCodes'] = unwind_codes
if unwind_info['Flags'] & UNW_FLAG_CHAININFO:
unwind_info['FunctionEntry'], address = read_runtime_function(view, address)
return unwind_info, address
UNWIND_CODE_t = BinjaStruct('<BB', names = ('CodeOffset', 'UnwindOpAndInfo'))
def read_unwind_code(view, address):
unwind_code, address = UNWIND_CODE_t.read(view, address)
if unwind_code is not None:
split_bits(unwind_code, 'UnwindOpAndInfo', [
('UnwindOp', 0, 4),
('OpInfo', 4, 4)
])
return unwind_code, address
def parse_unwind_info(thread, view):
base_address = view.start
pe = read_pe_header(view)
unwind_directory = pe.OPTIONAL_HEADER.DATA_DIRECTORY[3]
unwind_entrys = base_address + unwind_directory.VirtualAddress
unwind_entrys_end = unwind_entrys + unwind_directory.Size
funcs = set()
log.log_info('Exception Data @ 0x{0:X} => 0x{1:X}'.format(unwind_entrys, unwind_entrys_end))
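    # Each RUNTIME_FUNCTION entry is 12 bytes (three 32-bit fields), hence the stride of 12 below.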
for runtime_address in range(unwind_entrys, unwind_entrys_end, 12):
if thread.cancelled:
break
update_percentage(thread, unwind_entrys, unwind_entrys_end, runtime_address, 'Parsing Unwind Info - Found {0} functions'.format(len(funcs)))
runtime_function, _ = read_runtime_function(view, runtime_address)
if runtime_function is None:
continue
start_address = runtime_function['BeginAddress']
if not view.is_offset_executable(start_address):
continue
if view.get_functions_containing(start_address):
continue
info_address = runtime_function['UnwindData']
unwind_info, _ = read_unwind_info(view, info_address)
if unwind_info is None:
continue
if 'FunctionEntry' in unwind_info:
continue
funcs.add(start_address)
if not thread.cancelled:
        thread.progress = 'Creating {0} Functions'.format(len(funcs))
log.log_info('Found {0} functions'.format(len(funcs)))
for func in funcs:
view.create_user_function(func)
| en | 0.476875 | # https://msdn.microsoft.com/en-us/library/ft9x1kdx.aspx | 2.001411 | 2 |
pixloc/visualization/viz_3d.py | jmorlana/pixloc | 457 | 227 | <filename>pixloc/visualization/viz_3d.py
"""
3D visualization primitives based on Plotly.
We might want to instead use a more powerful library like Open3D.
Plotly however supports animations, buttons and sliders.
1) Initialize a figure with `fig = init_figure()`
2) Plot points, cameras, lines, or create a slider animation.
3) Call `fig.show()` to render the figure.
"""
import plotly.graph_objects as go
import numpy as np
from ..pixlib.geometry.utils import to_homogeneous
def init_figure(height=800):
"""Initialize a 3D figure."""
fig = go.Figure()
fig.update_layout(
height=height,
scene_camera=dict(
eye=dict(x=0., y=-.1, z=-2), up=dict(x=0, y=-1., z=0)),
scene=dict(
xaxis=dict(showbackground=False),
yaxis=dict(showbackground=False),
aspectmode='data', dragmode='orbit'),
margin=dict(l=0, r=0, b=0, t=0, pad=0)) # noqa E741
return fig
def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2):
"""Plot a set of 3D points."""
x, y, z = pts.T
tr = go.Scatter3d(
x=x, y=y, z=z, mode='markers', marker_size=ps,
marker_color=color, marker_line_width=.2)
fig.add_trace(tr)
def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'):
"""Plot a camera as a cone with camera frustum."""
x, y, z = t
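    # Cone axis: world-frame direction of the camera's -Z axis (this assumes R maps camera to world and the camera looks along -Z).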
u, v, w = R @ -np.array([0, 0, 1])
tr = go.Cone(
x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip',
showscale=False, colorscale=[[0, color], [1, color]],
sizemode='absolute')
fig.add_trace(tr)
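    # Frustum: unproject the image corners with K^-1, scale the rays to a depth of 0.5, then map them to world coordinates with R and t.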
W, H = K[0, 2]*2, K[1, 2]*2
corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]])
corners = to_homogeneous(corners) @ np.linalg.inv(K).T
corners = (corners/2) @ R.T + t
x, y, z = corners.T
tr = go.Scatter3d(
x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'),
marker=dict(size=0.0001), showlegend=False)
fig.add_trace(tr)
def create_slider_animation(fig, traces):
"""Create a slider that animates a list of traces (e.g. 3D points)."""
slider = {'steps': []}
frames = []
fig.add_trace(traces[0])
idx = len(fig.data) - 1
for i, tr in enumerate(traces):
frames.append(go.Frame(name=str(i), traces=[idx], data=[tr]))
step = {"args": [
[str(i)],
{"frame": {"redraw": True},
"mode": "immediate"}],
"label": i,
"method": "animate"}
slider['steps'].append(step)
fig.frames = tuple(frames)
fig.layout.sliders = (slider,)
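if __name__ == '__main__':
    # Minimal sketch of the three steps from the module docstring, using random
    # points and an identity camera pose; the intrinsics values are illustrative
    # only, not taken from any pixloc model.
    fig = init_figure()
    plot_points(fig, np.random.RandomState(0).uniform(-1., 1., (100, 3)))
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    plot_camera(fig, np.eye(3), np.zeros(3), K)
    fig.show()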
| <filename>pixloc/visualization/viz_3d.py
"""
3D visualization primitives based on Plotly.
We might want to instead use a more powerful library like Open3D.
Plotly however supports animations, buttons and sliders.
1) Initialize a figure with `fig = init_figure()`
2) Plot points, cameras, lines, or create a slider animation.
3) Call `fig.show()` to render the figure.
"""
import plotly.graph_objects as go
import numpy as np
from ..pixlib.geometry.utils import to_homogeneous
def init_figure(height=800):
"""Initialize a 3D figure."""
fig = go.Figure()
fig.update_layout(
height=height,
scene_camera=dict(
eye=dict(x=0., y=-.1, z=-2), up=dict(x=0, y=-1., z=0)),
scene=dict(
xaxis=dict(showbackground=False),
yaxis=dict(showbackground=False),
aspectmode='data', dragmode='orbit'),
margin=dict(l=0, r=0, b=0, t=0, pad=0)) # noqa E741
return fig
def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2):
"""Plot a set of 3D points."""
x, y, z = pts.T
tr = go.Scatter3d(
x=x, y=y, z=z, mode='markers', marker_size=ps,
marker_color=color, marker_line_width=.2)
fig.add_trace(tr)
def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'):
"""Plot a camera as a cone with camera frustum."""
x, y, z = t
u, v, w = R @ -np.array([0, 0, 1])
tr = go.Cone(
x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip',
showscale=False, colorscale=[[0, color], [1, color]],
sizemode='absolute')
fig.add_trace(tr)
W, H = K[0, 2]*2, K[1, 2]*2
corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]])
corners = to_homogeneous(corners) @ np.linalg.inv(K).T
corners = (corners/2) @ R.T + t
x, y, z = corners.T
tr = go.Scatter3d(
x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'),
marker=dict(size=0.0001), showlegend=False)
fig.add_trace(tr)
def create_slider_animation(fig, traces):
"""Create a slider that animates a list of traces (e.g. 3D points)."""
slider = {'steps': []}
frames = []
fig.add_trace(traces[0])
idx = len(fig.data) - 1
for i, tr in enumerate(traces):
frames.append(go.Frame(name=str(i), traces=[idx], data=[tr]))
step = {"args": [
[str(i)],
{"frame": {"redraw": True},
"mode": "immediate"}],
"label": i,
"method": "animate"}
slider['steps'].append(step)
fig.frames = tuple(frames)
fig.layout.sliders = (slider,)
| en | 0.871509 | 3D visualization primitives based on Plotly. We might want to instead use a more powerful library like Open3D. Plotly however supports animations, buttons and sliders. 1) Initialize a figure with `fig = init_figure()` 2) Plot points, cameras, lines, or create a slider animation. 3) Call `fig.show()` to render the figure. Initialize a 3D figure. # noqa E741 Plot a set of 3D points. Plot a camera as a cone with camera frustum. Create a slider that animates a list of traces (e.g. 3D points). | 3.083093 | 3 |
day04/c.py | Net-Mist/advent_of_code2021 | 1 | 228 | <filename>day04/c.py
import numpy as np
GRID_SIZE = 5
def read_bingo_grid(lines: list[str]) -> list[list[int]]:
return [[int(n) for n in line.split()] for line in lines]
def bingo_step(grids: np.ndarray, checked_grids: np.ndarray, number: int) -> None:
checked_grids[np.where(grids == number)] = True
def check_victory(check_grids: np.ndarray) -> set[int]:
"""return empty set if no victory, else set of id of the wining grids"""
return set(np.where(check_grids.sum(axis=1).max(axis=1) == 5)[0]).union(
np.where(check_grids.sum(axis=2).max(axis=1) == 5)[0]
)
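# e.g. a single fully-marked 5x5 board wins on every row and column:
#   check_victory(np.ones((1, 5, 5), dtype=bool)) == {0}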
def sum_grid(grid: np.ndarray, checked_grid: np.ndarray) -> int:
grid[checked_grid] = 0
return grid.sum()
def main() -> None:
with open("input.txt") as f:
lines = f.readlines()
random_numbers = [int(n) for n in lines[0].split(",")]
grids = np.array([read_bingo_grid(lines[i : i + GRID_SIZE]) for i in range(2, len(lines), 1 + GRID_SIZE)])
checked_grids = np.array([[[False for _ in range(GRID_SIZE)] for _ in range(GRID_SIZE)] for _ in range(len(grids))])
win = False
i = 0
q1_done = False
while not win:
bingo_step(grids, checked_grids, random_numbers[i])
winning_set = check_victory(checked_grids)
if len(winning_set) == 1 and not q1_done:
index = list(winning_set)[0]
s = sum_grid(grids[index], checked_grids[index])
print("part1:", s * random_numbers[i])
q1_done = True
if len(grids) == len(winning_set) + 1:
index_last_to_win = list(set(range(len(grids))).difference(winning_set))[0]
if len(grids) == len(winning_set):
s = sum_grid(grids[index_last_to_win], checked_grids[index_last_to_win])
print("part2:", random_numbers[i], s, random_numbers[i] * s)
return
i += 1
if __name__ == "__main__":
main()
| <filename>day04/c.py
import numpy as np
GRID_SIZE = 5
def read_bingo_grid(lines: list[str]) -> list[list[int]]:
return [[int(n) for n in line.split()] for line in lines]
def bingo_step(grids: np.ndarray, checked_grids: np.ndarray, number: int) -> None:
checked_grids[np.where(grids == number)] = True
def check_victory(check_grids: np.ndarray) -> set[int]:
"""return empty set if no victory, else set of id of the wining grids"""
return set(np.where(check_grids.sum(axis=1).max(axis=1) == 5)[0]).union(
np.where(check_grids.sum(axis=2).max(axis=1) == 5)[0]
)
def sum_grid(grid: np.ndarray, checked_grid: np.ndarray) -> int:
grid[checked_grid] = 0
return grid.sum()
def main() -> None:
with open("input.txt") as f:
lines = f.readlines()
random_numbers = [int(n) for n in lines[0].split(",")]
grids = np.array([read_bingo_grid(lines[i : i + GRID_SIZE]) for i in range(2, len(lines), 1 + GRID_SIZE)])
checked_grids = np.array([[[False for _ in range(GRID_SIZE)] for _ in range(GRID_SIZE)] for _ in range(len(grids))])
win = False
i = 0
q1_done = False
while not win:
bingo_step(grids, checked_grids, random_numbers[i])
winning_set = check_victory(checked_grids)
if len(winning_set) == 1 and not q1_done:
index = list(winning_set)[0]
s = sum_grid(grids[index], checked_grids[index])
print("part1:", s * random_numbers[i])
q1_done = True
if len(grids) == len(winning_set) + 1:
index_last_to_win = list(set(range(len(grids))).difference(winning_set))[0]
if len(grids) == len(winning_set):
s = sum_grid(grids[index_last_to_win], checked_grids[index_last_to_win])
print("part2:", random_numbers[i], s, random_numbers[i] * s)
return
i += 1
if __name__ == "__main__":
main()
| en | 0.632777 | return empty set if no victory, else set of id of the wining grids | 3.214474 | 3 |
altair/vegalite/v2/examples/us_population_pyramid_over_time.py | hugovk/altair | 1 | 229 | <filename>altair/vegalite/v2/examples/us_population_pyramid_over_time.py<gh_stars>1-10
'''
US Population Pyramid Over Time
===============================
A population pyramid shows the distribution of age groups within a population.
It uses a slider widget that is bound to the year to visualize the age
distribution over time.
'''
# category: case studies
import altair as alt
from altair.expr import datum, if_
from vega_datasets import data
pop = data.population.url
slider = alt.binding_range(min=1850, max=2000, step=10)
select_year = alt.selection_single(name='year', fields=['year'], bind=slider)
base = alt.Chart(pop).add_selection(
select_year
).transform_filter(
select_year
).transform_calculate(
gender=if_(datum.sex == 1, 'Male', 'Female')
)
title = alt.Axis(title='population')
color_scale = alt.Scale(domain=['Male', 'Female'],
range=['#1f77b4', '#e377c2'])
left = base.transform_filter(
datum.gender == 'Female'
).encode(
y=alt.X('age:O', axis=None),
x=alt.X('sum(people):Q', axis=title, sort=alt.SortOrder('descending')),
color=alt.Color('gender:N', scale=color_scale, legend=None)
).mark_bar().properties(title='Female')
middle = base.encode(
y=alt.X('age:O', axis=None),
text=alt.Text('age:Q'),
).mark_text().properties(width=20)
right = base.transform_filter(
datum.gender == 'Male'
).encode(
y=alt.X('age:O', axis=None),
x=alt.X('sum(people):Q', axis=title),
color=alt.Color('gender:N', scale=color_scale, legend=None)
).mark_bar().properties(title='Male')
left | middle | right | <filename>altair/vegalite/v2/examples/us_population_pyramid_over_time.py<gh_stars>1-10
'''
US Population Pyramid Over Time
===============================
A population pyramid shows the distribution of age groups within a population.
It uses a slider widget that is bound to the year to visualize the age
distribution over time.
'''
# category: case studies
import altair as alt
from altair.expr import datum, if_
from vega_datasets import data
pop = data.population.url
slider = alt.binding_range(min=1850, max=2000, step=10)
select_year = alt.selection_single(name='year', fields=['year'], bind=slider)
base = alt.Chart(pop).add_selection(
select_year
).transform_filter(
select_year
).transform_calculate(
gender=if_(datum.sex == 1, 'Male', 'Female')
)
title = alt.Axis(title='population')
color_scale = alt.Scale(domain=['Male', 'Female'],
range=['#1f77b4', '#e377c2'])
left = base.transform_filter(
datum.gender == 'Female'
).encode(
y=alt.X('age:O', axis=None),
x=alt.X('sum(people):Q', axis=title, sort=alt.SortOrder('descending')),
color=alt.Color('gender:N', scale=color_scale, legend=None)
).mark_bar().properties(title='Female')
middle = base.encode(
y=alt.X('age:O', axis=None),
text=alt.Text('age:Q'),
).mark_text().properties(width=20)
right = base.transform_filter(
datum.gender == 'Male'
).encode(
y=alt.X('age:O', axis=None),
x=alt.X('sum(people):Q', axis=title),
color=alt.Color('gender:N', scale=color_scale, legend=None)
).mark_bar().properties(title='Male')
left | middle | right | en | 0.810942 | US Population Pyramid Over Time =============================== A population pyramid shows the distribution of age groups within a population. It uses a slider widget that is bound to the year to visualize the age distribution over time. # category: case studies | 3.255542 | 3 |
mtp_api/apps/core/migrations/0004_token.py | ministryofjustice/mtp-api | 5 | 230 | <reponame>ministryofjustice/mtp-api<gh_stars>1-10
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180404_1515'),
]
operations = [
migrations.CreateModel(
name='Token',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=20, primary_key=True, serialize=False)),
('token', models.TextField()),
('expires', models.DateTimeField(blank=True, null=True)),
],
options={
'ordering': ('name',),
'permissions': (('view_token', 'Can view token'),),
},
),
]
| from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20180404_1515'),
]
operations = [
migrations.CreateModel(
name='Token',
fields=[
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('name', models.CharField(max_length=20, primary_key=True, serialize=False)),
('token', models.TextField()),
('expires', models.DateTimeField(blank=True, null=True)),
],
options={
'ordering': ('name',),
'permissions': (('view_token', 'Can view token'),),
},
),
] | none | 1 | 1.816168 | 2 |
|
palm_tree/coconut_1/models.py | m-hintz-42/a-palm-tree | 0 | 231 | from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
| from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
| en | 0.307568 | # # class Logs(db.Model): # id = db.Column(db.Integer, primary_key=True) # uuid = db.Column(db.Integer) # payload = db.Column(db.Text) # datetime = db.Column(db.DateTime) # # def __init__(self, uuid, payload, datetime): # self.uuid = uuid # self.payload = payload # self.datetime = datetime # # def __repr__(self): # return '<Data %r>' % self.payload | 2.591085 | 3 |
network_checker/dhcp_checker/utils.py | Zipfer/fuel-web | 0 | 232 | # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
import subprocess
import sys
from scapy import all as scapy
DHCP_OFFER_COLUMNS = ('iface', 'mac', 'server_ip', 'server_id', 'gateway',
'dport', 'message', 'yiaddr')
def command_util(*command):
"""object with stderr and stdout
"""
return subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _check_vconfig():
"""Check vconfig installed or not
"""
return not command_util('which', 'vconfig').stderr.read()
def _iface_state(iface):
"""For a given iface return it's state
returns UP, DOWN, UNKNOWN
"""
state = command_util('ip', 'link', 'show', iface).stdout.read()
search_result = re.search(r'.*<(?P<state>.*)>.*', state)
if search_result:
state_list = search_result.groupdict().get('state', [])
if 'UP' in state_list:
return 'UP'
else:
return 'DOWN'
return 'UNKNOWN'
def check_network_up(iface):
return _iface_state(iface) == 'UP'
def check_iface_exist(iface):
"""Check provided interface exists
"""
return not command_util("ip", "link", "show", iface).stderr.read()
def filtered_ifaces(ifaces):
for iface in ifaces:
if not check_iface_exist(iface):
sys.stderr.write('Iface {0} does not exist.'.format(iface))
else:
if not check_network_up(iface):
sys.stderr.write('Network for iface {0} is down.'.format(
iface))
else:
yield iface
def pick_ip(range_start, range_end):
"""Given start_range, end_range generate list of ips
>>> next(pick_ip('192.168.1.10','192.168.1.13'))
'192.168.1.10'
"""
split_address = lambda ip_address: \
[int(item) for item in ip_address.split('.')]
range_start = split_address(range_start)
range_end = split_address(range_end)
i = 0
    # ipv4 subnet can't be longer than 4 items
while i < 4:
# 255 - end of subnet
if not range_start[i] == range_end[i] and range_start[i] < 255:
yield '.'.join([str(item) for item in range_start])
range_start[i] += 1
else:
i += 1
def get_item_properties(item, columns):
"""Get specified in columns properties, with preserved order.
Required for correct cli table generation
:param item: dict
:param columns: list with arbitrary keys
"""
properties = []
for key in columns:
properties.append(item.get(key, ''))
return properties
def format_options(options):
"""Util for serializing dhcp options
@options = [1,2,3]
>>> format_options([1, 2, 3])
'\x01\x02\x03'
"""
return "".join((chr(item) for item in options))
def _dhcp_options(dhcp_options):
"""Dhcp options returned by scapy is not in usable format
[('message-type', 2), ('server_id', '192.168.0.5'),
('name_server', '192.168.0.1', '192.168.0.2'), 'end']
"""
for option in dhcp_options:
if isinstance(option, (tuple, list)):
header = option[0]
if len(option[1:]) > 1:
yield (header, option)
else:
yield (header, option[1])
def format_answer(ans, iface):
dhcp_options = dict(_dhcp_options(ans[scapy.DHCP].options))
results = (
iface, ans[scapy.Ether].src, ans[scapy.IP].src,
dhcp_options['server_id'], ans[scapy.BOOTP].giaddr,
ans[scapy.UDP].sport,
scapy.DHCPTypes[dhcp_options['message-type']],
ans[scapy.BOOTP].yiaddr)
return dict(zip(DHCP_OFFER_COLUMNS, results))
def single_format(func):
"""Manage format of dhcp response
"""
@functools.wraps(func)
def formatter(*args, **kwargs):
iface = args[0]
ans = func(*args, **kwargs)
#scapy stores all sequence of requests
#so ans[0][1] would be response to first request
return [format_answer(response[1], iface) for response in ans]
return formatter
def multiproc_map(func):
# multiproc map could not work with format *args
@functools.wraps(func)
def workaround(*args, **kwargs):
args = args[0] if isinstance(args[0], (tuple, list)) else args
return func(*args, **kwargs)
return workaround
def filter_duplicated_results(func):
# due to network infra on broadcast multiple duplicated results
# returned. This helper filter them out
@functools.wraps(func)
def wrapper(*args, **kwargs):
resp = func(*args, **kwargs)
return (dict(t) for t in set([tuple(d.items()) for d in resp]))
return wrapper
class VlansContext(object):
"""Contains all logic to manage vlans
"""
def __init__(self, config):
"""Initialize VlansContext
@config - list or tuple of (iface, vlan) pairs
"""
self.config = config
def __enter__(self):
for iface, vlans in self.config.iteritems():
vifaces = []
for vlan in vlans:
if vlan > 0:
vifaces.append('{0}.{1}'.format(iface, vlan))
yield str(iface), vifaces
def __exit__(self, type, value, trace):
pass
class IfaceState(object):
"""Context manager to control state of iface when dhcp checker is running
"""
def __init__(self, iface, rollback=True, retry=3):
self.rollback = rollback
self.retry = retry
self.iface = iface
self.pre_iface_state = _iface_state(iface)
self.iface_state = self.pre_iface_state
self.post_iface_state = ''
def iface_up(self):
while self.retry and self.iface_state != 'UP':
command_util('ifconfig', self.iface, 'up')
self.iface_state = _iface_state(self.iface)
self.retry -= 1
if self.iface_state != 'UP':
raise EnvironmentError(
'Tried my best to ifup iface {0}.'.format(self.iface))
def __enter__(self):
self.iface_up()
return self.iface
def __exit__(self, exc_type, exc_val, exc_tb):
if self.pre_iface_state != 'UP' and self.rollback:
command_util('ifconfig', self.iface, 'down')
self.post_iface_state = _iface_state(self.iface)
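# A minimal usage sketch for the two context managers above (assumes root
# privileges and a real interface name such as 'eth0'):
#
#   with IfaceState('eth0') as iface:
#       pass  # iface is guaranteed to be UP here; run the dhcp checks
#
#   with VlansContext({'eth0': [100, 200]}) as pairs:
#       for iface, vifaces in pairs:
#           pass  # yields ('eth0', ['eth0.100', 'eth0.200'])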
| # Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import re
import subprocess
import sys
from scapy import all as scapy
DHCP_OFFER_COLUMNS = ('iface', 'mac', 'server_ip', 'server_id', 'gateway',
'dport', 'message', 'yiaddr')
def command_util(*command):
"""object with stderr and stdout
"""
return subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _check_vconfig():
"""Check vconfig installed or not
"""
return not command_util('which', 'vconfig').stderr.read()
def _iface_state(iface):
"""For a given iface return it's state
returns UP, DOWN, UNKNOWN
"""
state = command_util('ip', 'link', 'show', iface).stdout.read()
search_result = re.search(r'.*<(?P<state>.*)>.*', state)
if search_result:
state_list = search_result.groupdict().get('state', [])
if 'UP' in state_list:
return 'UP'
else:
return 'DOWN'
return 'UNKNOWN'
def check_network_up(iface):
return _iface_state(iface) == 'UP'
def check_iface_exist(iface):
"""Check provided interface exists
"""
return not command_util("ip", "link", "show", iface).stderr.read()
def filtered_ifaces(ifaces):
for iface in ifaces:
if not check_iface_exist(iface):
sys.stderr.write('Iface {0} does not exist.'.format(iface))
else:
if not check_network_up(iface):
sys.stderr.write('Network for iface {0} is down.'.format(
iface))
else:
yield iface
def pick_ip(range_start, range_end):
"""Given start_range, end_range generate list of ips
>>> next(pick_ip('192.168.1.10','192.168.1.13'))
'192.168.1.10'
"""
split_address = lambda ip_address: \
[int(item) for item in ip_address.split('.')]
range_start = split_address(range_start)
range_end = split_address(range_end)
i = 0
    # ipv4 subnet can't be longer than 4 items
while i < 4:
# 255 - end of subnet
if not range_start[i] == range_end[i] and range_start[i] < 255:
yield '.'.join([str(item) for item in range_start])
range_start[i] += 1
else:
i += 1
def get_item_properties(item, columns):
"""Get specified in columns properties, with preserved order.
Required for correct cli table generation
:param item: dict
:param columns: list with arbitrary keys
"""
properties = []
for key in columns:
properties.append(item.get(key, ''))
return properties
def format_options(options):
"""Util for serializing dhcp options
@options = [1,2,3]
>>> format_options([1, 2, 3])
'\x01\x02\x03'
"""
return "".join((chr(item) for item in options))
def _dhcp_options(dhcp_options):
"""Dhcp options returned by scapy is not in usable format
[('message-type', 2), ('server_id', '192.168.0.5'),
('name_server', '192.168.0.1', '192.168.0.2'), 'end']
"""
for option in dhcp_options:
if isinstance(option, (tuple, list)):
header = option[0]
if len(option[1:]) > 1:
yield (header, option)
else:
yield (header, option[1])
def format_answer(ans, iface):
dhcp_options = dict(_dhcp_options(ans[scapy.DHCP].options))
results = (
iface, ans[scapy.Ether].src, ans[scapy.IP].src,
dhcp_options['server_id'], ans[scapy.BOOTP].giaddr,
ans[scapy.UDP].sport,
scapy.DHCPTypes[dhcp_options['message-type']],
ans[scapy.BOOTP].yiaddr)
return dict(zip(DHCP_OFFER_COLUMNS, results))
def single_format(func):
"""Manage format of dhcp response
"""
@functools.wraps(func)
def formatter(*args, **kwargs):
iface = args[0]
ans = func(*args, **kwargs)
#scapy stores all sequence of requests
#so ans[0][1] would be response to first request
return [format_answer(response[1], iface) for response in ans]
return formatter
def multiproc_map(func):
# multiproc map could not work with format *args
@functools.wraps(func)
def workaround(*args, **kwargs):
args = args[0] if isinstance(args[0], (tuple, list)) else args
return func(*args, **kwargs)
return workaround
def filter_duplicated_results(func):
# due to network infra on broadcast multiple duplicated results
# returned. This helper filter them out
@functools.wraps(func)
def wrapper(*args, **kwargs):
resp = func(*args, **kwargs)
return (dict(t) for t in set([tuple(d.items()) for d in resp]))
return wrapper
class VlansContext(object):
"""Contains all logic to manage vlans
"""
def __init__(self, config):
"""Initialize VlansContext
@config - list or tuple of (iface, vlan) pairs
"""
self.config = config
def __enter__(self):
for iface, vlans in self.config.iteritems():
vifaces = []
for vlan in vlans:
if vlan > 0:
vifaces.append('{0}.{1}'.format(iface, vlan))
yield str(iface), vifaces
def __exit__(self, type, value, trace):
pass
class IfaceState(object):
"""Context manager to control state of iface when dhcp checker is running
"""
def __init__(self, iface, rollback=True, retry=3):
self.rollback = rollback
self.retry = retry
self.iface = iface
self.pre_iface_state = _iface_state(iface)
self.iface_state = self.pre_iface_state
self.post_iface_state = ''
def iface_up(self):
while self.retry and self.iface_state != 'UP':
command_util('ifconfig', self.iface, 'up')
self.iface_state = _iface_state(self.iface)
self.retry -= 1
if self.iface_state != 'UP':
raise EnvironmentError(
'Tried my best to ifup iface {0}.'.format(self.iface))
def __enter__(self):
self.iface_up()
return self.iface
def __exit__(self, exc_type, exc_val, exc_tb):
if self.pre_iface_state != 'UP' and self.rollback:
command_util('ifconfig', self.iface, 'down')
self.post_iface_state = _iface_state(self.iface)
| en | 0.74584 | # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. object with stderr and stdout Check vconfig installed or not For a given iface return it's state returns UP, DOWN, UNKNOWN Check provided interface exists Given start_range, end_range generate list of ips >>> next(pick_ip('192.168.1.10','192.168.1.13')) '192.168.1.10' # ipv4 subnet cant be longer that 4 items # 255 - end of subnet Get specified in columns properties, with preserved order. Required for correct cli table generation :param item: dict :param columns: list with arbitrary keys Util for serializing dhcp options @options = [1,2,3] >>> format_options([1, 2, 3]) '\x01\x02\x03' Dhcp options returned by scapy is not in usable format [('message-type', 2), ('server_id', '192.168.0.5'), ('name_server', '192.168.0.1', '192.168.0.2'), 'end'] Manage format of dhcp response #scapy stores all sequence of requests #so ans[0][1] would be response to first request # multiproc map could not work with format *args # due to network infra on broadcast multiple duplicated results # returned. This helper filter them out Contains all logic to manage vlans Initialize VlansContext @config - list or tuple of (iface, vlan) pairs Context manager to control state of iface when dhcp checker is running | 2.023288 | 2 |
paneldata_dash/backend/schemas/johnson_scanner_data.py | clarencejlee/jdp | 0 | 233 | from ma import ma
from models.johnson_scanner_data import JohnsonScannerDataModel
from schemas.brand import BrandSchema
from schemas.category import CategorySchema
from schemas.facts_in_data import FactsInDataSchema
from schemas.market import MarketSchema
from schemas.period import PeriodSchema
class JohnsonScannerDataSchema(ma.SQLAlchemySchema):
market = ma.Nested(MarketSchema)
brand = ma.Nested(BrandSchema)
category = ma.Nested(CategorySchema)
period = ma.Nested(PeriodSchema)
facts = ma.Nested(FactsInDataSchema, many=True)
class Meta:
model = JohnsonScannerDataModel
dump_only = ("id",)
# include_fk = False
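# A minimal serialization sketch (assumes `record` is a JohnsonScannerDataModel
# instance loaded via SQLAlchemy; the name is illustrative):
#
#   schema = JohnsonScannerDataSchema()
#   payload = schema.dump(record)  # nested market/brand/category/period/facts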
| from ma import ma
from models.johnson_scanner_data import JohnsonScannerDataModel
from schemas.brand import BrandSchema
from schemas.category import CategorySchema
from schemas.facts_in_data import FactsInDataSchema
from schemas.market import MarketSchema
from schemas.period import PeriodSchema
class JohnsonScannerDataSchema(ma.SQLAlchemySchema):
market = ma.Nested(MarketSchema)
brand = ma.Nested(BrandSchema)
category = ma.Nested(CategorySchema)
period = ma.Nested(PeriodSchema)
facts = ma.Nested(FactsInDataSchema, many=True)
class Meta:
model = JohnsonScannerDataModel
dump_only = ("id",)
# include_fk = False
| en | 0.892635 | # include_fk = False | 2.174699 | 2 |
Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py | farzanaaswin0708/CNN-for-Visual-recognition | 0 | 234 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
################################################################################
# CSE 253: Programming Assignment 3
# Winter 2019
# Code author: <NAME> (+ modifications by <NAME>)
#
# Filename: baseline_cnn.py
#
# Description:
#
# This file contains the starter code for the baseline architecture you will use
# to get a little practice with PyTorch and compare the results of with your
# improved architecture.
#
# Be sure to fill in the code in the areas marked #TODO.
################################################################################
# PyTorch and neural network imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as func
import torch.nn.init as torch_init
import torch.optim as optim
# Data utils and dataloader
import torchvision
from torchvision import transforms, utils
from xray_dataloader_zscored import ChestXrayDataset, create_split_loaders
import matplotlib.pyplot as plt
import numpy as np
import os
class Arch2CNN(nn.Module):
"""
    conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 -> maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
"""
def __init__(self):
super(Arch2CNN, self).__init__()
# conv1: 1 input channel, 4 output channels, [3x3] kernel size
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
# Add batch-normalization to the outputs of conv1
self.conv1_normed = nn.BatchNorm2d(4)
# Initialized weights using the Xavier-Normal method
torch_init.xavier_normal_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Fill in the remaining initializations replacing each '_' with
# the necessary value based on the provided specs for each layer
#TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3)
self.conv2_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv2.weight)
#Maxpool
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv3: X input channels, 12 output channels, [8x8] kernel, initialization: xavier
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.conv3_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv3.weight)
#TODO: conv4: X input channels, 10 output channels, [6x6] kernel, initialization: xavier
self.conv4 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3)
self.conv4_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv4.weight)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv5: X input channels, 8 output channels, [5x5] kernel, initialization: xavier
self.conv5 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3)
self.conv5_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv5.weight)
self.conv6 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv6_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv6.weight)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Apply max-pooling with a [3x3] kernel using tiling (*NO SLIDING WINDOW*)
self.conv7 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv7_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv7.weight)
self.conv8 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv8_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv8.weight)
self.pool5 = nn.MaxPool2d(kernel_size=4, stride=4)
# Define 2 fully connected layers:
#TODO: fc1
self.fc1 = nn.Linear(in_features=122*122*8, out_features=512)
self.fc1_normed = nn.BatchNorm1d(512)
torch_init.xavier_normal_(self.fc1.weight)
#TODO: fc2
self.fc2 = nn.Linear(in_features=512, out_features=128)
self.fc2_normed = nn.BatchNorm1d(128)
torch_init.xavier_normal_(self.fc2.weight)
#TODO: fc3
self.fc3 = nn.Linear(in_features=128, out_features=14)
torch_init.xavier_normal_(self.fc3.weight)
#TODO: Output layer: what should out_features be?
self.out_features = 14
def forward(self, batch):
"""Pass the batch of images through each layer of the network, applying
non-linearities after each layer.
Note that this function *needs* to be called "forward" for PyTorch to
automagically perform the forward pass.
Params:
-------
- batch: (Tensor) An input batch of images
Returns:
--------
- logits: (Variable) The output of the network
"""
# Apply first convolution, followed by ReLU non-linearity;
# use batch-normalization on its outputs
batch = func.rrelu(self.conv1_normed(self.conv1(batch)))
batch = self.pool1(batch)
# Apply conv2 and conv3 similarly
batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
batch = self.pool2(batch)
batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
batch = self.pool3(batch)
batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool4(batch)
batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool5(batch)
# Reshape the output of the conv3 to pass to fully-connected layer
batch = batch.view(-1, self.num_flat_features(batch))
# Connect the reshaped features of the pooled conv3 to fc1
batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
batch = func.rrelu(self.fc2_normed(self.fc2(batch)))
# Connect fc1 to fc2 - this layer is slightly different than the rest (why?)
batch = self.fc3(batch)
# Return the class predictions
#TODO: apply an activition function to 'batch'
#batch = func.sigmoid(batch)
return batch
def num_flat_features(self, inputs):
# Get the dimensions of the layers excluding the inputs
size = inputs.size()[1:]
# Track the number of features
num_features = 1
for s in size:
num_features *= s
return num_features
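if __name__ == "__main__":
    # Quick shape sanity-check on a random batch. The 122*122*8 in_features of
    # fc1 above implies 512x512 single-channel inputs, so that is what this
    # sketch assumes.
    model = Arch2CNN()
    dummy_batch = torch.randn(4, 1, 512, 512)
    logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([4, 14])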
| #!/usr/bin/env python
# coding: utf-8
# In[ ]:
################################################################################
# CSE 253: Programming Assignment 3
# Winter 2019
# Code author: <NAME> (+ modifications by <NAME>)
#
# Filename: baseline_cnn.py
#
# Description:
#
# This file contains the starter code for the baseline architecture you will use
# to get a little practice with PyTorch and compare the results of with your
# improved architecture.
#
# Be sure to fill in the code in the areas marked #TODO.
################################################################################
# PyTorch and neural network imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as func
import torch.nn.init as torch_init
import torch.optim as optim
# Data utils and dataloader
import torchvision
from torchvision import transforms, utils
from xray_dataloader_zscored import ChestXrayDataset, create_split_loaders
import matplotlib.pyplot as plt
import numpy as np
import os
class Arch2CNN(nn.Module):
"""
    conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 -> maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
"""
def __init__(self):
super(Arch2CNN, self).__init__()
# conv1: 1 input channel, 4 output channels, [3x3] kernel size
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
# Add batch-normalization to the outputs of conv1
self.conv1_normed = nn.BatchNorm2d(4)
# Initialized weights using the Xavier-Normal method
torch_init.xavier_normal_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Fill in the remaining initializations replacing each '_' with
# the necessary value based on the provided specs for each layer
#TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3)
self.conv2_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv2.weight)
#Maxpool
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv3: X input channels, 12 output channels, [8x8] kernel, initialization: xavier
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.conv3_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv3.weight)
#TODO: conv4: X input channels, 10 output channels, [6x6] kernel, initialization: xavier
self.conv4 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3)
self.conv4_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv4.weight)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv5: X input channels, 8 output channels, [5x5] kernel, initialization: xavier
self.conv5 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3)
self.conv5_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv5.weight)
self.conv6 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv6_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv6.weight)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Apply max-pooling with a [3x3] kernel using tiling (*NO SLIDING WINDOW*)
self.conv7 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv7_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv7.weight)
self.conv8 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv8_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv8.weight)
self.pool5 = nn.MaxPool2d(kernel_size=4, stride=4)
# Define 2 fully connected layers:
#TODO: fc1
self.fc1 = nn.Linear(in_features=122*122*8, out_features=512)
self.fc1_normed = nn.BatchNorm1d(512)
torch_init.xavier_normal_(self.fc1.weight)
#TODO: fc2
self.fc2 = nn.Linear(in_features=512, out_features=128)
self.fc2_normed = nn.BatchNorm1d(128)
torch_init.xavier_normal_(self.fc2.weight)
#TODO: fc3
self.fc3 = nn.Linear(in_features=128, out_features=14)
torch_init.xavier_normal_(self.fc3.weight)
#TODO: Output layer: what should out_features be?
self.out_features = 14
def forward(self, batch):
"""Pass the batch of images through each layer of the network, applying
non-linearities after each layer.
Note that this function *needs* to be called "forward" for PyTorch to
automagically perform the forward pass.
Params:
-------
- batch: (Tensor) An input batch of images
Returns:
--------
- logits: (Variable) The output of the network
"""
# Apply first convolution, followed by ReLU non-linearity;
# use batch-normalization on its outputs
batch = func.rrelu(self.conv1_normed(self.conv1(batch)))
batch = self.pool1(batch)
# Apply conv2 and conv3 similarly
batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
batch = self.pool2(batch)
batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
batch = self.pool3(batch)
batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool4(batch)
batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool5(batch)
# Reshape the output of the conv3 to pass to fully-connected layer
batch = batch.view(-1, self.num_flat_features(batch))
# Connect the reshaped features of the pooled conv3 to fc1
batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
batch = func.rrelu(self.fc2_normed(self.fc2(batch)))
# Connect fc1 to fc2 - this layer is slightly different than the rest (why?)
batch = self.fc3(batch)
# Return the class predictions
#TODO: apply an activition function to 'batch'
#batch = func.sigmoid(batch)
return batch
def num_flat_features(self, inputs):
# Get the dimensions of the layers excluding the inputs
size = inputs.size()[1:]
# Track the number of features
num_features = 1
for s in size:
num_features *= s
return num_features
| en | 0.695438 | #!/usr/bin/env python # coding: utf-8 # In[ ]: ################################################################################ # CSE 253: Programming Assignment 3 # Winter 2019 # Code author: <NAME> (+ modifications by <NAME>) # # Filename: baseline_cnn.py # # Description: # # This file contains the starter code for the baseline architecture you will use # to get a little practice with PyTorch and compare the results of with your # improved architecture. # # Be sure to fill in the code in the areas marked #TODO. ################################################################################ # PyTorch and neural network imports # Data utils and dataloader <<<<<<< HEAD conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 ->maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs) ======= conv1 -> conv2 -> maxpool -> conv3 -> conv4 -> conv5 -> maxpool -> fc1 -> fc2 -> fc3 (outputs) >>>>>>> 6652e3cfb72835ac4a7c802c9a703b59d5f63ae6 # conv1: 1 input channel, 4 output channels, [3x3] kernel size # Add batch-normalization to the outputs of conv1 # Initialized weights using the Xavier-Normal method #TODO: Fill in the remaining initializations replacing each '_' with # the necessary value based on the provided specs for each layer #TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier #Maxpool #TODO: conv3: X input channels, 12 output channels, [8x8] kernel, initialization: xavier #TODO: conv4: X input channels, 10 output channels, [6x6] kernel, initialization: xavier #TODO: conv5: X input channels, 8 output channels, [5x5] kernel, initialization: xavier #TODO: Apply max-pooling with a [3x3] kernel using tiling (*NO SLIDING WINDOW*) # Define 2 fully connected layers: #TODO: fc1 #TODO: fc2 #TODO: fc3 #TODO: Output layer: what should out_features be? Pass the batch of images through each layer of the network, applying non-linearities after each layer. Note that this function *needs* to be called "forward" for PyTorch to automagically perform the forward pass. Params: ------- - batch: (Tensor) An input batch of images Returns: -------- - logits: (Variable) The output of the network # Apply first convolution, followed by ReLU non-linearity; # use batch-normalization on its outputs # Apply conv2 and conv3 similarly # Pass the output of conv3 to the pooling layer # Pass the output of conv3 to the pooling layer # Reshape the output of the conv3 to pass to fully-connected layer # Connect the reshaped features of the pooled conv3 to fc1 # Connect fc1 to fc2 - this layer is slightly different than the rest (why?) # Return the class predictions #TODO: apply an activition function to 'batch' #batch = func.sigmoid(batch) # Get the dimensions of the layers excluding the inputs # Track the number of features | 3.150542 | 3 |
news_access.py | HydeJackal/TwitterWeeklyNewsBot | 0 | 235 | import json
import urllib.request
import credentials
from datetime import datetime, timedelta
class NewsAPI:
def __init__(self, nyt_api):
self.nyt_access = nyt_api
def get_nyt_last_week_articles(self, topic, today):
delta = timedelta(weeks = 1)
last_week = today - delta
begin_date = last_week.strftime('%Y%m%d')
url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&begin_date=' + begin_date + '&sort=best&type_of_material=Article&api-key=' + self.nyt_access
try:
json_url = urllib.request.urlopen(url)
articles = json.loads(json_url.read())
except:
            raise RuntimeError('Failed to retrieve New York Times data.')
        if articles['status'] == 'OK':
            num_of_articles = len(articles['response']['docs'])
if num_of_articles > 5:
return articles['response']['docs'][0:4], articles['response']['meta']['hits']
else:
return articles['response']['docs'][0:num_of_articles - 1], articles['response']['meta']['hits']
else:
raise RuntimeError('Failed to find any New York Times articles with query.')
api = NewsAPI(credentials.NYT_API)
date_time_obj = datetime.now()
api.get_nyt_last_week_articles('election', date_time_obj) | import json
import urllib.request
import credentials
from datetime import datetime, timedelta
class NewsAPI:
def __init__(self, nyt_api):
self.nyt_access = nyt_api
def get_nyt_last_week_articles(self, topic, today):
delta = timedelta(weeks = 1)
last_week = today - delta
begin_date = last_week.strftime('%Y%m%d')
url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?q=' + topic + '&begin_date=' + begin_date + '&sort=best&type_of_material=Article&api-key=' + self.nyt_access
try:
json_url = urllib.request.urlopen(url)
articles = json.loads(json_url.read())
except:
            raise RuntimeError('Failed to retrieve New York Times data.')
        if articles['status'] == 'OK':
            num_of_articles = len(articles['response']['docs'])
if num_of_articles > 5:
return articles['response']['docs'][0:4], articles['response']['meta']['hits']
else:
return articles['response']['docs'][0:num_of_articles - 1], articles['response']['meta']['hits']
else:
raise RuntimeError('Failed to find any New York Times articles with query.')
api = NewsAPI(credentials.NYT_API)
date_time_obj = datetime.now()
api.get_nyt_last_week_articles('election', date_time_obj) | none | 1 | 3.109453 | 3 |
|
tests/unit/test_trial_component.py | owen-t/sagemaker-experiments-1 | 0 | 236 | <gh_stars>0
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from smexperiments import trial_component, api_types
import datetime
import pytest
import unittest.mock
@pytest.fixture
def sagemaker_boto_client():
return unittest.mock.Mock()
def test_create(sagemaker_boto_client):
sagemaker_boto_client.create_trial_component.return_value = {
"TrialComponentArn": "bazz",
}
obj = trial_component.TrialComponent.create(
trial_component_name="foo", display_name="bar", sagemaker_boto_client=sagemaker_boto_client
)
sagemaker_boto_client.create_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar")
assert "foo" == obj.trial_component_name
assert "bar" == obj.display_name
assert "bazz" == obj.trial_component_arn
def test_load(sagemaker_boto_client):
now = datetime.datetime.now(datetime.timezone.utc)
sagemaker_boto_client.describe_trial_component.return_value = {
"TrialComponentArn": "A",
"TrialComponentName": "B",
"DisplayName": "C",
"Status": {"PrimaryStatus": "InProgress", "Message": "D"},
"Parameters": {"E": {"NumberValue": 1.0}, "F": {"StringValue": "G"}},
"InputArtifacts": {"H": {"Value": "s3://foo/bar", "MediaType": "text/plain"}},
"OutputArtifacts": {"I": {"Value": "s3://whizz/bang", "MediaType": "text/plain"}},
"Metrics": [
{
"MetricName": "J",
"Count": 1,
"Min": 1.0,
"Max": 2.0,
"Avg": 3.0,
"StdDev": 4.0,
"SourceArn": "K",
"Timestamp": now,
}
],
}
obj = trial_component.TrialComponent.load(trial_component_name="foo", sagemaker_boto_client=sagemaker_boto_client)
sagemaker_boto_client.describe_trial_component.assert_called_with(TrialComponentName="foo")
assert "A" == obj.trial_component_arn
assert "B" == obj.trial_component_name
assert "C" == obj.display_name
assert api_types.TrialComponentStatus(primary_status="InProgress", message="D") == obj.status
assert {"E": 1.0, "F": "G"} == obj.parameters
assert {"H": api_types.TrialComponentArtifact(value="s3://foo/bar", media_type="text/plain")}
assert {"I": api_types.TrialComponentArtifact(value="s3://whizz/bang", media_type="text/plain")}
assert [
api_types.TrialComponentMetricSummary(
metric_name="J", count=1, min=1.0, max=2.0, avg=3.0, std_dev=4.0, source_arn="K", timestamp=now
)
]
def test_list(sagemaker_boto_client):
start_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1)
end_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=2)
creation_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=3)
last_modified_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=4)
sagemaker_boto_client.list_trial_components.side_effect = [
{
"TrialComponentSummaries": [
{
"TrialComponentName": "A" + str(i),
"TrialComponentArn": "B" + str(i),
"DisplayName": "C" + str(i),
"SourceArn": "D" + str(i),
"Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)},
"StartTime": start_time + datetime.timedelta(hours=i),
"EndTime": end_time + datetime.timedelta(hours=i),
"CreationTime": creation_time + datetime.timedelta(hours=i),
"LastModifiedTime": last_modified_time + datetime.timedelta(hours=i),
"LastModifiedBy": {},
}
for i in range(10)
],
"NextToken": "100",
},
{
"TrialComponentSummaries": [
{
"TrialComponentName": "A" + str(i),
"TrialComponentArn": "B" + str(i),
"DisplayName": "C" + str(i),
"SourceArn": "D" + str(i),
"Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)},
"StartTime": start_time + datetime.timedelta(hours=i),
"EndTime": end_time + datetime.timedelta(hours=i),
"CreationTime": creation_time + datetime.timedelta(hours=i),
"LastModifiedTime": last_modified_time + datetime.timedelta(hours=i),
"LastModifiedBy": {},
}
for i in range(10, 20)
]
},
]
expected = [
api_types.TrialComponentSummary(
trial_component_name="A" + str(i),
trial_component_arn="B" + str(i),
display_name="C" + str(i),
source_arn="D" + str(i),
status=api_types.TrialComponentStatus(primary_status="InProgress", message="E" + str(i)),
start_time=start_time + datetime.timedelta(hours=i),
end_time=end_time + datetime.timedelta(hours=i),
creation_time=creation_time + datetime.timedelta(hours=i),
last_modified_time=last_modified_time + datetime.timedelta(hours=i),
last_modified_by={},
)
for i in range(20)
]
result = list(
trial_component.TrialComponent.list(
sagemaker_boto_client=sagemaker_boto_client,
source_arn="foo",
sort_by="CreationTime",
sort_order="Ascending",
)
)
assert expected == result
expected_calls = [
unittest.mock.call(SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"),
unittest.mock.call(NextToken="100", SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"),
]
assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls
def test_list_empty(sagemaker_boto_client):
sagemaker_boto_client.list_trial_components.return_value = {"TrialComponentSummaries": []}
assert [] == list(trial_component.TrialComponent.list(sagemaker_boto_client=sagemaker_boto_client))
def test_list_trial_components_call_args(sagemaker_boto_client):
created_before = datetime.datetime(1999, 10, 12, 0, 0, 0)
created_after = datetime.datetime(1990, 10, 12, 0, 0, 0)
trial_name = "foo-trial"
experiment_name = "foo-experiment"
next_token = "<PASSWORD>"
max_results = 99
sagemaker_boto_client.list_trial_components.return_value = {}
assert [] == list(
trial_component.TrialComponent.list(
sagemaker_boto_client=sagemaker_boto_client,
trial_name=trial_name,
experiment_name=experiment_name,
created_before=created_before,
created_after=created_after,
next_token=next_token,
max_results=max_results,
sort_by="CreationTime",
sort_order="Ascending",
)
)
expected_calls = [
unittest.mock.call(
TrialName="foo-trial",
ExperimentName="foo-experiment",
CreatedBefore=created_before,
CreatedAfter=created_after,
SortBy="CreationTime",
SortOrder="Ascending",
NextToken="<PASSWORD>token",
MaxResults=99,
)
]
assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls
def test_save(sagemaker_boto_client):
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
sagemaker_boto_client.update_trial_component.return_value = {}
obj.save()
sagemaker_boto_client.update_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar")
def test_delete(sagemaker_boto_client):
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
sagemaker_boto_client.delete_trial_component.return_value = {}
obj.delete()
sagemaker_boto_client.delete_trial_component.assert_called_with(TrialComponentName="foo")
def test_boto_ignore():
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
assert obj._boto_ignore() == ["ResponseMetadata", "CreatedBy"]
| # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from smexperiments import trial_component, api_types
import datetime
import pytest
import unittest.mock
@pytest.fixture
def sagemaker_boto_client():
return unittest.mock.Mock()
def test_create(sagemaker_boto_client):
sagemaker_boto_client.create_trial_component.return_value = {
"TrialComponentArn": "bazz",
}
obj = trial_component.TrialComponent.create(
trial_component_name="foo", display_name="bar", sagemaker_boto_client=sagemaker_boto_client
)
sagemaker_boto_client.create_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar")
assert "foo" == obj.trial_component_name
assert "bar" == obj.display_name
assert "bazz" == obj.trial_component_arn
def test_load(sagemaker_boto_client):
now = datetime.datetime.now(datetime.timezone.utc)
sagemaker_boto_client.describe_trial_component.return_value = {
"TrialComponentArn": "A",
"TrialComponentName": "B",
"DisplayName": "C",
"Status": {"PrimaryStatus": "InProgress", "Message": "D"},
"Parameters": {"E": {"NumberValue": 1.0}, "F": {"StringValue": "G"}},
"InputArtifacts": {"H": {"Value": "s3://foo/bar", "MediaType": "text/plain"}},
"OutputArtifacts": {"I": {"Value": "s3://whizz/bang", "MediaType": "text/plain"}},
"Metrics": [
{
"MetricName": "J",
"Count": 1,
"Min": 1.0,
"Max": 2.0,
"Avg": 3.0,
"StdDev": 4.0,
"SourceArn": "K",
"Timestamp": now,
}
],
}
obj = trial_component.TrialComponent.load(trial_component_name="foo", sagemaker_boto_client=sagemaker_boto_client)
sagemaker_boto_client.describe_trial_component.assert_called_with(TrialComponentName="foo")
assert "A" == obj.trial_component_arn
assert "B" == obj.trial_component_name
assert "C" == obj.display_name
assert api_types.TrialComponentStatus(primary_status="InProgress", message="D") == obj.status
assert {"E": 1.0, "F": "G"} == obj.parameters
assert {"H": api_types.TrialComponentArtifact(value="s3://foo/bar", media_type="text/plain")}
assert {"I": api_types.TrialComponentArtifact(value="s3://whizz/bang", media_type="text/plain")}
assert [
api_types.TrialComponentMetricSummary(
metric_name="J", count=1, min=1.0, max=2.0, avg=3.0, std_dev=4.0, source_arn="K", timestamp=now
)
]
def test_list(sagemaker_boto_client):
start_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=1)
end_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=2)
creation_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=3)
last_modified_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=4)
sagemaker_boto_client.list_trial_components.side_effect = [
{
"TrialComponentSummaries": [
{
"TrialComponentName": "A" + str(i),
"TrialComponentArn": "B" + str(i),
"DisplayName": "C" + str(i),
"SourceArn": "D" + str(i),
"Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)},
"StartTime": start_time + datetime.timedelta(hours=i),
"EndTime": end_time + datetime.timedelta(hours=i),
"CreationTime": creation_time + datetime.timedelta(hours=i),
"LastModifiedTime": last_modified_time + datetime.timedelta(hours=i),
"LastModifiedBy": {},
}
for i in range(10)
],
"NextToken": "100",
},
{
"TrialComponentSummaries": [
{
"TrialComponentName": "A" + str(i),
"TrialComponentArn": "B" + str(i),
"DisplayName": "C" + str(i),
"SourceArn": "D" + str(i),
"Status": {"PrimaryStatus": "InProgress", "Message": "E" + str(i)},
"StartTime": start_time + datetime.timedelta(hours=i),
"EndTime": end_time + datetime.timedelta(hours=i),
"CreationTime": creation_time + datetime.timedelta(hours=i),
"LastModifiedTime": last_modified_time + datetime.timedelta(hours=i),
"LastModifiedBy": {},
}
for i in range(10, 20)
]
},
]
expected = [
api_types.TrialComponentSummary(
trial_component_name="A" + str(i),
trial_component_arn="B" + str(i),
display_name="C" + str(i),
source_arn="D" + str(i),
status=api_types.TrialComponentStatus(primary_status="InProgress", message="E" + str(i)),
start_time=start_time + datetime.timedelta(hours=i),
end_time=end_time + datetime.timedelta(hours=i),
creation_time=creation_time + datetime.timedelta(hours=i),
last_modified_time=last_modified_time + datetime.timedelta(hours=i),
last_modified_by={},
)
for i in range(20)
]
result = list(
trial_component.TrialComponent.list(
sagemaker_boto_client=sagemaker_boto_client,
source_arn="foo",
sort_by="CreationTime",
sort_order="Ascending",
)
)
assert expected == result
expected_calls = [
unittest.mock.call(SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"),
unittest.mock.call(NextToken="100", SortBy="CreationTime", SortOrder="Ascending", SourceArn="foo"),
]
assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls
def test_list_empty(sagemaker_boto_client):
sagemaker_boto_client.list_trial_components.return_value = {"TrialComponentSummaries": []}
assert [] == list(trial_component.TrialComponent.list(sagemaker_boto_client=sagemaker_boto_client))
def test_list_trial_components_call_args(sagemaker_boto_client):
created_before = datetime.datetime(1999, 10, 12, 0, 0, 0)
created_after = datetime.datetime(1990, 10, 12, 0, 0, 0)
trial_name = "foo-trial"
experiment_name = "foo-experiment"
next_token = "<PASSWORD>"
max_results = 99
sagemaker_boto_client.list_trial_components.return_value = {}
assert [] == list(
trial_component.TrialComponent.list(
sagemaker_boto_client=sagemaker_boto_client,
trial_name=trial_name,
experiment_name=experiment_name,
created_before=created_before,
created_after=created_after,
next_token=next_token,
max_results=max_results,
sort_by="CreationTime",
sort_order="Ascending",
)
)
expected_calls = [
unittest.mock.call(
TrialName="foo-trial",
ExperimentName="foo-experiment",
CreatedBefore=created_before,
CreatedAfter=created_after,
SortBy="CreationTime",
SortOrder="Ascending",
NextToken="<PASSWORD>token",
MaxResults=99,
)
]
assert expected_calls == sagemaker_boto_client.list_trial_components.mock_calls
def test_save(sagemaker_boto_client):
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
sagemaker_boto_client.update_trial_component.return_value = {}
obj.save()
sagemaker_boto_client.update_trial_component.assert_called_with(TrialComponentName="foo", DisplayName="bar")
def test_delete(sagemaker_boto_client):
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
sagemaker_boto_client.delete_trial_component.return_value = {}
obj.delete()
sagemaker_boto_client.delete_trial_component.assert_called_with(TrialComponentName="foo")
def test_boto_ignore():
obj = trial_component.TrialComponent(sagemaker_boto_client, trial_component_name="foo", display_name="bar")
assert obj._boto_ignore() == ["ResponseMetadata", "CreatedBy"] | en | 0.891234 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. | 1.922529 | 2 |
cadnano25/cadnano/part/xovercmds.py | amylittleyang/OtraCAD | 0 | 237 | from cadnano.cnproxy import UndoCommand
from cadnano.strand import Strand
from cadnano import getBatch
import cadnano.preferences as prefs
import random
class CreateXoverCommand(UndoCommand):
"""
Creates a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. apply the strand5p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand5p_idx, strand3p, strand3p_idx, update_oligo=True):
super(CreateXoverCommand, self).__init__("create xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p_idx
self._strand3p = strand3p
self._strand3p_idx = strand3p_idx
self._old_oligo3p = strand3p.oligo()
self._update_oligo = update_oligo
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
old_olg3p = self._old_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._update_oligo:
# Test for Loopiness
if olg5p == strand3p.oligo():
olg5p.setLoop(True)
else:
# 1. update preserved oligo length
olg5p.incrementLength(old_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
old_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
#print('strand5p = %s, connection3p = %s'%(strand5p._name, strand3p._name))
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
# if self._update_oligo and not getBatch():
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
old_olg3p = self._old_oligo3p
olg5p = strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._update_oligo:
# Test Loopiness
if old_olg3p.isLoop():
old_olg3p.setLoop(False)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(old_olg3p.length())
# 3. apply the old oligo to strand3p
old_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, old_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class
class RemoveXoverCommand(UndoCommand):
"""
Removes a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. update the oligo length
4. apply the new strand3p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand3p):
super(RemoveXoverCommand, self).__init__("remove xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p.idx3Prime()
self._strand3p = strand3p
self._strand3p_idx = strand3p.idx5Prime()
n_o3p = self._new_oligo3p = strand3p.oligo().shallowCopy()
colorList = prefs.STAP_COLORS if strand5p.strandSet().isStaple() \
else prefs.SCAF_COLORS
n_o3p.setColor(random.choice(colorList).name())
n_o3p.setLength(0)
for strand in strand3p.generator3pStrand():
n_o3p.incrementLength(strand.totalLength())
# end def
n_o3p.setStrand5p(strand3p)
self._isLoop = strand3p.oligo().isLoop()
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
new_olg3p = self._new_oligo3p
olg5p = self._strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._isLoop:
olg5p.setLoop(False)
olg5p.setStrand5p(strand3p)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(new_olg3p.length())
# 3. apply the old oligo to strand3p
new_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, new_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
new_olg3p = self._new_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._isLoop:
olg5p.setLoop(True)
# No need to restore whatever the old Oligo._strand5p was
else:
# 1. update preserved oligo length
olg5p.incrementLength(new_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
new_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# end else
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class | from cadnano.cnproxy import UndoCommand
from cadnano.strand import Strand
from cadnano import getBatch
import cadnano.preferences as prefs
import random
class CreateXoverCommand(UndoCommand):
"""
Creates a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. apply the strand5p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand5p_idx, strand3p, strand3p_idx, update_oligo=True):
super(CreateXoverCommand, self).__init__("create xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p_idx
self._strand3p = strand3p
self._strand3p_idx = strand3p_idx
self._old_oligo3p = strand3p.oligo()
self._update_oligo = update_oligo
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
old_olg3p = self._old_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._update_oligo:
# Test for Loopiness
if olg5p == strand3p.oligo():
olg5p.setLoop(True)
else:
# 1. update preserved oligo length
olg5p.incrementLength(old_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
old_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
#print('strand5p = %s, connection3p = %s'%(strand5p._name, strand3p._name))
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
# if self._update_oligo and not getBatch():
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
old_olg3p = self._old_oligo3p
olg5p = strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._update_oligo:
# Test Loopiness
if old_olg3p.isLoop():
old_olg3p.setLoop(False)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(old_olg3p.length())
# 3. apply the old oligo to strand3p
old_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, old_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
if self._update_oligo:
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class
class RemoveXoverCommand(UndoCommand):
"""
Removes a Xover from the 3' end of strand5p to the 5' end of strand3p
this needs to
1. preserve the old oligo of strand3p
2. install the crossover
3. update the oligo length
4. apply the new strand3p oligo to the strand3p
"""
def __init__(self, part, strand5p, strand3p):
super(RemoveXoverCommand, self).__init__("remove xover")
self._part = part
self._strand5p = strand5p
self._strand5p_idx = strand5p.idx3Prime()
self._strand3p = strand3p
self._strand3p_idx = strand3p.idx5Prime()
n_o3p = self._new_oligo3p = strand3p.oligo().shallowCopy()
colorList = prefs.STAP_COLORS if strand5p.strandSet().isStaple() \
else prefs.SCAF_COLORS
n_o3p.setColor(random.choice(colorList).name())
n_o3p.setLength(0)
for strand in strand3p.generator3pStrand():
n_o3p.incrementLength(strand.totalLength())
# end def
n_o3p.setStrand5p(strand3p)
self._isLoop = strand3p.oligo().isLoop()
# end def
def redo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
new_olg3p = self._new_oligo3p
olg5p = self._strand5p.oligo()
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
# 1. uninstall the Xover
strand5p.setConnection3p(None)
strand3p.setConnection5p(None)
if self._isLoop:
olg5p.setLoop(False)
olg5p.setStrand5p(strand3p)
else:
# 2. restore the modified oligo length
olg5p.decrementLength(new_olg3p.length())
# 3. apply the old oligo to strand3p
new_olg3p.addToPart(part)
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, new_olg3p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
def undo(self):
part = self._part
strand5p = self._strand5p
strand5p_idx = self._strand5p_idx
strand3p = self._strand3p
strand3p_idx = self._strand3p_idx
olg5p = strand5p.oligo()
new_olg3p = self._new_oligo3p
# 0. Deselect the involved strands
doc = strand5p.document()
doc.removeStrandFromSelection(strand5p)
doc.removeStrandFromSelection(strand3p)
if self._isLoop:
olg5p.setLoop(True)
# No need to restore whatever the old Oligo._strand5p was
else:
# 1. update preserved oligo length
olg5p.incrementLength(new_olg3p.length())
# 2. Remove the old oligo and apply the 5' oligo to the 3' strand
new_olg3p.removeFromPart()
for strand in strand3p.generator3pStrand():
# emits strandHasNewOligoSignal
Strand.setOligo(strand, olg5p)
# end else
# 3. install the Xover
strand5p.setConnection3p(strand3p)
strand3p.setConnection5p(strand5p)
ss5 = strand5p.strandSet()
vh5p = ss5.virtualHelix()
st5p = ss5.strandType()
ss3 = strand3p.strandSet()
vh3p = ss3.virtualHelix()
st3p = ss3.strandType()
part.partActiveVirtualHelixChangedSignal.emit(part, vh5p)
# strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p)
strand5p.strandUpdateSignal.emit(strand5p)
strand3p.strandUpdateSignal.emit(strand3p)
# end def
# end class | en | 0.580872 | Creates a Xover from the 3' end of strand5p to the 5' end of strand3p this needs to 1. preserve the old oligo of strand3p 2. install the crossover 3. apply the strand5p oligo to the strand3p # end def # 0. Deselect the involved strands # Test for Loopiness # 1. update preserved oligo length # 2. Remove the old oligo and apply the 5' oligo to the 3' strand # emits strandHasNewOligoSignal # 3. install the Xover #print('strand5p = %s, connection3p = %s'%(strand5p._name, strand3p._name)) # strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p) # if self._update_oligo and not getBatch(): # end def # 0. Deselect the involved strands # 1. uninstall the Xover # Test Loopiness # 2. restore the modified oligo length # 3. apply the old oligo to strand3p # emits strandHasNewOligoSignal # strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p) # end def # end class Removes a Xover from the 3' end of strand5p to the 5' end of strand3p this needs to 1. preserve the old oligo of strand3p 2. install the crossover 3. update the oligo length 4. apply the new strand3p oligo to the strand3p # end def # end def # 0. Deselect the involved strands # 1. uninstall the Xover # 2. restore the modified oligo length # 3. apply the old oligo to strand3p # emits strandHasNewOligoSignal # strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p) # end def # 0. Deselect the involved strands # No need to restore whatever the old Oligo._strand5p was # 1. update preserved oligo length # 2. Remove the old oligo and apply the 5' oligo to the 3' strand # emits strandHasNewOligoSignal # end else # 3. install the Xover # strand5p.strandXover5pChangedSignal.emit(strand5p, strand3p) # end def # end class | 2.33794 | 2 |
src/temp2.py | FabBrolMons/frbayart | 0 | 238 | from w1thermsensor import W1ThermSensor
sensor = W1ThermSensor()
temperature_in_celsius = sensor.get_temperature()
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
temperature_in_all_units = sensor.get_temperatures([W1ThermSensor.DEGREES_C, W1ThermSensor.DEGREES_F, W1ThermSensor.KELVIN])
print("Sensor id:" + sensor.id)
print(temperature_in_celsius)
| from w1thermsensor import W1ThermSensor
sensor = W1ThermSensor()
temperature_in_celsius = sensor.get_temperature()
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
temperature_in_all_units = sensor.get_temperatures([W1ThermSensor.DEGREES_C, W1ThermSensor.DEGREES_F, W1ThermSensor.KELVIN])
print("Sensor id:" + sensor.id)
print(temperature_in_celsius)
| none | 1 | 2.999274 | 3 |
|
tables/migrations/0004_auto_20200901_2004.py | jarnoln/exposures | 0 | 239 | # Generated by Django 3.1.1 on 2020-09-01 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tables', '0003_exposure_category'),
]
operations = [
migrations.AlterField(
model_name='exposure',
name='location',
field=models.CharField(blank=True, default='', max_length=200),
),
]
| # Generated by Django 3.1.1 on 2020-09-01 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tables', '0003_exposure_category'),
]
operations = [
migrations.AlterField(
model_name='exposure',
name='location',
field=models.CharField(blank=True, default='', max_length=200),
),
]
| en | 0.813313 | # Generated by Django 3.1.1 on 2020-09-01 17:04 | 1.556958 | 2 |
src/reportlab/graphics/charts/__init__.py | kokinomura/reportlab | 52 | 240 | #Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/__init__.py
__version__='3.3.0'
__doc__='''Business charts'''
| #Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/__init__.py
__version__='3.3.0'
__doc__='''Business charts'''
| en | 0.597226 | #Copyright ReportLab Europe Ltd. 2000-2016 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/__init__.py Business charts | 0.938814 | 1 |
src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/_validators.py | AndrewLane/azure-cli | 0 | 241 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=too-many-lines
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags)
from azure.cli.core.util import hash_string
from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri
from azure.cli.command_modules.vm._template_builder import StorageProfile
import azure.cli.core.keys as keys
from ._client_factory import _compute_client_factory
from ._actions import _get_latest_image_version
logger = get_logger(__name__)
def validate_asg_names_or_ids(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_subscription_id
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup',
resource_type=ResourceType.MGMT_NETWORK)
resource_group = namespace.resource_group_name
subscription_id = get_subscription_id(cmd.cli_ctx)
names_or_ids = getattr(namespace, 'application_security_groups')
ids = []
if names_or_ids == [""] or not names_or_ids:
return
for val in names_or_ids:
if not is_valid_resource_id(val):
val = resource_id(
subscription=subscription_id,
resource_group=resource_group,
namespace='Microsoft.Network', type='applicationSecurityGroups',
name=val
)
ids.append(ApplicationSecurityGroup(id=val))
setattr(namespace, 'application_security_groups', ids)
def validate_nsg_name(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
vm_id = resource_id(name=namespace.vm_name, resource_group=namespace.resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines',
subscription=get_subscription_id(cmd.cli_ctx))
namespace.network_security_group_name = namespace.network_security_group_name \
or '{}_NSG_{}'.format(namespace.vm_name, hash_string(vm_id, length=8))
def validate_keyvault(cmd, namespace):
namespace.keyvault = _get_resource_id(cmd.cli_ctx, namespace.keyvault, namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_vm_secret_format(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
keyvault_usage = CLIError('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]')
kv = namespace.keyvault
rg = namespace.resource_group_name
if rg:
if not kv or is_valid_resource_id(kv):
raise keyvault_usage
validate_keyvault(cmd, namespace)
else:
if kv and not is_valid_resource_id(kv):
raise keyvault_usage
def _get_resource_group_from_vault_name(cli_ctx, vault_name):
"""
Fetch resource group from vault name
:param str vault_name: name of the key vault
:return: resource group name or None
:rtype: str
"""
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
for vault in client.list():
id_comps = parse_resource_id(vault.id)
if id_comps['name'] == vault_name:
return id_comps['resource_group']
return None
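# Informal note on the helper below: when only a bare name is given it is expanded to a
# full ARM resource ID (the example values here are hypothetical, not taken from this
# module). For instance, _get_resource_id(cli_ctx, 'myVault', 'myRG', 'vaults',
# 'Microsoft.KeyVault') would resolve to something of the form
#   /subscriptions/<subscription-id>/resourceGroups/myRG/providers/Microsoft.KeyVault/vaults/myVault
# while an input that is already a valid resource ID is returned unchanged.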
def _get_resource_id(cli_ctx, val, resource_group, resource_type, resource_namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if is_valid_resource_id(val):
return val
kwargs = {
'name': val,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'subscription': get_subscription_id(cli_ctx)
}
missing_kwargs = {k: v for k, v in kwargs.items() if not v}
return resource_id(**kwargs) if not missing_kwargs else None
def _get_nic_id(cli_ctx, val, resource_group):
return _get_resource_id(cli_ctx, val, resource_group,
'networkInterfaces', 'Microsoft.Network')
def validate_vm_nic(cmd, namespace):
namespace.nic = _get_nic_id(cmd.cli_ctx, namespace.nic, namespace.resource_group_name)
def validate_vm_nics(cmd, namespace):
rg = namespace.resource_group_name
nic_ids = []
for n in namespace.nics:
nic_ids.append(_get_nic_id(cmd.cli_ctx, n, rg))
namespace.nics = nic_ids
if hasattr(namespace, 'primary_nic') and namespace.primary_nic:
namespace.primary_nic = _get_nic_id(cmd.cli_ctx, namespace.primary_nic, rg)
def _validate_secrets(secrets, os_type):
"""
Validates a parsed JSON array containing secrets for use in VM Creation
Secrets JSON structure
[{
"sourceVault": { "id": "value" },
"vaultCertificates": [{
"certificateUrl": "value",
"certificateStore": "cert store name (only on windows)"
}]
}]
:param dict secrets: Dict fitting the JSON description above
:param string os_type: the type of OS (linux or windows)
:return: errors if any were found
:rtype: list
"""
is_windows = os_type == 'windows'
errors = []
try:
loaded_secret = [validate_file_or_dict(secret) for secret in secrets]
except Exception as err:
raise CLIError('Error decoding secrets: {0}'.format(err))
for idx_arg, narg_secret in enumerate(loaded_secret):
for idx, secret in enumerate(narg_secret):
if 'sourceVault' not in secret:
errors.append(
'Secret is missing sourceVault key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'sourceVault' in secret and 'id' not in secret['sourceVault']:
errors.append(
'Secret is missing sourceVault.id key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'vaultCertificates' not in secret or not secret['vaultCertificates']:
err = 'Secret is missing vaultCertificates array or it is empty at index {0} in ' \
'arg {1} '
errors.append(err.format(idx, idx_arg))
else:
for jdx, cert in enumerate(secret['vaultCertificates']):
message = 'Secret is missing {0} within vaultCertificates array at secret ' \
'index {1} and vaultCertificate index {2} in arg {3}'
if 'certificateUrl' not in cert:
errors.append(message.format('certificateUrl', idx, jdx, idx_arg))
if is_windows and 'certificateStore' not in cert:
errors.append(message.format('certificateStore', idx, jdx, idx_arg))
if errors:
raise CLIError('\n'.join(errors))
# region VM Create Validators
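# The parser below accepts --image in several forms; the sample values here are
# hypothetical and only illustrate the branches handled in _parse_image_argument:
#   - fully-qualified resource ID of a managed or gallery image
#   - URN, e.g. Canonical:UbuntuServer:18.04-LTS:latest (publisher:offer:sku:version)
#   - unmanaged VHD URI, e.g. https://mystorage.blob.core.windows.net/vhds/osdisk.vhd
#   - URN alias, e.g. UbuntuLTS (resolved via the aliases document)
#   - name of an existing managed image in the target resource group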
def _parse_image_argument(cmd, namespace):
""" Systematically determines what type is supplied for the --image parameter. Updates the
namespace and returns the type for subsequent processing. """
from msrestazure.tools import is_valid_resource_id
from msrestazure.azure_exceptions import CloudError
import re
# 1 - check if a fully-qualified ID (assumes it is an image ID)
if is_valid_resource_id(namespace.image):
return 'image_id'
# 2 - attempt to match an URN pattern
urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image)
if urn_match:
namespace.os_publisher = urn_match.group(1)
namespace.os_offer = urn_match.group(2)
namespace.os_sku = urn_match.group(3)
namespace.os_version = urn_match.group(4)
if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]):
image_plan = _get_image_plan_info_if_exists(cmd, namespace)
if image_plan:
namespace.plan_name = image_plan.name
namespace.plan_product = image_plan.product
namespace.plan_publisher = image_plan.publisher
return 'urn'
# 3 - unmanaged vhd based images?
if urlparse(namespace.image).scheme:
return 'uri'
# 4 - attempt to match an URN alias (most likely)
from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
images = load_images_from_aliases_doc(cmd.cli_ctx)
matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None)
if matched:
namespace.os_publisher = matched['publisher']
namespace.os_offer = matched['offer']
namespace.os_sku = matched['sku']
namespace.os_version = matched['version']
return 'urn'
# 5 - check if an existing managed disk image resource
compute_client = _compute_client_factory(cmd.cli_ctx)
try:
compute_client.images.get(namespace.resource_group_name, namespace.image)
namespace.image = _get_resource_id(cmd.cli_ctx, namespace.image, namespace.resource_group_name,
'images', 'Microsoft.Compute')
return 'image_id'
except CloudError:
err = 'Invalid image "{}". Use a custom image name, id, or pick one from {}'
raise CLIError(err.format(namespace.image, [x['urnAlias'] for x in images]))
def _get_image_plan_info_if_exists(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
try:
compute_client = _compute_client_factory(cmd.cli_ctx)
if namespace.os_version.lower() == 'latest':
image_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher,
namespace.os_offer, namespace.os_sku)
else:
image_version = namespace.os_version
image = compute_client.virtual_machine_images.get(namespace.location,
namespace.os_publisher,
namespace.os_offer,
namespace.os_sku,
image_version)
# pylint: disable=no-member
return image.plan
except CloudError as ex:
        logger.warning("Querying the image '%s' failed with error '%s'. Configuring plan settings "
                       "will be skipped", namespace.image, ex.message)
# pylint: disable=inconsistent-return-statements
def _get_storage_profile_description(profile):
if profile == StorageProfile.SACustomImage:
return 'create unmanaged OS disk created from generalized VHD'
elif profile == StorageProfile.SAPirImage:
return 'create unmanaged OS disk from Azure Marketplace image'
elif profile == StorageProfile.SASpecializedOSDisk:
return 'attach to existing unmanaged OS disk'
elif profile == StorageProfile.ManagedCustomImage:
return 'create managed OS disk from custom image'
elif profile == StorageProfile.ManagedPirImage:
return 'create managed OS disk from Azure Marketplace image'
elif profile == StorageProfile.ManagedSpecializedOSDisk:
return 'attach existing managed OS disk'
def _validate_managed_disk_sku(sku):
allowed_skus = ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS']
if sku and sku.lower() not in [x.lower() for x in allowed_skus]:
raise CLIError("invalid storage SKU '{}': allowed values: '{}'".format(sku, allowed_skus))
def _validate_location(cmd, namespace, zone_info, size_info):
from ._vm_utils import list_sku_info
if not namespace.location:
get_default_location_from_resource_group(cmd, namespace)
if zone_info:
sku_infos = list_sku_info(cmd.cli_ctx, namespace.location)
temp = next((x for x in sku_infos if x.name.lower() == size_info.lower()), None)
# For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property
if not hasattr(temp, 'location_info'):
return
if not temp or not [x for x in (temp.location_info or []) if x.zones]:
            raise CLIError("{}'s location can't be used to create the VM/VMSS because availability zone is not yet "
"supported. Please use '--location' to specify a capable one. 'az vm list-skus' can be "
"used to find such locations".format(namespace.resource_group_name))
# pylint: disable=too-many-branches, too-many-statements
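# Informal summary of the decision matrix implemented below (derived from the numbered
# branch comments in the function body):
#   --image URN/alias (managed disks)            -> StorageProfile.ManagedPirImage          (#4)
#   --image URN/alias + --use-unmanaged-disk     -> StorageProfile.SAPirImage               (#1)
#   --image VHD URI (requires unmanaged disk)    -> StorageProfile.SACustomImage            (#2)
#   --image resource ID (image/gallery)          -> StorageProfile.ManagedCustomImage       (#5)
#   --attach-os-disk (managed)                   -> StorageProfile.ManagedSpecializedOSDisk (#6)
#   --attach-os-disk + --use-unmanaged-disk      -> StorageProfile.SASpecializedOSDisk      (#3)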
def _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=False):
from msrestazure.tools import parse_resource_id
# use minimal parameters to resolve the expected storage profile
if getattr(namespace, 'attach_os_disk', None) and not namespace.image:
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #3
namespace.storage_profile = StorageProfile.SASpecializedOSDisk
else:
# STORAGE PROFILE #6
namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk
elif namespace.image and not getattr(namespace, 'attach_os_disk', None):
image_type = _parse_image_argument(cmd, namespace)
if image_type == 'uri':
# STORAGE PROFILE #2
namespace.storage_profile = StorageProfile.SACustomImage
elif image_type == 'image_id':
# STORAGE PROFILE #5
namespace.storage_profile = StorageProfile.ManagedCustomImage
elif image_type == 'urn':
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #1
namespace.storage_profile = StorageProfile.SAPirImage
else:
# STORAGE PROFILE #4
namespace.storage_profile = StorageProfile.ManagedPirImage
else:
raise CLIError('Unrecognized image type: {}'.format(image_type))
else:
# did not specify image XOR attach-os-disk
raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK')
    auth_params = ['admin_password', 'admin_username', 'authentication_type',
'generate_ssh_keys', 'ssh_dest_key_path', 'ssh_key_value']
# perform parameter validation for the specific storage profile
# start with the required/forbidden parameters for VM
if namespace.storage_profile == StorageProfile.ManagedPirImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedCustomImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
required = ['os_type', 'attach_os_disk']
forbidden = ['os_disk_name', 'os_caching', 'storage_account',
'storage_container_name', 'use_unmanaged_disk', 'storage_sku'] + auth_params
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.SAPirImage:
required = ['image', 'use_unmanaged_disk']
forbidden = ['os_type', 'attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SACustomImage:
required = ['image', 'os_type', 'use_unmanaged_disk']
forbidden = ['attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk:
required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk']
forbidden = ['os_disk_name', 'os_caching', 'image', 'storage_account',
'storage_container_name', 'data_disk_sizes_gb', 'storage_sku'] + auth_params
else:
raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile))
logger.debug("storage profile '%s'", namespace.storage_profile)
if for_scale_set:
# VMSS lacks some parameters, so scrub these out
props_to_remove = ['attach_os_disk', 'storage_account']
for prop in props_to_remove:
if prop in required:
required.remove(prop)
if prop in forbidden:
forbidden.remove(prop)
# set default storage SKU if not provided and using an image based OS
if not namespace.storage_sku and namespace.storage_profile in [StorageProfile.SAPirImage, StorageProfile.SACustomImage]: # pylint: disable=line-too-long
namespace.storage_sku = 'Standard_LRS' if for_scale_set else 'Premium_LRS'
if namespace.storage_sku == 'UltraSSD_LRS' and namespace.ultra_ssd_enabled is None:
namespace.ultra_ssd_enabled = True
# Now verify that the status of required and forbidden parameters
validate_parameter_set(
namespace, required, forbidden,
description='storage profile: {}:'.format(_get_storage_profile_description(namespace.storage_profile)))
image_data_disks_num = 0
if namespace.storage_profile == StorageProfile.ManagedCustomImage:
# extract additional information from a managed custom image
res = parse_resource_id(namespace.image)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
if res['type'].lower() == 'images':
image_info = compute_client.images.get(res['resource_group'], res['name'])
namespace.os_type = image_info.storage_profile.os_disk.os_type.value
image_data_disks_num = len(image_info.storage_profile.data_disks or [])
elif res['type'].lower() == 'galleries':
image_info = compute_client.gallery_images.get(resource_group_name=res['resource_group'],
gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
namespace.os_type = image_info.os_type.value
gallery_image_version = res.get('child_name_2', '')
if gallery_image_version.lower() in ['latest', '']:
image_version_infos = compute_client.gallery_image_versions.list_by_gallery_image(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
image_version_infos = [x for x in image_version_infos if not x.publishing_profile.exclude_from_latest]
if not image_version_infos:
                    raise CLIError('There is no latest image version available for "{}"'.format(namespace.image))
image_version_info = sorted(image_version_infos, key=lambda x: x.publishing_profile.published_date)[-1]
else:
image_version_info = compute_client.gallery_image_versions.get(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'], gallery_image_version_name=res['child_name_2'])
image_data_disks_num = len(image_version_info.storage_profile.data_disk_images or [])
else:
        raise CLIError('usage error: unrecognized image information "{}"'.format(namespace.image))
# pylint: disable=no-member
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
# accept disk name or ID
namespace.attach_os_disk = _get_resource_id(
cmd.cli_ctx, namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if getattr(namespace, 'attach_data_disks', None):
if not namespace.use_unmanaged_disk:
namespace.attach_data_disks = [_get_resource_id(cmd.cli_ctx, d, namespace.resource_group_name, 'disks',
'Microsoft.Compute') for d in namespace.attach_data_disks]
if not namespace.os_type:
namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux'
from ._vm_utils import normalize_disk_info
# attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash
namespace.disk_info = normalize_disk_info(image_data_disks_num=image_data_disks_num,
data_disk_sizes_gb=namespace.data_disk_sizes_gb,
attach_data_disks=getattr(namespace, 'attach_data_disks', []),
storage_sku=namespace.storage_sku,
os_disk_caching=namespace.os_caching,
data_disk_cachings=namespace.data_caching)
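# Informal summary of the storage-account selection below (mirrors the numbered branches
# in the function body): an explicitly named account is reused if it exists, otherwise it
# is flagged for creation; with no name given, the first account in the resource group
# matching the VM's location and storage SKU tier is reused, and failing that a new
# account is created.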
def _validate_vm_create_storage_account(cmd, namespace):
from msrestazure.tools import parse_resource_id
if namespace.storage_account:
storage_id = parse_resource_id(namespace.storage_account)
rg = storage_id.get('resource_group', namespace.resource_group_name)
if check_existence(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage', 'storageAccounts'):
# 1 - existing storage account specified
namespace.storage_account_type = 'existing'
logger.debug("using specified existing storage account '%s'", storage_id['name'])
else:
# 2 - params for new storage account specified
namespace.storage_account_type = 'new'
logger.debug("specified storage account '%s' not found and will be created", storage_id['name'])
else:
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE).storage_accounts
# find storage account in target resource group that matches the VM's location
sku_tier = 'Premium' if 'Premium' in namespace.storage_sku else 'Standard'
account = next(
(a for a in storage_client.list_by_resource_group(namespace.resource_group_name)
if a.sku.tier.value == sku_tier and a.location == namespace.location), None)
if account:
# 3 - nothing specified - find viable storage account in target resource group
namespace.storage_account = account.name
namespace.storage_account_type = 'existing'
logger.debug("suitable existing storage account '%s' will be used", account.name)
else:
# 4 - nothing specified - create a new storage account
namespace.storage_account_type = 'new'
logger.debug('no suitable storage account found. One will be created.')
def _validate_vm_create_availability_set(cmd, namespace):
from msrestazure.tools import parse_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if namespace.availability_set:
as_id = parse_resource_id(namespace.availability_set)
name = as_id['name']
rg = as_id.get('resource_group', namespace.resource_group_name)
if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets'):
raise CLIError("Availability set '{}' does not exist.".format(name))
namespace.availability_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=rg,
namespace='Microsoft.Compute',
type='availabilitySets',
name=name)
logger.debug("adding to specified availability set '%s'", namespace.availability_set)
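# Informal summary of the vnet/subnet selection below: with no --vnet-name/--subnet and no
# existing NICs, the first vnet in the resource group whose location matches and that has
# a usable subnet (for scale sets, one large enough for the requested instance count) is
# reused; an explicitly referenced existing subnet is reused as-is; otherwise a new vnet
# and subnet are created.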
def _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=False):
from msrestazure.tools import is_valid_resource_id
vnet = namespace.vnet_name
subnet = namespace.subnet
rg = namespace.resource_group_name
location = namespace.location
nics = getattr(namespace, 'nics', None)
if not vnet and not subnet and not nics:
logger.debug('no subnet specified. Attempting to find an existing Vnet and subnet...')
# if nothing specified, try to find an existing vnet and subnet in the target resource group
client = get_network_client(cmd.cli_ctx).virtual_networks
# find VNET in target resource group that matches the VM's location with a matching subnet
for vnet_match in (v for v in client.list(rg) if v.location == location and v.subnets):
# 1 - find a suitable existing vnet/subnet
result = None
if not for_scale_set:
result = next((s for s in vnet_match.subnets if s.name.lower() != 'gatewaysubnet'), None)
else:
def _check_subnet(s):
if s.name.lower() == 'gatewaysubnet':
return False
subnet_mask = s.address_prefix.split('/')[-1]
return _subnet_capacity_check(subnet_mask, namespace.instance_count,
not namespace.disable_overprovision)
result = next((s for s in vnet_match.subnets if _check_subnet(s)), None)
if not result:
continue
namespace.subnet = result.name
namespace.vnet_name = vnet_match.name
namespace.vnet_type = 'existing'
logger.debug("existing vnet '%s' and subnet '%s' found", namespace.vnet_name, namespace.subnet)
return
if subnet:
subnet_is_id = is_valid_resource_id(subnet)
if (subnet_is_id and vnet) or (not subnet_is_id and not vnet):
raise CLIError("incorrect '--subnet' usage: --subnet SUBNET_ID | "
"--subnet SUBNET_NAME --vnet-name VNET_NAME")
subnet_exists = \
check_existence(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets', vnet, 'virtualNetworks')
if subnet_is_id and not subnet_exists:
raise CLIError("Subnet '{}' does not exist.".format(subnet))
elif subnet_exists:
# 2 - user specified existing vnet/subnet
namespace.vnet_type = 'existing'
logger.debug("using specified vnet '%s' and subnet '%s'", namespace.vnet_name, namespace.subnet)
return
# 3 - create a new vnet/subnet
namespace.vnet_type = 'new'
logger.debug('no suitable subnet found. One will be created.')
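# Informal arithmetic example for the capacity check below (not exercised anywhere in this
# module): a /24 subnet offers (1 << 8) - 2 = 254 assignable addresses, so with the 1.5x
# over-provision factor it is accepted for up to roughly 169 instances, and without
# over-provisioning for up to 253 instances.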
def _subnet_capacity_check(subnet_mask, vmss_instance_count, over_provision):
mask = int(subnet_mask)
# '2' are the reserved broadcasting addresses
# '*1.5' so we have enough leeway for over-provision
factor = 1.5 if over_provision else 1
return ((1 << (32 - mask)) - 2) > int(vmss_instance_count * factor)
def _validate_vm_vmss_accelerated_networking(cli_ctx, namespace):
if namespace.accelerated_networking is None:
size = getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None)
size = size.lower()
# to refresh the list, run 'az vm create --accelerated-networking --size Standard_DS1_v2' and
# get it from the error
aval_sizes = ['Standard_D3_v2', 'Standard_D12_v2', 'Standard_D3_v2_Promo', 'Standard_D12_v2_Promo',
'Standard_DS3_v2', 'Standard_DS12_v2', 'Standard_DS13-4_v2', 'Standard_DS14-4_v2',
'Standard_DS3_v2_Promo', 'Standard_DS12_v2_Promo', 'Standard_DS13-4_v2_Promo',
'Standard_DS14-4_v2_Promo', 'Standard_F4', 'Standard_F4s', 'Standard_D8_v3', 'Standard_D8s_v3',
'Standard_D32-8s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D3_v2_ABC',
'Standard_D12_v2_ABC', 'Standard_F4_ABC', 'Standard_F8s_v2', 'Standard_D4_v2',
'Standard_D13_v2', 'Standard_D4_v2_Promo', 'Standard_D13_v2_Promo', 'Standard_DS4_v2',
'Standard_DS13_v2', 'Standard_DS14-8_v2', 'Standard_DS4_v2_Promo', 'Standard_DS13_v2_Promo',
'Standard_DS14-8_v2_Promo', 'Standard_F8', 'Standard_F8s', 'Standard_M64-16ms',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D32-16s_v3', 'Standard_D64-16s_v3',
'Standard_E16_v3', 'Standard_E16s_v3', 'Standard_E32-16s_v3', 'Standard_D4_v2_ABC',
'Standard_D13_v2_ABC', 'Standard_F8_ABC', 'Standard_F16s_v2', 'Standard_D5_v2',
'Standard_D14_v2', 'Standard_D5_v2_Promo', 'Standard_D14_v2_Promo', 'Standard_DS5_v2',
'Standard_DS14_v2', 'Standard_DS5_v2_Promo', 'Standard_DS14_v2_Promo', 'Standard_F16',
'Standard_F16s', 'Standard_M64-32ms', 'Standard_M128-32ms', 'Standard_D32_v3',
'Standard_D32s_v3', 'Standard_D64-32s_v3', 'Standard_E32_v3', 'Standard_E32s_v3',
'Standard_E32-8s_v3', 'Standard_E32-16_v3', 'Standard_D5_v2_ABC', 'Standard_D14_v2_ABC',
'Standard_F16_ABC', 'Standard_F32s_v2', 'Standard_D15_v2', 'Standard_D15_v2_Promo',
'Standard_D15_v2_Nested', 'Standard_DS15_v2', 'Standard_DS15_v2_Promo',
'Standard_DS15_v2_Nested', 'Standard_D40_v3', 'Standard_D40s_v3', 'Standard_D15_v2_ABC',
'Standard_M64ms', 'Standard_M64s', 'Standard_M128-64ms', 'Standard_D64_v3', 'Standard_D64s_v3',
'Standard_E64_v3', 'Standard_E64s_v3', 'Standard_E64-16s_v3', 'Standard_E64-32s_v3',
'Standard_F64s_v2', 'Standard_F72s_v2', 'Standard_M128s', 'Standard_M128ms', 'Standard_L8s_v2',
'Standard_L16s_v2', 'Standard_L32s_v2', 'Standard_L64s_v2', 'Standard_L96s_v2', 'SQLGL',
'SQLGLCore', 'Standard_D4_v3', 'Standard_D4s_v3', 'Standard_D2_v2', 'Standard_DS2_v2',
'Standard_E4_v3', 'Standard_E4s_v3', 'Standard_F2', 'Standard_F2s', 'Standard_F4s_v2',
'Standard_D11_v2', 'Standard_DS11_v2', 'AZAP_Performance_ComputeV17C']
aval_sizes = [x.lower() for x in aval_sizes]
if size not in aval_sizes:
return
new_4core_sizes = ['Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D3_v2_ABC', 'Standard_DS3_v2',
'Standard_DS3_v2_Promo', 'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D12_v2_ABC',
'Standard_DS12_v2', 'Standard_DS12_v2_Promo', 'Standard_F8s_v2', 'Standard_F4',
'Standard_F4_ABC', 'Standard_F4s', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D8_v3',
'Standard_D8s_v3']
new_4core_sizes = [x.lower() for x in new_4core_sizes]
if size not in new_4core_sizes:
compute_client = _compute_client_factory(cli_ctx)
sizes = compute_client.virtual_machine_sizes.list(namespace.location)
size_info = next((s for s in sizes if s.name.lower() == size), None)
if size_info is None or size_info.number_of_cores < 8:
return
# VMs need to be a supported image in the marketplace
# Ubuntu 16.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, CoreOS Linux, Debian "Stretch" with backports kernel
# Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2
publisher, offer, sku = namespace.os_publisher, namespace.os_offer, namespace.os_sku
if not publisher:
return
publisher, offer, sku = publisher.lower(), offer.lower(), sku.lower()
distros = [('canonical', 'UbuntuServer', '^16.04'), ('suse', 'sles', '^12-sp3'), ('redhat', 'rhel', '^7.4'),
('openlogic', 'centos', '^7.4'), ('coreos', 'coreos', None), ('credativ', 'debian', '-backports'),
('oracle', 'oracle-linux', '^7.4'), ('MicrosoftWindowsServer', 'WindowsServer', '^2016'),
('MicrosoftWindowsServer', 'WindowsServer', '^2012-R2')]
import re
for p, o, s in distros:
if p.lower() == publisher and (o is None or o.lower() == offer) and (s is None or re.match(s, sku, re.I)):
namespace.accelerated_networking = True
def _validate_vmss_create_subnet(namespace):
if namespace.vnet_type == 'new':
if namespace.subnet_address_prefix is None:
cidr = namespace.vnet_address_prefix.split('/', 1)[0]
i = 0
for i in range(24, 16, -1):
if _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision):
break
if i < 16:
                err = "instance count '{}' is out of range of 2^16 subnet size"
raise CLIError(err.format(namespace.instance_count))
namespace.subnet_address_prefix = '{}/{}'.format(cidr, i)
if namespace.app_gateway_type and namespace.app_gateway_subnet_address_prefix is None:
namespace.app_gateway_subnet_address_prefix = _get_next_subnet_addr_suffix(
namespace.vnet_address_prefix, namespace.subnet_address_prefix, 24)
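# Informal worked example for the helper below (the addresses are hypothetical): with a
# vnet of 10.0.0.0/16 and an existing subnet of 10.0.0.0/24, requesting a new /24 yields
# the next adjacent block, '10.0.1.0/24'; if that block would overflow the vnet range the
# helper tries the block on the other side instead, and otherwise raises a usage error.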
def _get_next_subnet_addr_suffix(vnet_cidr, subnet_cidr, new_mask):
def _convert_to_int(address, bit_mask_len):
a, b, c, d = [int(x) for x in address.split('.')]
result = '{0:08b}{1:08b}{2:08b}{3:08b}'.format(a, b, c, d)
return int(result[:-bit_mask_len], 2)
error_msg = "usage error: --subnet-address-prefix value should be a subrange of --vnet-address-prefix's"
# extract vnet information needed to verify the defaults we are coming out
vnet_ip_address, mask = vnet_cidr.split('/')
vnet_bit_mask_len = 32 - int(mask)
vnet_int = _convert_to_int(vnet_ip_address, vnet_bit_mask_len)
subnet_ip_address, mask = subnet_cidr.split('/')
subnet_bit_mask_len = 32 - int(mask)
if vnet_bit_mask_len <= subnet_bit_mask_len:
raise CLIError(error_msg)
candidate_int = _convert_to_int(subnet_ip_address, subnet_bit_mask_len) + 1
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: # overflows?
candidate_int = candidate_int - 2 # try the other way around
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int:
raise CLIError(error_msg)
# format back to the cidr
    candidate_str = '{0:32b}'.format(candidate_int << subnet_bit_mask_len)
    return '{0}.{1}.{2}.{3}/{4}'.format(int(candidate_str[0:8], 2), int(candidate_str[8:16], 2),
                                        int(candidate_str[16:24], 2), int(candidate_str[24:32], 2),
                                        new_mask)
def _validate_vm_create_nsg(cmd, namespace):
if namespace.nsg:
if check_existence(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'Microsoft.Network', 'networkSecurityGroups'):
namespace.nsg_type = 'existing'
logger.debug("using specified NSG '%s'", namespace.nsg)
else:
namespace.nsg_type = 'new'
logger.debug("specified NSG '%s' not found. It will be created.", namespace.nsg)
elif namespace.nsg == '':
namespace.nsg_type = None
logger.debug('no NSG will be used')
elif namespace.nsg is None:
namespace.nsg_type = 'new'
logger.debug('new NSG will be created')
def _validate_vmss_create_nsg(cmd, namespace):
if namespace.nsg:
namespace.nsg = _get_resource_id(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'networkSecurityGroups', 'Microsoft.Network')
def _validate_vm_vmss_create_public_ip(cmd, namespace):
if namespace.public_ip_address:
if check_existence(cmd.cli_ctx, namespace.public_ip_address, namespace.resource_group_name,
'Microsoft.Network', 'publicIPAddresses'):
namespace.public_ip_address_type = 'existing'
logger.debug("using existing specified public IP '%s'", namespace.public_ip_address)
else:
namespace.public_ip_address_type = 'new'
logger.debug("specified public IP '%s' not found. It will be created.", namespace.public_ip_address)
elif namespace.public_ip_address == '':
namespace.public_ip_address_type = None
logger.debug('no public IP address will be used')
elif namespace.public_ip_address is None:
namespace.public_ip_address_type = 'new'
logger.debug('new public IP address will be created')
# Public-IP SKU is only exposed for VM. VMSS has no such needs so far
if getattr(namespace, 'public_ip_sku', None):
from azure.cli.core.profiles import ResourceType
PublicIPAddressSkuName, IPAllocationMethod = cmd.get_models('PublicIPAddressSkuName', 'IPAllocationMethod',
resource_type=ResourceType.MGMT_NETWORK)
if namespace.public_ip_sku == PublicIPAddressSkuName.standard.value:
if not namespace.public_ip_address_allocation:
namespace.public_ip_address_allocation = IPAllocationMethod.static.value
def _validate_vmss_create_public_ip(cmd, namespace):
if namespace.load_balancer_type is None and namespace.app_gateway_type is None:
if namespace.public_ip_address:
raise CLIError('--public-ip-address can only be used when creating a new load '
'balancer or application gateway frontend.')
namespace.public_ip_address = ''
_validate_vm_vmss_create_public_ip(cmd, namespace)
def _validate_vm_create_nics(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
nics_value = namespace.nics
nics = []
if not nics_value:
namespace.nic_type = 'new'
logger.debug('new NIC will be created')
return
if not isinstance(nics_value, list):
nics_value = [nics_value]
for n in nics_value:
nics.append({
'id': n if '/' in n else resource_id(name=n,
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='networkInterfaces',
subscription=get_subscription_id(cmd.cli_ctx)),
'properties': {
'primary': nics_value[0] == n
}
})
namespace.nics = nics
namespace.nic_type = 'existing'
namespace.public_ip_address_type = None
logger.debug('existing NIC(s) will be used')
def _validate_vm_vmss_create_auth(namespace):
if namespace.storage_profile in [StorageProfile.ManagedSpecializedOSDisk,
StorageProfile.SASpecializedOSDisk]:
return
namespace.admin_username = _validate_admin_username(namespace.admin_username, namespace.os_type)
if not namespace.os_type:
raise CLIError("Unable to resolve OS type. Specify '--os-type' argument.")
if not namespace.authentication_type:
# apply default auth type (password for Windows, ssh for Linux) by examining the OS type
namespace.authentication_type = 'password' \
if (namespace.os_type.lower() == 'windows' or namespace.admin_password) else 'ssh'
if namespace.os_type.lower() == 'windows' and namespace.authentication_type == 'ssh':
raise CLIError('SSH not supported for Windows VMs.')
# validate proper arguments supplied based on the authentication type
if namespace.authentication_type == 'password':
if namespace.ssh_key_value or namespace.ssh_dest_key_path:
raise ValueError(
"incorrect usage for authentication-type 'password': "
"[--admin-username USERNAME] --admin-password PASSWORD")
from knack.prompting import prompt_pass, NoTTYException
try:
if not namespace.admin_password:
namespace.admin_password = prompt_pass('Admin Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify password in non-interactive mode.')
# validate password
_validate_admin_password(namespace.admin_password,
namespace.os_type)
elif namespace.authentication_type == 'ssh':
if namespace.admin_password:
raise ValueError('Admin password cannot be used with SSH authentication type')
validate_ssh_key(namespace)
if not namespace.ssh_dest_key_path:
namespace.ssh_dest_key_path = \
'/home/{}/.ssh/authorized_keys'.format(namespace.admin_username)
def _validate_admin_username(username, os_type):
import re
if not username:
        raise CLIError("admin user name cannot be empty")
is_linux = (os_type.lower() == 'linux')
# pylint: disable=line-too-long
pattern = (r'[\\\/"\[\]:|<>+=;,?*@#()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+')
    linux_err = r'admin user name cannot contain upper case characters A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -'
    win_err = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or end with .'
if re.findall(pattern, username):
raise CLIError(linux_err if is_linux else win_err)
if is_linux and re.findall(r'^[$-]+', username):
raise CLIError(linux_err)
if not is_linux and username.endswith('.'):
raise CLIError(win_err)
disallowed_user_names = [
"administrator", "admin", "user", "user1", "test", "user2",
"test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
"admin2", "aspnet", "backup", "console", "guest",
"owner", "root", "server", "sql", "support", "support_388945a0",
"sys", "test2", "test3", "user4", "user5"]
if username.lower() in disallowed_user_names:
raise CLIError("This user name '{}' meets the general requirements, but is specifically disallowed for this image. Please try a different value.".format(username))
return username
def _validate_admin_password(password, os_type):
import re
is_linux = (os_type.lower() == 'linux')
max_length = 72 if is_linux else 123
min_length = 12
if len(password) not in range(min_length, max_length + 1):
raise CLIError('The password length must be between {} and {}'.format(min_length,
max_length))
contains_lower = re.findall('[a-z]+', password)
contains_upper = re.findall('[A-Z]+', password)
contains_digit = re.findall('[0-9]+', password)
contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password)
count = len([x for x in [contains_lower, contains_upper,
contains_digit, contains_special_char] if x])
# pylint: disable=line-too-long
if count < 3:
raise CLIError('Password must have at least 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character')
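# Illustrative sketch (not executed; passwords below are hypothetical): the rule above requires
# length 12-72 (Linux) or 12-123 (Windows) plus at least 3 of the 4 character classes, e.g.:
#
#   _validate_admin_password('Xyz12345!abc', 'linux')    # lower + upper + digit + special -> passes
#   _validate_admin_password('xyz12345abcd', 'linux')    # only lower + digit (2 of 4)     -> CLIError
#   _validate_admin_password('Short1!', 'windows')       # length < 12                     -> CLIError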
def validate_ssh_key(namespace):
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
# figure out appropriate file names:
# 'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage, back up your keys to a safe location.",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let the CLI generate one for you')
namespace.ssh_key_value = content
def _validate_vm_vmss_msi(cmd, namespace, from_set_command=False):
if from_set_command or namespace.assign_identity is not None:
identities = namespace.assign_identity or []
from ._vm_utils import MSI_LOCAL_ID
for i, _ in enumerate(identities):
if identities[i] != MSI_LOCAL_ID:
identities[i] = _get_resource_id(cmd.cli_ctx, identities[i], namespace.resource_group_name,
'userAssignedIdentities', 'Microsoft.ManagedIdentity')
if not namespace.identity_scope and getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError("usage error: '--role {}' is not applicable as the '--scope' is not provided".format(
namespace.identity_role))
user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID]
if user_assigned_identities and not cmd.supported_api_version(min_api='2017-12-01'):
raise CLIError('usage error: user assigned identity is only available under profile '
'with minimum Compute API version of 2017-12-01')
if namespace.identity_scope:
if identities and MSI_LOCAL_ID not in identities:
raise CLIError("usage error: '--scope'/'--role' is only applicable when assign system identity")
# keep 'identity_role' for output as logical name is more readable
setattr(namespace, 'identity_role_id', _resolve_role_id(cmd.cli_ctx, namespace.identity_role,
namespace.identity_scope))
elif namespace.identity_scope or getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError('usage error: --assign-identity [--scope SCOPE] [--role ROLE]')
def _resolve_role_id(cli_ctx, role, scope):
import re
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_definitions
role_id = None
if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/',
role, re.I):
role_id = role
else:
try:
uuid.UUID(role)
role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format(
client.config.subscription_id, role)
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick an id from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
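# Illustrative sketch (not executed; IDs below are hypothetical): _resolve_role_id accepts a full
# role-definition ID, a bare role-definition GUID, or a role name looked up at the given scope:
#
#   _resolve_role_id(cli_ctx, 'Contributor', scope)                              # name -> looked up via role_definitions.list
#   _resolve_role_id(cli_ctx, 'b24988ac-0000-0000-0000-000000000000', scope)     # GUID -> ID built from the subscription
#   _resolve_role_id(cli_ctx, '/subscriptions/.../roleDefinitions/...', scope)   # full ID -> passed through unchanged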
def process_vm_create_namespace(cmd, namespace):
validate_tags(namespace)
_validate_location(cmd, namespace, namespace.zone, namespace.size)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace)
if namespace.storage_profile in [StorageProfile.SACustomImage,
StorageProfile.SAPirImage]:
_validate_vm_create_storage_account(cmd, namespace)
_validate_vm_create_availability_set(cmd, namespace)
_validate_vm_vmss_create_vnet(cmd, namespace)
_validate_vm_create_nsg(cmd, namespace)
_validate_vm_vmss_create_public_ip(cmd, namespace)
_validate_vm_create_nics(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
if namespace.secrets:
_validate_secrets(namespace.secrets, namespace.os_type)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable to Windows VMs')
_validate_vm_vmss_msi(cmd, namespace)
if namespace.boot_diagnostics_storage:
namespace.boot_diagnostics_storage = get_storage_blob_uri(cmd.cli_ctx, namespace.boot_diagnostics_storage)
# endregion
# region VMSS Create Validators
def _get_default_address_pool(cli_ctx, resource_group, balancer_name, balancer_type):
option_name = '--backend-pool-name'
client = getattr(get_network_client(cli_ctx), balancer_type, None)
if not client:
raise CLIError('unrecognized balancer type: {}'.format(balancer_type))
balancer = client.get(resource_group, balancer_name)
values = [x.name for x in balancer.backend_address_pools]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
elif not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0]
def _validate_vmss_single_placement_group(namespace):
if namespace.platform_fault_domain_count is not None and namespace.zones is None:
raise CLIError('usage error: --platform-fault-domain-count COUNT --zones ZONES')
if namespace.zones or namespace.instance_count > 100:
if namespace.single_placement_group is None:
namespace.single_placement_group = False
elif namespace.single_placement_group:
raise CLIError("usage error: '--single-placement-group' should be turned off for zonal scale-sets or with"
" 100+ instances")
def _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from azure.cli.core.profiles import ResourceType
std_lb_is_available = cmd.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer and namespace.application_gateway:
raise CLIError('incorrect usage: --load-balancer NAME_OR_ID | '
'--application-gateway NAME_OR_ID')
# Resolve the type of balancer (if any) being used
balancer_type = 'None'
if namespace.load_balancer is None and namespace.application_gateway is None:
if std_lb_is_available:
balancer_type = 'loadBalancer'
else: # needed for Stack profile 2017_03_09
balancer_type = 'loadBalancer' if namespace.single_placement_group is not False else 'applicationGateway'
logger.debug("W/o STD LB, defaulting to '%s' under because single placement group is disabled",
balancer_type)
elif namespace.load_balancer:
balancer_type = 'loadBalancer'
elif namespace.application_gateway:
balancer_type = 'applicationGateway'
if balancer_type == 'applicationGateway':
if namespace.application_gateway:
client = get_network_client(cmd.cli_ctx).application_gateways
try:
rg = parse_resource_id(namespace.application_gateway).get(
'resource_group', namespace.resource_group_name)
ag_name = parse_resource_id(namespace.application_gateway)['name']
client.get(rg, ag_name)
namespace.app_gateway_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, ag_name, 'application_gateways')
logger.debug("using specified existing application gateway '%s'", namespace.application_gateway)
except CloudError:
namespace.app_gateway_type = 'new'
logger.debug("application gateway '%s' not found. It will be created.", namespace.application_gateway)
elif namespace.application_gateway == '':
namespace.app_gateway_type = None
logger.debug('no application gateway will be used')
elif namespace.application_gateway is None:
namespace.app_gateway_type = 'new'
logger.debug('new application gateway will be created')
# AppGateway frontend
required = []
if namespace.app_gateway_type == 'new':
required.append('app_gateway_sku')
required.append('app_gateway_capacity')
if namespace.vnet_type != 'new':
required.append('app_gateway_subnet_address_prefix')
elif namespace.app_gateway_type == 'existing':
required.append('backend_pool_name')
forbidden = ['nat_pool_name', 'load_balancer', 'health_probe']
validate_parameter_set(namespace, required, forbidden, description='network balancer: application gateway')
elif balancer_type == 'loadBalancer':
# LoadBalancer frontend
required = []
forbidden = ['app_gateway_subnet_address_prefix', 'application_gateway', 'app_gateway_sku',
'app_gateway_capacity']
validate_parameter_set(namespace, required, forbidden, description='network balancer: load balancer')
if namespace.load_balancer:
rg = parse_resource_id(namespace.load_balancer).get('resource_group', namespace.resource_group_name)
lb_name = parse_resource_id(namespace.load_balancer)['name']
lb = get_network_lb(cmd.cli_ctx, namespace.resource_group_name, lb_name)
if lb:
namespace.load_balancer_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, lb_name, 'load_balancers')
if not namespace.nat_pool_name:
if len(lb.inbound_nat_pools) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' explicitly.".format( # pylint: disable=line-too-long
'--nat-pool-name', ', '.join([n.name for n in lb.inbound_nat_pools])))
elif not lb.inbound_nat_pools: # Associated scaleset will be missing ssh/rdp, so warn here.
logger.warning("No inbound nat pool was configured on '%s'", namespace.load_balancer)
else:
namespace.nat_pool_name = lb.inbound_nat_pools[0].name
logger.debug("using specified existing load balancer '%s'", namespace.load_balancer)
else:
namespace.load_balancer_type = 'new'
logger.debug("load balancer '%s' not found. It will be created.", namespace.load_balancer)
elif namespace.load_balancer == '':
namespace.load_balancer_type = None
logger.debug('no load balancer will be used')
elif namespace.load_balancer is None:
namespace.load_balancer_type = 'new'
logger.debug('new load balancer will be created')
if namespace.load_balancer_type == 'new' and namespace.single_placement_group is False and std_lb_is_available:
LBSkuName = cmd.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer_sku is None:
namespace.load_balancer_sku = LBSkuName.standard.value
logger.debug("use Standard sku as single placement group is turned off")
elif namespace.load_balancer_sku == LBSkuName.basic.value:
if namespace.zones:
err = "'Standard' load balancer is required for zonal scale-sets"
elif namespace.instance_count > 100:
err = "'Standard' load balancer is required for scale-sets with 100+ instances"
else:
err = "'Standard' load balancer is required because 'single placement group' is turned off"
raise CLIError('usage error: {}'.format(err))
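# Summary sketch of the balancer resolution above (illustrative, not executed):
#
#   --load-balancer LB        -> balancer_type = 'loadBalancer'; existing vs. new decided by a GET
#   --application-gateway AG  -> balancer_type = 'applicationGateway'; existing vs. new decided by a GET
#   neither flag              -> defaults to a load balancer (or an application gateway on the
#                                2017_03_09 Stack profile when single placement group is disabled)
#   --load-balancer '' / --application-gateway ''  -> no balancer is used at all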
def get_network_client(cli_ctx):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cli_ctx))
def get_network_lb(cli_ctx, resource_group_name, lb_name):
from msrestazure.azure_exceptions import CloudError
network_client = get_network_client(cli_ctx)
try:
return network_client.load_balancers.get(resource_group_name, lb_name)
except CloudError:
return None
def process_vmss_create_namespace(cmd, namespace):
validate_tags(namespace)
if namespace.vm_sku is None:
from azure.cli.core.cloud import AZURE_US_GOV_CLOUD
if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name:
namespace.vm_sku = 'Standard_DS1_v2'
else:
namespace.vm_sku = 'Standard_D1_v2'
_validate_location(cmd, namespace, namespace.zones, namespace.vm_sku)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True)
_validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True)
_validate_vmss_single_placement_group(namespace)
_validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace)
_validate_vmss_create_subnet(namespace)
_validate_vmss_create_public_ip(cmd, namespace)
_validate_vmss_create_nsg(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
_validate_vm_vmss_msi(cmd, namespace)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable to Windows VM scale sets')
if not namespace.public_ip_per_vm and namespace.vm_domain_name:
raise CLIError('Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled')
if namespace.eviction_policy and not namespace.priority:
raise CLIError('Usage error: --priority PRIORITY [--eviction-policy POLICY]')
# endregion
# region disk, snapshot, image validators
def validate_vm_disk(cmd, namespace):
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
def validate_vmss_disk(cmd, namespace):
if namespace.disk:
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if bool(namespace.disk) == bool(namespace.size_gb):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB')
elif bool(namespace.disk) != bool(namespace.instance_id):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID')
def process_disk_or_snapshot_create_namespace(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
if namespace.source:
usage_error = 'usage error: --source {SNAPSHOT | DISK} | --source VHD_BLOB_URI [--source-storage-account-id ID]'
try:
namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, namespace.source)
if not namespace.source_blob_uri and namespace.source_storage_account_id:
raise CLIError(usage_error)
except CloudError:
raise CLIError(usage_error)
def process_image_create_namespace(cmd, namespace):
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
try:
# try capturing from VM, the most common scenario
res_id = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name,
'virtualMachines', 'Microsoft.Compute')
res = parse_resource_id(res_id)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
vm_info = compute_client.virtual_machines.get(res['resource_group'], res['name'])
# pylint: disable=no-member
namespace.os_type = vm_info.storage_profile.os_disk.os_type.value
namespace.source_virtual_machine = res_id
if namespace.data_disk_sources:
raise CLIError("'--data-disk-sources' is not allowed when capturing "
"images from virtual machines")
except CloudError:
namespace.os_blob_uri, namespace.os_disk, namespace.os_snapshot = _figure_out_storage_source(cmd.cli_ctx, namespace.resource_group_name, namespace.source) # pylint: disable=line-too-long
namespace.data_blob_uris = []
namespace.data_disks = []
namespace.data_snapshots = []
if namespace.data_disk_sources:
for data_disk_source in namespace.data_disk_sources:
source_blob_uri, source_disk, source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, data_disk_source)
if source_blob_uri:
namespace.data_blob_uris.append(source_blob_uri)
if source_disk:
namespace.data_disks.append(source_disk)
if source_snapshot:
namespace.data_snapshots.append(source_snapshot)
if not namespace.os_type:
raise CLIError("usage error: os type is required to create the image, "
"please specify '--os-type OS_TYPE'")
def _figure_out_storage_source(cli_ctx, resource_group_name, source):
from msrestazure.azure_exceptions import CloudError
source_blob_uri = None
source_disk = None
source_snapshot = None
if urlparse(source).scheme: # a uri?
source_blob_uri = source
elif '/disks/' in source.lower():
source_disk = source
elif '/snapshots/' in source.lower():
source_snapshot = source
else:
compute_client = _compute_client_factory(cli_ctx)
# pylint: disable=no-member
try:
info = compute_client.snapshots.get(resource_group_name, source)
source_snapshot = info.id
except CloudError:
info = compute_client.disks.get(resource_group_name, source)
source_disk = info.id
return (source_blob_uri, source_disk, source_snapshot)
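# Illustrative sketch (not executed; names are hypothetical) of how a '--source' value is
# classified by _figure_out_storage_source:
#
#   'https://account.blob.core.windows.net/vhds/os.vhd'  -> (blob_uri, None, None)
#   '/subscriptions/.../disks/myDisk'                    -> (None, disk_id, None)
#   '/subscriptions/.../snapshots/mySnap'                -> (None, None, snapshot_id)
#   'myDiskOrSnapshotName'                               -> resolved via the compute client
#                                                           (snapshot first, then disk)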
def process_disk_encryption_namespace(cmd, namespace):
namespace.disk_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.disk_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
if namespace.key_encryption_keyvault:
if not namespace.key_encryption_key:
raise CLIError("Incorrect usage '--key-encryption-keyvault': "
"'--key-encryption-key' is required")
namespace.key_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.key_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_assign_identity_namespace(cmd, namespace):
_validate_vm_vmss_msi(cmd, namespace, from_set_command=True)
def process_remove_identity_namespace(cmd, namespace):
if namespace.identities:
from ._vm_utils import MSI_LOCAL_ID
for i in range(len(namespace.identities)):
if namespace.identities[i] != MSI_LOCAL_ID:
namespace.identities[i] = _get_resource_id(cmd.cli_ctx, namespace.identities[i],
namespace.resource_group_name,
'userAssignedIdentities',
'Microsoft.ManagedIdentity')
# TODO move to its own command module https://github.com/Azure/azure-cli/issues/5105
def process_msi_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
def process_gallery_image_version_namespace(cmd, namespace):
TargetRegion = cmd.get_models('TargetRegion')
if namespace.target_regions:
regions_info = []
for t in namespace.target_regions:
parts = t.split('=', 1)
if len(parts) == 1:
regions_info.append(TargetRegion(name=parts[0]))
else:
try:
replica_count = int(parts[1])
except ValueError:
raise CLIError("usage error: {}'s replica count must be an integer".format(parts[0]))
regions_info.append(TargetRegion(name=parts[0], regional_replica_count=replica_count))
namespace.target_regions = regions_info
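# Illustrative sketch (not executed): '--target-regions' entries are parsed as 'REGION[=REPLICA_COUNT]',
# e.g. 'eastus' becomes TargetRegion(name='eastus') and 'westus=2' becomes
# TargetRegion(name='westus', regional_replica_count=2); a non-integer count such as 'westus=two'
# raises the usage error above.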
# endregion
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint:disable=too-many-lines
import os
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group, validate_file_or_dict, validate_parameter_set, validate_tags)
from azure.cli.core.util import hash_string
from azure.cli.command_modules.vm._vm_utils import check_existence, get_target_network_api, get_storage_blob_uri
from azure.cli.command_modules.vm._template_builder import StorageProfile
import azure.cli.core.keys as keys
from ._client_factory import _compute_client_factory
from ._actions import _get_latest_image_version
logger = get_logger(__name__)
def validate_asg_names_or_ids(cmd, namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_subscription_id
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup',
resource_type=ResourceType.MGMT_NETWORK)
resource_group = namespace.resource_group_name
subscription_id = get_subscription_id(cmd.cli_ctx)
names_or_ids = getattr(namespace, 'application_security_groups')
ids = []
if names_or_ids == [""] or not names_or_ids:
return
for val in names_or_ids:
if not is_valid_resource_id(val):
val = resource_id(
subscription=subscription_id,
resource_group=resource_group,
namespace='Microsoft.Network', type='applicationSecurityGroups',
name=val
)
ids.append(ApplicationSecurityGroup(id=val))
setattr(namespace, 'application_security_groups', ids)
def validate_nsg_name(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
vm_id = resource_id(name=namespace.vm_name, resource_group=namespace.resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines',
subscription=get_subscription_id(cmd.cli_ctx))
namespace.network_security_group_name = namespace.network_security_group_name \
or '{}_NSG_{}'.format(namespace.vm_name, hash_string(vm_id, length=8))
def validate_keyvault(cmd, namespace):
namespace.keyvault = _get_resource_id(cmd.cli_ctx, namespace.keyvault, namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_vm_secret_format(cmd, namespace):
from msrestazure.tools import is_valid_resource_id
keyvault_usage = CLIError('usage error: [--keyvault NAME --resource-group NAME | --keyvault ID]')
kv = namespace.keyvault
rg = namespace.resource_group_name
if rg:
if not kv or is_valid_resource_id(kv):
raise keyvault_usage
validate_keyvault(cmd, namespace)
else:
if kv and not is_valid_resource_id(kv):
raise keyvault_usage
def _get_resource_group_from_vault_name(cli_ctx, vault_name):
"""
Fetch resource group from vault name
:param str vault_name: name of the key vault
:return: resource group name or None
:rtype: str
"""
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from msrestazure.tools import parse_resource_id
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_KEYVAULT).vaults
for vault in client.list():
id_comps = parse_resource_id(vault.id)
if id_comps['name'] == vault_name:
return id_comps['resource_group']
return None
def _get_resource_id(cli_ctx, val, resource_group, resource_type, resource_namespace):
from msrestazure.tools import resource_id, is_valid_resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if is_valid_resource_id(val):
return val
kwargs = {
'name': val,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'subscription': get_subscription_id(cli_ctx)
}
missing_kwargs = {k: v for k, v in kwargs.items() if not v}
return resource_id(**kwargs) if not missing_kwargs else None
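# Illustrative sketch (not executed; subscription and group names are hypothetical): a bare name
# is expanded into a full ARM resource ID, while an existing ID is returned unchanged, e.g.:
#
#   _get_resource_id(cli_ctx, 'myDisk', 'myRG', 'disks', 'Microsoft.Compute')
#   # -> '/subscriptions/<sub-id>/resourceGroups/myRG/providers/Microsoft.Compute/disks/myDisk'
#   _get_resource_id(cli_ctx, '/subscriptions/.../disks/myDisk', 'myRG', 'disks', 'Microsoft.Compute')
#   # -> returned as-is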
def _get_nic_id(cli_ctx, val, resource_group):
return _get_resource_id(cli_ctx, val, resource_group,
'networkInterfaces', 'Microsoft.Network')
def validate_vm_nic(cmd, namespace):
namespace.nic = _get_nic_id(cmd.cli_ctx, namespace.nic, namespace.resource_group_name)
def validate_vm_nics(cmd, namespace):
rg = namespace.resource_group_name
nic_ids = []
for n in namespace.nics:
nic_ids.append(_get_nic_id(cmd.cli_ctx, n, rg))
namespace.nics = nic_ids
if hasattr(namespace, 'primary_nic') and namespace.primary_nic:
namespace.primary_nic = _get_nic_id(cmd.cli_ctx, namespace.primary_nic, rg)
def _validate_secrets(secrets, os_type):
"""
Validates a parsed JSON array containing secrets for use in VM Creation
Secrets JSON structure
[{
"sourceVault": { "id": "value" },
"vaultCertificates": [{
"certificateUrl": "value",
"certificateStore": "cert store name (only on windows)"
}]
}]
:param list secrets: list of JSON strings (or file paths) matching the structure above
:param str os_type: the type of OS (linux or windows)
:raises CLIError: listing all validation errors found, if any
"""
is_windows = os_type == 'windows'
errors = []
try:
loaded_secret = [validate_file_or_dict(secret) for secret in secrets]
except Exception as err:
raise CLIError('Error decoding secrets: {0}'.format(err))
for idx_arg, narg_secret in enumerate(loaded_secret):
for idx, secret in enumerate(narg_secret):
if 'sourceVault' not in secret:
errors.append(
'Secret is missing sourceVault key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'sourceVault' in secret and 'id' not in secret['sourceVault']:
errors.append(
'Secret is missing sourceVault.id key at index {0} in arg {1}'.format(
idx, idx_arg))
if 'vaultCertificates' not in secret or not secret['vaultCertificates']:
err = 'Secret is missing vaultCertificates array or it is empty at index {0} in ' \
'arg {1} '
errors.append(err.format(idx, idx_arg))
else:
for jdx, cert in enumerate(secret['vaultCertificates']):
message = 'Secret is missing {0} within vaultCertificates array at secret ' \
'index {1} and vaultCertificate index {2} in arg {3}'
if 'certificateUrl' not in cert:
errors.append(message.format('certificateUrl', idx, jdx, idx_arg))
if is_windows and 'certificateStore' not in cert:
errors.append(message.format('certificateStore', idx, jdx, idx_arg))
if errors:
raise CLIError('\n'.join(errors))
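# Illustrative sketch (not executed; URLs are hypothetical): a minimal secrets argument that
# passes the checks above for a Windows VM would look like
#
#   [{
#       "sourceVault": {"id": "/subscriptions/.../vaults/myVault"},
#       "vaultCertificates": [{
#           "certificateUrl": "https://myvault.vault.azure.net/secrets/cert/0123",
#           "certificateStore": "My"
#       }]
#   }]
#
# Omitting 'certificateStore' is fine on Linux but reported as an error on Windows.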
# region VM Create Validators
def _parse_image_argument(cmd, namespace):
""" Systematically determines what type is supplied for the --image parameter. Updates the
namespace and returns the type for subsequent processing. """
from msrestazure.tools import is_valid_resource_id
from msrestazure.azure_exceptions import CloudError
import re
# 1 - check if a fully-qualified ID (assumes it is an image ID)
if is_valid_resource_id(namespace.image):
return 'image_id'
# 2 - attempt to match an URN pattern
urn_match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', namespace.image)
if urn_match:
namespace.os_publisher = urn_match.group(1)
namespace.os_offer = urn_match.group(2)
namespace.os_sku = urn_match.group(3)
namespace.os_version = urn_match.group(4)
if not any([namespace.plan_name, namespace.plan_product, namespace.plan_publisher]):
image_plan = _get_image_plan_info_if_exists(cmd, namespace)
if image_plan:
namespace.plan_name = image_plan.name
namespace.plan_product = image_plan.product
namespace.plan_publisher = image_plan.publisher
return 'urn'
# 3 - unmanaged vhd based images?
if urlparse(namespace.image).scheme:
return 'uri'
# 4 - attempt to match an URN alias (most likely)
from azure.cli.command_modules.vm._actions import load_images_from_aliases_doc
images = load_images_from_aliases_doc(cmd.cli_ctx)
matched = next((x for x in images if x['urnAlias'].lower() == namespace.image.lower()), None)
if matched:
namespace.os_publisher = matched['publisher']
namespace.os_offer = matched['offer']
namespace.os_sku = matched['sku']
namespace.os_version = matched['version']
return 'urn'
# 5 - check if an existing managed disk image resource
compute_client = _compute_client_factory(cmd.cli_ctx)
try:
compute_client.images.get(namespace.resource_group_name, namespace.image)
namespace.image = _get_resource_id(cmd.cli_ctx, namespace.image, namespace.resource_group_name,
'images', 'Microsoft.Compute')
return 'image_id'
except CloudError:
err = 'Invalid image "{}". Use a custom image name, id, or pick one from {}'
raise CLIError(err.format(namespace.image, [x['urnAlias'] for x in images]))
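# Illustrative sketch (not executed; values are hypothetical): '--image' values are resolved
# in the order above, e.g.
#
#   '/subscriptions/.../images/myImage'           -> 'image_id'
#   'Canonical:UbuntuServer:16.04-LTS:latest'     -> 'urn'   (publisher:offer:sku:version)
#   'https://account.blob.core.windows.net/x.vhd' -> 'uri'
#   'UbuntuLTS'                                   -> 'urn'   (alias resolved from the aliases doc)
#   'myManagedImage'                              -> 'image_id' if it exists in the resource group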
def _get_image_plan_info_if_exists(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
try:
compute_client = _compute_client_factory(cmd.cli_ctx)
if namespace.os_version.lower() == 'latest':
image_version = _get_latest_image_version(cmd.cli_ctx, namespace.location, namespace.os_publisher,
namespace.os_offer, namespace.os_sku)
else:
image_version = namespace.os_version
image = compute_client.virtual_machine_images.get(namespace.location,
namespace.os_publisher,
namespace.os_offer,
namespace.os_sku,
image_version)
# pylint: disable=no-member
return image.plan
except CloudError as ex:
logger.warning("Querying the image of '%s' failed for an error '%s'. Configuring plan settings "
"will be skipped", namespace.image, ex.message)
# pylint: disable=inconsistent-return-statements
def _get_storage_profile_description(profile):
if profile == StorageProfile.SACustomImage:
return 'create unmanaged OS disk from generalized VHD'
elif profile == StorageProfile.SAPirImage:
return 'create unmanaged OS disk from Azure Marketplace image'
elif profile == StorageProfile.SASpecializedOSDisk:
return 'attach to existing unmanaged OS disk'
elif profile == StorageProfile.ManagedCustomImage:
return 'create managed OS disk from custom image'
elif profile == StorageProfile.ManagedPirImage:
return 'create managed OS disk from Azure Marketplace image'
elif profile == StorageProfile.ManagedSpecializedOSDisk:
return 'attach existing managed OS disk'
def _validate_managed_disk_sku(sku):
allowed_skus = ['Premium_LRS', 'Standard_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS']
if sku and sku.lower() not in [x.lower() for x in allowed_skus]:
raise CLIError("invalid storage SKU '{}': allowed values: '{}'".format(sku, allowed_skus))
def _validate_location(cmd, namespace, zone_info, size_info):
from ._vm_utils import list_sku_info
if not namespace.location:
get_default_location_from_resource_group(cmd, namespace)
if zone_info:
sku_infos = list_sku_info(cmd.cli_ctx, namespace.location)
temp = next((x for x in sku_infos if x.name.lower() == size_info.lower()), None)
# For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property
if not hasattr(temp, 'location_info'):
return
if not temp or not [x for x in (temp.location_info or []) if x.zones]:
raise CLIError("{}'s location can't be used to create the VM/VMSS because availablity zone is not yet "
"supported. Please use '--location' to specify a capable one. 'az vm list-skus' can be "
"used to find such locations".format(namespace.resource_group_name))
# pylint: disable=too-many-branches, too-many-statements
def _validate_vm_create_storage_profile(cmd, namespace, for_scale_set=False):
from msrestazure.tools import parse_resource_id
# use minimal parameters to resolve the expected storage profile
if getattr(namespace, 'attach_os_disk', None) and not namespace.image:
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #3
namespace.storage_profile = StorageProfile.SASpecializedOSDisk
else:
# STORAGE PROFILE #6
namespace.storage_profile = StorageProfile.ManagedSpecializedOSDisk
elif namespace.image and not getattr(namespace, 'attach_os_disk', None):
image_type = _parse_image_argument(cmd, namespace)
if image_type == 'uri':
# STORAGE PROFILE #2
namespace.storage_profile = StorageProfile.SACustomImage
elif image_type == 'image_id':
# STORAGE PROFILE #5
namespace.storage_profile = StorageProfile.ManagedCustomImage
elif image_type == 'urn':
if namespace.use_unmanaged_disk:
# STORAGE PROFILE #1
namespace.storage_profile = StorageProfile.SAPirImage
else:
# STORAGE PROFILE #4
namespace.storage_profile = StorageProfile.ManagedPirImage
else:
raise CLIError('Unrecognized image type: {}'.format(image_type))
else:
# did not specify image XOR attach-os-disk
raise CLIError('incorrect usage: --image IMAGE | --attach-os-disk DISK')
auth_params = ['admin_password', 'admin_username', 'authentication_type',
'generate_ssh_keys', 'ssh_dest_key_path', 'ssh_key_value']
# perform parameter validation for the specific storage profile
# start with the required/forbidden parameters for VM
if namespace.storage_profile == StorageProfile.ManagedPirImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedCustomImage:
required = ['image']
forbidden = ['os_type', 'attach_os_disk', 'storage_account',
'storage_container_name', 'use_unmanaged_disk']
if for_scale_set:
forbidden.append('os_disk_name')
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
required = ['os_type', 'attach_os_disk']
forbidden = ['os_disk_name', 'os_caching', 'storage_account',
'storage_container_name', 'use_unmanaged_disk', 'storage_sku'] + auth_params
_validate_managed_disk_sku(namespace.storage_sku)
elif namespace.storage_profile == StorageProfile.SAPirImage:
required = ['image', 'use_unmanaged_disk']
forbidden = ['os_type', 'attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SACustomImage:
required = ['image', 'os_type', 'use_unmanaged_disk']
forbidden = ['attach_os_disk', 'data_disk_sizes_gb']
elif namespace.storage_profile == StorageProfile.SASpecializedOSDisk:
required = ['os_type', 'attach_os_disk', 'use_unmanaged_disk']
forbidden = ['os_disk_name', 'os_caching', 'image', 'storage_account',
'storage_container_name', 'data_disk_sizes_gb', 'storage_sku'] + auth_params
else:
raise CLIError('Unrecognized storage profile: {}'.format(namespace.storage_profile))
logger.debug("storage profile '%s'", namespace.storage_profile)
if for_scale_set:
# VMSS lacks some parameters, so scrub these out
props_to_remove = ['attach_os_disk', 'storage_account']
for prop in props_to_remove:
if prop in required:
required.remove(prop)
if prop in forbidden:
forbidden.remove(prop)
# set default storage SKU if not provided and using an image based OS
if not namespace.storage_sku and namespace.storage_profile in [StorageProfile.SAPirImage, StorageProfile.SACustomImage]: # pylint: disable=line-too-long
namespace.storage_sku = 'Standard_LRS' if for_scale_set else 'Premium_LRS'
if namespace.storage_sku == 'UltraSSD_LRS' and namespace.ultra_ssd_enabled is None:
namespace.ultra_ssd_enabled = True
# Now verify the status of the required and forbidden parameters
validate_parameter_set(
namespace, required, forbidden,
description='storage profile: {}:'.format(_get_storage_profile_description(namespace.storage_profile)))
image_data_disks_num = 0
if namespace.storage_profile == StorageProfile.ManagedCustomImage:
# extract additional information from a managed custom image
res = parse_resource_id(namespace.image)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
if res['type'].lower() == 'images':
image_info = compute_client.images.get(res['resource_group'], res['name'])
namespace.os_type = image_info.storage_profile.os_disk.os_type.value
image_data_disks_num = len(image_info.storage_profile.data_disks or [])
elif res['type'].lower() == 'galleries':
image_info = compute_client.gallery_images.get(resource_group_name=res['resource_group'],
gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
namespace.os_type = image_info.os_type.value
gallery_image_version = res.get('child_name_2', '')
if gallery_image_version.lower() in ['latest', '']:
image_version_infos = compute_client.gallery_image_versions.list_by_gallery_image(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'])
image_version_infos = [x for x in image_version_infos if not x.publishing_profile.exclude_from_latest]
if not image_version_infos:
raise CLIError('There is no latest image version for "{}"'.format(namespace.image))
image_version_info = sorted(image_version_infos, key=lambda x: x.publishing_profile.published_date)[-1]
else:
image_version_info = compute_client.gallery_image_versions.get(
resource_group_name=res['resource_group'], gallery_name=res['name'],
gallery_image_name=res['child_name_1'], gallery_image_version_name=res['child_name_2'])
image_data_disks_num = len(image_version_info.storage_profile.data_disk_images or [])
else:
raise CLIError('usage error: unrecognized image information "{}"'.format(namespace.image))
# pylint: disable=no-member
elif namespace.storage_profile == StorageProfile.ManagedSpecializedOSDisk:
# accept disk name or ID
namespace.attach_os_disk = _get_resource_id(
cmd.cli_ctx, namespace.attach_os_disk, namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if getattr(namespace, 'attach_data_disks', None):
if not namespace.use_unmanaged_disk:
namespace.attach_data_disks = [_get_resource_id(cmd.cli_ctx, d, namespace.resource_group_name, 'disks',
'Microsoft.Compute') for d in namespace.attach_data_disks]
if not namespace.os_type:
namespace.os_type = 'windows' if 'windows' in namespace.os_offer.lower() else 'linux'
from ._vm_utils import normalize_disk_info
# attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash
namespace.disk_info = normalize_disk_info(image_data_disks_num=image_data_disks_num,
data_disk_sizes_gb=namespace.data_disk_sizes_gb,
attach_data_disks=getattr(namespace, 'attach_data_disks', []),
storage_sku=namespace.storage_sku,
os_disk_caching=namespace.os_caching,
data_disk_cachings=namespace.data_caching)
def _validate_vm_create_storage_account(cmd, namespace):
from msrestazure.tools import parse_resource_id
if namespace.storage_account:
storage_id = parse_resource_id(namespace.storage_account)
rg = storage_id.get('resource_group', namespace.resource_group_name)
if check_existence(cmd.cli_ctx, storage_id['name'], rg, 'Microsoft.Storage', 'storageAccounts'):
# 1 - existing storage account specified
namespace.storage_account_type = 'existing'
logger.debug("using specified existing storage account '%s'", storage_id['name'])
else:
# 2 - params for new storage account specified
namespace.storage_account_type = 'new'
logger.debug("specified storage account '%s' not found and will be created", storage_id['name'])
else:
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
storage_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_STORAGE).storage_accounts
# find storage account in target resource group that matches the VM's location
sku_tier = 'Premium' if 'Premium' in namespace.storage_sku else 'Standard'
account = next(
(a for a in storage_client.list_by_resource_group(namespace.resource_group_name)
if a.sku.tier.value == sku_tier and a.location == namespace.location), None)
if account:
# 3 - nothing specified - find viable storage account in target resource group
namespace.storage_account = account.name
namespace.storage_account_type = 'existing'
logger.debug("suitable existing storage account '%s' will be used", account.name)
else:
# 4 - nothing specified - create a new storage account
namespace.storage_account_type = 'new'
logger.debug('no suitable storage account found. One will be created.')
def _validate_vm_create_availability_set(cmd, namespace):
from msrestazure.tools import parse_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
if namespace.availability_set:
as_id = parse_resource_id(namespace.availability_set)
name = as_id['name']
rg = as_id.get('resource_group', namespace.resource_group_name)
if not check_existence(cmd.cli_ctx, name, rg, 'Microsoft.Compute', 'availabilitySets'):
raise CLIError("Availability set '{}' does not exist.".format(name))
namespace.availability_set = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=rg,
namespace='Microsoft.Compute',
type='availabilitySets',
name=name)
logger.debug("adding to specified availability set '%s'", namespace.availability_set)
def _validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=False):
from msrestazure.tools import is_valid_resource_id
vnet = namespace.vnet_name
subnet = namespace.subnet
rg = namespace.resource_group_name
location = namespace.location
nics = getattr(namespace, 'nics', None)
if not vnet and not subnet and not nics:
logger.debug('no subnet specified. Attempting to find an existing Vnet and subnet...')
# if nothing specified, try to find an existing vnet and subnet in the target resource group
client = get_network_client(cmd.cli_ctx).virtual_networks
# find VNET in target resource group that matches the VM's location with a matching subnet
for vnet_match in (v for v in client.list(rg) if v.location == location and v.subnets):
# 1 - find a suitable existing vnet/subnet
result = None
if not for_scale_set:
result = next((s for s in vnet_match.subnets if s.name.lower() != 'gatewaysubnet'), None)
else:
def _check_subnet(s):
if s.name.lower() == 'gatewaysubnet':
return False
subnet_mask = s.address_prefix.split('/')[-1]
return _subnet_capacity_check(subnet_mask, namespace.instance_count,
not namespace.disable_overprovision)
result = next((s for s in vnet_match.subnets if _check_subnet(s)), None)
if not result:
continue
namespace.subnet = result.name
namespace.vnet_name = vnet_match.name
namespace.vnet_type = 'existing'
logger.debug("existing vnet '%s' and subnet '%s' found", namespace.vnet_name, namespace.subnet)
return
if subnet:
subnet_is_id = is_valid_resource_id(subnet)
if (subnet_is_id and vnet) or (not subnet_is_id and not vnet):
raise CLIError("incorrect '--subnet' usage: --subnet SUBNET_ID | "
"--subnet SUBNET_NAME --vnet-name VNET_NAME")
subnet_exists = \
check_existence(cmd.cli_ctx, subnet, rg, 'Microsoft.Network', 'subnets', vnet, 'virtualNetworks')
if subnet_is_id and not subnet_exists:
raise CLIError("Subnet '{}' does not exist.".format(subnet))
elif subnet_exists:
# 2 - user specified existing vnet/subnet
namespace.vnet_type = 'existing'
logger.debug("using specified vnet '%s' and subnet '%s'", namespace.vnet_name, namespace.subnet)
return
# 3 - create a new vnet/subnet
namespace.vnet_type = 'new'
logger.debug('no suitable subnet found. One will be created.')
def _subnet_capacity_check(subnet_mask, vmss_instance_count, over_provision):
mask = int(subnet_mask)
# subtract 2 for the reserved network and broadcast addresses
# multiply by 1.5 so we have enough leeway for over-provisioning
factor = 1.5 if over_provision else 1
return ((1 << (32 - mask)) - 2) > int(vmss_instance_count * factor)
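# Illustrative sketch (not executed): the capacity test above computes
# (2**(32 - mask) - 2) > instance_count * factor, e.g. for a /24 subnet:
#
#   _subnet_capacity_check('24', 150, over_provision=True)   # 254 > 225          -> True
#   _subnet_capacity_check('24', 200, over_provision=True)   # 254 > 300 is False -> False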
def _validate_vm_vmss_accelerated_networking(cli_ctx, namespace):
if namespace.accelerated_networking is None:
size = getattr(namespace, 'size', None) or getattr(namespace, 'vm_sku', None)
size = size.lower()
# to refresh the list, run 'az vm create --accelerated-networking --size Standard_DS1_v2' and
# get it from the error
aval_sizes = ['Standard_D3_v2', 'Standard_D12_v2', 'Standard_D3_v2_Promo', 'Standard_D12_v2_Promo',
'Standard_DS3_v2', 'Standard_DS12_v2', 'Standard_DS13-4_v2', 'Standard_DS14-4_v2',
'Standard_DS3_v2_Promo', 'Standard_DS12_v2_Promo', 'Standard_DS13-4_v2_Promo',
'Standard_DS14-4_v2_Promo', 'Standard_F4', 'Standard_F4s', 'Standard_D8_v3', 'Standard_D8s_v3',
'Standard_D32-8s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D3_v2_ABC',
'Standard_D12_v2_ABC', 'Standard_F4_ABC', 'Standard_F8s_v2', 'Standard_D4_v2',
'Standard_D13_v2', 'Standard_D4_v2_Promo', 'Standard_D13_v2_Promo', 'Standard_DS4_v2',
'Standard_DS13_v2', 'Standard_DS14-8_v2', 'Standard_DS4_v2_Promo', 'Standard_DS13_v2_Promo',
'Standard_DS14-8_v2_Promo', 'Standard_F8', 'Standard_F8s', 'Standard_M64-16ms',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D32-16s_v3', 'Standard_D64-16s_v3',
'Standard_E16_v3', 'Standard_E16s_v3', 'Standard_E32-16s_v3', 'Standard_D4_v2_ABC',
'Standard_D13_v2_ABC', 'Standard_F8_ABC', 'Standard_F16s_v2', 'Standard_D5_v2',
'Standard_D14_v2', 'Standard_D5_v2_Promo', 'Standard_D14_v2_Promo', 'Standard_DS5_v2',
'Standard_DS14_v2', 'Standard_DS5_v2_Promo', 'Standard_DS14_v2_Promo', 'Standard_F16',
'Standard_F16s', 'Standard_M64-32ms', 'Standard_M128-32ms', 'Standard_D32_v3',
'Standard_D32s_v3', 'Standard_D64-32s_v3', 'Standard_E32_v3', 'Standard_E32s_v3',
'Standard_E32-8s_v3', 'Standard_E32-16_v3', 'Standard_D5_v2_ABC', 'Standard_D14_v2_ABC',
'Standard_F16_ABC', 'Standard_F32s_v2', 'Standard_D15_v2', 'Standard_D15_v2_Promo',
'Standard_D15_v2_Nested', 'Standard_DS15_v2', 'Standard_DS15_v2_Promo',
'Standard_DS15_v2_Nested', 'Standard_D40_v3', 'Standard_D40s_v3', 'Standard_D15_v2_ABC',
'Standard_M64ms', 'Standard_M64s', 'Standard_M128-64ms', 'Standard_D64_v3', 'Standard_D64s_v3',
'Standard_E64_v3', 'Standard_E64s_v3', 'Standard_E64-16s_v3', 'Standard_E64-32s_v3',
'Standard_F64s_v2', 'Standard_F72s_v2', 'Standard_M128s', 'Standard_M128ms', 'Standard_L8s_v2',
'Standard_L16s_v2', 'Standard_L32s_v2', 'Standard_L64s_v2', 'Standard_L96s_v2', 'SQLGL',
'SQLGLCore', 'Standard_D4_v3', 'Standard_D4s_v3', 'Standard_D2_v2', 'Standard_DS2_v2',
'Standard_E4_v3', 'Standard_E4s_v3', 'Standard_F2', 'Standard_F2s', 'Standard_F4s_v2',
'Standard_D11_v2', 'Standard_DS11_v2', 'AZAP_Performance_ComputeV17C']
aval_sizes = [x.lower() for x in aval_sizes]
if size not in aval_sizes:
return
new_4core_sizes = ['Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D3_v2_ABC', 'Standard_DS3_v2',
'Standard_DS3_v2_Promo', 'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D12_v2_ABC',
'Standard_DS12_v2', 'Standard_DS12_v2_Promo', 'Standard_F8s_v2', 'Standard_F4',
'Standard_F4_ABC', 'Standard_F4s', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_D8_v3',
'Standard_D8s_v3']
new_4core_sizes = [x.lower() for x in new_4core_sizes]
if size not in new_4core_sizes:
compute_client = _compute_client_factory(cli_ctx)
sizes = compute_client.virtual_machine_sizes.list(namespace.location)
size_info = next((s for s in sizes if s.name.lower() == size), None)
if size_info is None or size_info.number_of_cores < 8:
return
# VMs need to be a supported image in the marketplace
# Ubuntu 16.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, CoreOS Linux, Debian "Stretch" with backports kernel
# Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2
publisher, offer, sku = namespace.os_publisher, namespace.os_offer, namespace.os_sku
if not publisher:
return
publisher, offer, sku = publisher.lower(), offer.lower(), sku.lower()
distros = [('canonical', 'UbuntuServer', '^16.04'), ('suse', 'sles', '^12-sp3'), ('redhat', 'rhel', '^7.4'),
('openlogic', 'centos', '^7.4'), ('coreos', 'coreos', None), ('credativ', 'debian', '-backports'),
('oracle', 'oracle-linux', '^7.4'), ('MicrosoftWindowsServer', 'WindowsServer', '^2016'),
('MicrosoftWindowsServer', 'WindowsServer', '^2012-R2')]
import re
for p, o, s in distros:
if p.lower() == publisher and (o is None or o.lower() == offer) and (s is None or re.match(s, sku, re.I)):
namespace.accelerated_networking = True
def _validate_vmss_create_subnet(namespace):
if namespace.vnet_type == 'new':
if namespace.subnet_address_prefix is None:
cidr = namespace.vnet_address_prefix.split('/', 1)[0]
i = 0
for i in range(24, 16, -1):
if _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision):
break
# range(24, 16, -1) never yields a value below 17, so re-check capacity for the last candidate mask
if not _subnet_capacity_check(i, namespace.instance_count, not namespace.disable_overprovision):
err = "instance count '{}' is out of range of 2^16 subnet size"
raise CLIError(err.format(namespace.instance_count))
namespace.subnet_address_prefix = '{}/{}'.format(cidr, i)
if namespace.app_gateway_type and namespace.app_gateway_subnet_address_prefix is None:
namespace.app_gateway_subnet_address_prefix = _get_next_subnet_addr_suffix(
namespace.vnet_address_prefix, namespace.subnet_address_prefix, 24)
def _get_next_subnet_addr_suffix(vnet_cidr, subnet_cidr, new_mask):
def _convert_to_int(address, bit_mask_len):
a, b, c, d = [int(x) for x in address.split('.')]
result = '{0:08b}{1:08b}{2:08b}{3:08b}'.format(a, b, c, d)
return int(result[:-bit_mask_len], 2)
error_msg = "usage error: --subnet-address-prefix value should be a subrange of --vnet-address-prefix's"
# extract vnet information needed to verify the defaults we come up with
vnet_ip_address, mask = vnet_cidr.split('/')
vnet_bit_mask_len = 32 - int(mask)
vnet_int = _convert_to_int(vnet_ip_address, vnet_bit_mask_len)
subnet_ip_address, mask = subnet_cidr.split('/')
subnet_bit_mask_len = 32 - int(mask)
if vnet_bit_mask_len <= subnet_bit_mask_len:
raise CLIError(error_msg)
candidate_int = _convert_to_int(subnet_ip_address, subnet_bit_mask_len) + 1
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int: # overflows?
candidate_int = candidate_int - 2 # try the other way around
if (candidate_int >> (vnet_bit_mask_len - subnet_bit_mask_len)) > vnet_int:
raise CLIError(error_msg)
# format back to the cidr
candidate_str = '{0:32b}'.format(candidate_int << subnet_bit_mask_len)
return '{0}.{1}.{2}.{3}/{4}'.format(int(candidate_str[0:8], 2), int(candidate_str[8:16], 2),
int(candidate_str[16:24], 2), int(candidate_str[24:32], 2),
new_mask)
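# Illustrative sketch (not executed): given the defaults this computes the next adjacent block,
# e.g. _get_next_subnet_addr_suffix('10.0.0.0/16', '10.0.0.0/24', 24) -> '10.0.1.0/24',
# and raises the usage error above when the subnet prefix does not fit inside the vnet prefix.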
def _validate_vm_create_nsg(cmd, namespace):
if namespace.nsg:
if check_existence(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'Microsoft.Network', 'networkSecurityGroups'):
namespace.nsg_type = 'existing'
logger.debug("using specified NSG '%s'", namespace.nsg)
else:
namespace.nsg_type = 'new'
logger.debug("specified NSG '%s' not found. It will be created.", namespace.nsg)
elif namespace.nsg == '':
namespace.nsg_type = None
logger.debug('no NSG will be used')
elif namespace.nsg is None:
namespace.nsg_type = 'new'
logger.debug('new NSG will be created')
def _validate_vmss_create_nsg(cmd, namespace):
if namespace.nsg:
namespace.nsg = _get_resource_id(cmd.cli_ctx, namespace.nsg, namespace.resource_group_name,
'networkSecurityGroups', 'Microsoft.Network')
def _validate_vm_vmss_create_public_ip(cmd, namespace):
if namespace.public_ip_address:
if check_existence(cmd.cli_ctx, namespace.public_ip_address, namespace.resource_group_name,
'Microsoft.Network', 'publicIPAddresses'):
namespace.public_ip_address_type = 'existing'
logger.debug("using existing specified public IP '%s'", namespace.public_ip_address)
else:
namespace.public_ip_address_type = 'new'
logger.debug("specified public IP '%s' not found. It will be created.", namespace.public_ip_address)
elif namespace.public_ip_address == '':
namespace.public_ip_address_type = None
logger.debug('no public IP address will be used')
elif namespace.public_ip_address is None:
namespace.public_ip_address_type = 'new'
logger.debug('new public IP address will be created')
# Public-IP SKU is only exposed for VM. VMSS has no such needs so far
if getattr(namespace, 'public_ip_sku', None):
from azure.cli.core.profiles import ResourceType
PublicIPAddressSkuName, IPAllocationMethod = cmd.get_models('PublicIPAddressSkuName', 'IPAllocationMethod',
resource_type=ResourceType.MGMT_NETWORK)
if namespace.public_ip_sku == PublicIPAddressSkuName.standard.value:
if not namespace.public_ip_address_allocation:
namespace.public_ip_address_allocation = IPAllocationMethod.static.value
def _validate_vmss_create_public_ip(cmd, namespace):
if namespace.load_balancer_type is None and namespace.app_gateway_type is None:
if namespace.public_ip_address:
raise CLIError('--public-ip-address can only be used when creating a new load '
'balancer or application gateway frontend.')
namespace.public_ip_address = ''
_validate_vm_vmss_create_public_ip(cmd, namespace)
def _validate_vm_create_nics(cmd, namespace):
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
nics_value = namespace.nics
nics = []
if not nics_value:
namespace.nic_type = 'new'
logger.debug('new NIC will be created')
return
if not isinstance(nics_value, list):
nics_value = [nics_value]
for n in nics_value:
nics.append({
'id': n if '/' in n else resource_id(name=n,
resource_group=namespace.resource_group_name,
namespace='Microsoft.Network',
type='networkInterfaces',
subscription=get_subscription_id(cmd.cli_ctx)),
'properties': {
'primary': nics_value[0] == n
}
})
namespace.nics = nics
namespace.nic_type = 'existing'
namespace.public_ip_address_type = None
logger.debug('existing NIC(s) will be used')
def _validate_vm_vmss_create_auth(namespace):
if namespace.storage_profile in [StorageProfile.ManagedSpecializedOSDisk,
StorageProfile.SASpecializedOSDisk]:
return
namespace.admin_username = _validate_admin_username(namespace.admin_username, namespace.os_type)
if not namespace.os_type:
raise CLIError("Unable to resolve OS type. Specify '--os-type' argument.")
if not namespace.authentication_type:
# apply default auth type (password for Windows, ssh for Linux) by examining the OS type
namespace.authentication_type = 'password' \
if (namespace.os_type.lower() == 'windows' or namespace.admin_password) else 'ssh'
if namespace.os_type.lower() == 'windows' and namespace.authentication_type == 'ssh':
raise CLIError('SSH not supported for Windows VMs.')
# validate proper arguments supplied based on the authentication type
if namespace.authentication_type == 'password':
if namespace.ssh_key_value or namespace.ssh_dest_key_path:
raise ValueError(
"incorrect usage for authentication-type 'password': "
"[--admin-username USERNAME] --admin-password PASSWORD")
from knack.prompting import prompt_pass, NoTTYException
try:
if not namespace.admin_password:
namespace.admin_password = prompt_pass('Admin Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify password in non-interactive mode.')
# validate password
_validate_admin_password(namespace.admin_password,
namespace.os_type)
elif namespace.authentication_type == 'ssh':
if namespace.admin_password:
raise ValueError('Admin password cannot be used with SSH authentication type')
validate_ssh_key(namespace)
if not namespace.ssh_dest_key_path:
namespace.ssh_dest_key_path = \
'/home/{}/.ssh/authorized_keys'.format(namespace.admin_username)
def _validate_admin_username(username, os_type):
import re
if not username:
raise CLIError("admin user name can not be empty")
is_linux = (os_type.lower() == 'linux')
# pylint: disable=line-too-long
pattern = (r'[\\\/"\[\]:|<>+=;,?*@#()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+')
linux_err = r'admin user name cannot contain upper case characters A-Z, special characters \/"[]:|<>+=;,?*@#()! or start with $ or -'
win_err = r'admin user name cannot contain special characters \/"[]:|<>+=;,?*@# or end with .'
if re.findall(pattern, username):
raise CLIError(linux_err if is_linux else win_err)
if is_linux and re.findall(r'^[$-]+', username):
raise CLIError(linux_err)
if not is_linux and username.endswith('.'):
raise CLIError(win_err)
disallowed_user_names = [
"administrator", "admin", "user", "user1", "test", "user2",
"test1", "user3", "admin1", "1", "123", "a", "actuser", "adm",
"admin2", "aspnet", "backup", "console", "guest",
"owner", "root", "server", "sql", "support", "support_388945a0",
"sys", "test2", "test3", "user4", "user5"]
if username.lower() in disallowed_user_names:
raise CLIError("This user name '{}' meets the general requirements, but is specifically disallowed for this image. Please try a different value.".format(username))
return username
def _validate_admin_password(password, os_type):
import re
is_linux = (os_type.lower() == 'linux')
max_length = 72 if is_linux else 123
min_length = 12
if len(password) not in range(min_length, max_length + 1):
raise CLIError('The password length must be between {} and {}'.format(min_length,
max_length))
contains_lower = re.findall('[a-z]+', password)
contains_upper = re.findall('[A-Z]+', password)
contains_digit = re.findall('[0-9]+', password)
contains_special_char = re.findall(r'[ `~!@#$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password)
count = len([x for x in [contains_lower, contains_upper,
contains_digit, contains_special_char] if x])
# pylint: disable=line-too-long
if count < 3:
raise CLIError('Password must have at least 3 of the following: 1 lower case character, 1 upper case character, 1 number and 1 special character')
def validate_ssh_key(namespace):
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
# figure out appropriate file names:
# 'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage, back up your keys to a safe location.",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let CLI generate one for you')
namespace.ssh_key_value = content
def _validate_vm_vmss_msi(cmd, namespace, from_set_command=False):
if from_set_command or namespace.assign_identity is not None:
identities = namespace.assign_identity or []
from ._vm_utils import MSI_LOCAL_ID
for i, _ in enumerate(identities):
if identities[i] != MSI_LOCAL_ID:
identities[i] = _get_resource_id(cmd.cli_ctx, identities[i], namespace.resource_group_name,
'userAssignedIdentities', 'Microsoft.ManagedIdentity')
if not namespace.identity_scope and getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError("usage error: '--role {}' is not applicable as the '--scope' is not provided".format(
namespace.identity_role))
user_assigned_identities = [x for x in identities if x != MSI_LOCAL_ID]
if user_assigned_identities and not cmd.supported_api_version(min_api='2017-12-01'):
raise CLIError('usage error: user assigned identity is only available under profile '
'with minimum Compute API version of 2017-12-01')
if namespace.identity_scope:
if identities and MSI_LOCAL_ID not in identities:
raise CLIError("usage error: '--scope'/'--role' is only applicable when assign system identity")
# keep 'identity_role' for output as logical name is more readable
setattr(namespace, 'identity_role_id', _resolve_role_id(cmd.cli_ctx, namespace.identity_role,
namespace.identity_scope))
elif namespace.identity_scope or getattr(namespace.identity_role, 'is_default', None) is None:
raise CLIError('usage error: --assign-identity [--scope SCOPE] [--role ROLE]')
def _resolve_role_id(cli_ctx, role, scope):
import re
import uuid
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_AUTHORIZATION).role_definitions
role_id = None
if re.match(r'/subscriptions/.+/providers/Microsoft.Authorization/roleDefinitions/',
role, re.I):
role_id = role
else:
try:
uuid.UUID(role)
role_id = '/subscriptions/{}/providers/Microsoft.Authorization/roleDefinitions/{}'.format(
client.config.subscription_id, role)
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick an id from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def process_vm_create_namespace(cmd, namespace):
validate_tags(namespace)
_validate_location(cmd, namespace, namespace.zone, namespace.size)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace)
if namespace.storage_profile in [StorageProfile.SACustomImage,
StorageProfile.SAPirImage]:
_validate_vm_create_storage_account(cmd, namespace)
_validate_vm_create_availability_set(cmd, namespace)
_validate_vm_vmss_create_vnet(cmd, namespace)
_validate_vm_create_nsg(cmd, namespace)
_validate_vm_vmss_create_public_ip(cmd, namespace)
_validate_vm_create_nics(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
if namespace.secrets:
_validate_secrets(namespace.secrets, namespace.os_type)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable on Windows VM')
_validate_vm_vmss_msi(cmd, namespace)
if namespace.boot_diagnostics_storage:
namespace.boot_diagnostics_storage = get_storage_blob_uri(cmd.cli_ctx, namespace.boot_diagnostics_storage)
# endregion
# region VMSS Create Validators
def _get_default_address_pool(cli_ctx, resource_group, balancer_name, balancer_type):
option_name = '--backend-pool-name'
client = getattr(get_network_client(cli_ctx), balancer_type, None)
if not client:
raise CLIError('unrecognized balancer type: {}'.format(balancer_type))
balancer = client.get(resource_group, balancer_name)
values = [x.name for x in balancer.backend_address_pools]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
elif not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0]
def _validate_vmss_single_placement_group(namespace):
if namespace.platform_fault_domain_count is not None and namespace.zones is None:
raise CLIError('usage error: --platform-fault-domain-count COUNT --zones ZONES')
if namespace.zones or namespace.instance_count > 100:
if namespace.single_placement_group is None:
namespace.single_placement_group = False
elif namespace.single_placement_group:
raise CLIError("usage error: '--single-placement-group' should be turned off for zonal scale-sets or with"
" 100+ instances")
def _validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
from azure.cli.core.profiles import ResourceType
std_lb_is_available = cmd.supported_api_version(min_api='2017-08-01', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer and namespace.application_gateway:
raise CLIError('incorrect usage: --load-balancer NAME_OR_ID | '
'--application-gateway NAME_OR_ID')
# Resolve the type of balancer (if any) being used
balancer_type = 'None'
if namespace.load_balancer is None and namespace.application_gateway is None:
if std_lb_is_available:
balancer_type = 'loadBalancer'
else: # needed for Stack profile 2017_03_09
balancer_type = 'loadBalancer' if namespace.single_placement_group is not False else 'applicationGateway'
logger.debug("W/o STD LB, defaulting to '%s' under because single placement group is disabled",
balancer_type)
elif namespace.load_balancer:
balancer_type = 'loadBalancer'
elif namespace.application_gateway:
balancer_type = 'applicationGateway'
if balancer_type == 'applicationGateway':
if namespace.application_gateway:
client = get_network_client(cmd.cli_ctx).application_gateways
try:
rg = parse_resource_id(namespace.application_gateway).get(
'resource_group', namespace.resource_group_name)
ag_name = parse_resource_id(namespace.application_gateway)['name']
client.get(rg, ag_name)
namespace.app_gateway_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, ag_name, 'application_gateways')
logger.debug("using specified existing application gateway '%s'", namespace.application_gateway)
except CloudError:
namespace.app_gateway_type = 'new'
logger.debug("application gateway '%s' not found. It will be created.", namespace.application_gateway)
elif namespace.application_gateway == '':
namespace.app_gateway_type = None
logger.debug('no application gateway will be used')
elif namespace.application_gateway is None:
namespace.app_gateway_type = 'new'
logger.debug('new application gateway will be created')
# AppGateway frontend
required = []
if namespace.app_gateway_type == 'new':
required.append('app_gateway_sku')
required.append('app_gateway_capacity')
if namespace.vnet_type != 'new':
required.append('app_gateway_subnet_address_prefix')
elif namespace.app_gateway_type == 'existing':
required.append('backend_pool_name')
forbidden = ['nat_pool_name', 'load_balancer', 'health_probe']
validate_parameter_set(namespace, required, forbidden, description='network balancer: application gateway')
elif balancer_type == 'loadBalancer':
# LoadBalancer frontend
required = []
forbidden = ['app_gateway_subnet_address_prefix', 'application_gateway', 'app_gateway_sku',
'app_gateway_capacity']
validate_parameter_set(namespace, required, forbidden, description='network balancer: load balancer')
if namespace.load_balancer:
rg = parse_resource_id(namespace.load_balancer).get('resource_group', namespace.resource_group_name)
lb_name = parse_resource_id(namespace.load_balancer)['name']
lb = get_network_lb(cmd.cli_ctx, namespace.resource_group_name, lb_name)
if lb:
namespace.load_balancer_type = 'existing'
namespace.backend_pool_name = namespace.backend_pool_name or \
_get_default_address_pool(cmd.cli_ctx, rg, lb_name, 'load_balancers')
if not namespace.nat_pool_name:
if len(lb.inbound_nat_pools) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' explicitly.".format( # pylint: disable=line-too-long
'--nat-pool-name', ', '.join([n.name for n in lb.inbound_nat_pools])))
elif not lb.inbound_nat_pools: # Associated scaleset will be missing ssh/rdp, so warn here.
logger.warning("No inbound nat pool was configured on '%s'", namespace.load_balancer)
else:
namespace.nat_pool_name = lb.inbound_nat_pools[0].name
logger.debug("using specified existing load balancer '%s'", namespace.load_balancer)
else:
namespace.load_balancer_type = 'new'
logger.debug("load balancer '%s' not found. It will be created.", namespace.load_balancer)
elif namespace.load_balancer == '':
namespace.load_balancer_type = None
logger.debug('no load balancer will be used')
elif namespace.load_balancer is None:
namespace.load_balancer_type = 'new'
logger.debug('new load balancer will be created')
if namespace.load_balancer_type == 'new' and namespace.single_placement_group is False and std_lb_is_available:
LBSkuName = cmd.get_models('LoadBalancerSkuName', resource_type=ResourceType.MGMT_NETWORK)
if namespace.load_balancer_sku is None:
namespace.load_balancer_sku = LBSkuName.standard.value
logger.debug("use Standard sku as single placement group is turned off")
elif namespace.load_balancer_sku == LBSkuName.basic.value:
if namespace.zones:
err = "'Standard' load balancer is required for zonal scale-sets"
elif namespace.instance_count > 100:
err = "'Standard' load balancer is required for scale-sets with 100+ instances"
else:
err = "'Standard' load balancer is required because 'single placement group' is turned off"
raise CLIError('usage error:{}'.format(err))
def get_network_client(cli_ctx):
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK, api_version=get_target_network_api(cli_ctx))
def get_network_lb(cli_ctx, resource_group_name, lb_name):
from msrestazure.azure_exceptions import CloudError
network_client = get_network_client(cli_ctx)
try:
return network_client.load_balancers.get(resource_group_name, lb_name)
except CloudError:
return None
def process_vmss_create_namespace(cmd, namespace):
validate_tags(namespace)
if namespace.vm_sku is None:
from azure.cli.core.cloud import AZURE_US_GOV_CLOUD
if cmd.cli_ctx.cloud.name != AZURE_US_GOV_CLOUD.name:
namespace.vm_sku = 'Standard_DS1_v2'
else:
namespace.vm_sku = 'Standard_D1_v2'
_validate_location(cmd, namespace, namespace.zones, namespace.vm_sku)
validate_asg_names_or_ids(cmd, namespace)
_validate_vm_create_storage_profile(cmd, namespace, for_scale_set=True)
_validate_vm_vmss_create_vnet(cmd, namespace, for_scale_set=True)
_validate_vmss_single_placement_group(namespace)
_validate_vmss_create_load_balancer_or_app_gateway(cmd, namespace)
_validate_vmss_create_subnet(namespace)
_validate_vmss_create_public_ip(cmd, namespace)
_validate_vmss_create_nsg(cmd, namespace)
_validate_vm_vmss_accelerated_networking(cmd.cli_ctx, namespace)
_validate_vm_vmss_create_auth(namespace)
_validate_vm_vmss_msi(cmd, namespace)
if namespace.license_type and namespace.os_type.lower() != 'windows':
raise CLIError('usage error: --license-type is only applicable on Windows VM scaleset')
if not namespace.public_ip_per_vm and namespace.vm_domain_name:
raise CLIError('Usage error: --vm-domain-name can only be used when --public-ip-per-vm is enabled')
if namespace.eviction_policy and not namespace.priority:
raise CLIError('Usage error: --priority PRIORITY [--eviction-policy POLICY]')
# endregion
# region disk, snapshot, image validators
def validate_vm_disk(cmd, namespace):
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
def validate_vmss_disk(cmd, namespace):
if namespace.disk:
namespace.disk = _get_resource_id(cmd.cli_ctx, namespace.disk,
namespace.resource_group_name, 'disks', 'Microsoft.Compute')
if bool(namespace.disk) == bool(namespace.size_gb):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID | --size-gb GB')
elif bool(namespace.disk) != bool(namespace.instance_id):
raise CLIError('usage error: --disk EXIST_DISK --instance-id ID')
def process_disk_or_snapshot_create_namespace(cmd, namespace):
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
if namespace.source:
usage_error = 'usage error: --source {SNAPSHOT | DISK} | --source VHD_BLOB_URI [--source-storage-account-id ID]'
try:
namespace.source_blob_uri, namespace.source_disk, namespace.source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, namespace.source)
if not namespace.source_blob_uri and namespace.source_storage_account_id:
raise CLIError(usage_error)
except CloudError:
raise CLIError(usage_error)
def process_image_create_namespace(cmd, namespace):
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
validate_tags(namespace)
try:
# try capturing from VM, a most common scenario
res_id = _get_resource_id(cmd.cli_ctx, namespace.source, namespace.resource_group_name,
'virtualMachines', 'Microsoft.Compute')
res = parse_resource_id(res_id)
compute_client = _compute_client_factory(cmd.cli_ctx, subscription_id=res['subscription'])
vm_info = compute_client.virtual_machines.get(res['resource_group'], res['name'])
# pylint: disable=no-member
namespace.os_type = vm_info.storage_profile.os_disk.os_type.value
namespace.source_virtual_machine = res_id
if namespace.data_disk_sources:
raise CLIError("'--data-disk-sources' is not allowed when capturing "
"images from virtual machines")
except CloudError:
namespace.os_blob_uri, namespace.os_disk, namespace.os_snapshot = _figure_out_storage_source(cmd.cli_ctx, namespace.resource_group_name, namespace.source) # pylint: disable=line-too-long
namespace.data_blob_uris = []
namespace.data_disks = []
namespace.data_snapshots = []
if namespace.data_disk_sources:
for data_disk_source in namespace.data_disk_sources:
source_blob_uri, source_disk, source_snapshot = _figure_out_storage_source(
cmd.cli_ctx, namespace.resource_group_name, data_disk_source)
if source_blob_uri:
namespace.data_blob_uris.append(source_blob_uri)
if source_disk:
namespace.data_disks.append(source_disk)
if source_snapshot:
namespace.data_snapshots.append(source_snapshot)
if not namespace.os_type:
raise CLIError("usage error: os type is required to create the image, "
"please specify '--os-type OS_TYPE'")
def _figure_out_storage_source(cli_ctx, resource_group_name, source):
from msrestazure.azure_exceptions import CloudError
source_blob_uri = None
source_disk = None
source_snapshot = None
if urlparse(source).scheme: # a uri?
source_blob_uri = source
elif '/disks/' in source.lower():
source_disk = source
elif '/snapshots/' in source.lower():
source_snapshot = source
else:
compute_client = _compute_client_factory(cli_ctx)
# pylint: disable=no-member
try:
info = compute_client.snapshots.get(resource_group_name, source)
source_snapshot = info.id
except CloudError:
info = compute_client.disks.get(resource_group_name, source)
source_disk = info.id
return (source_blob_uri, source_disk, source_snapshot)
def process_disk_encryption_namespace(cmd, namespace):
namespace.disk_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.disk_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
if namespace.key_encryption_keyvault:
if not namespace.key_encryption_key:
raise CLIError("Incorrect usage '--key-encryption-keyvault': "
"'--key-encryption-key' is required")
namespace.key_encryption_keyvault = _get_resource_id(cmd.cli_ctx, namespace.key_encryption_keyvault,
namespace.resource_group_name,
'vaults', 'Microsoft.KeyVault')
def process_assign_identity_namespace(cmd, namespace):
_validate_vm_vmss_msi(cmd, namespace, from_set_command=True)
def process_remove_identity_namespace(cmd, namespace):
if namespace.identities:
from ._vm_utils import MSI_LOCAL_ID
for i in range(len(namespace.identities)):
if namespace.identities[i] != MSI_LOCAL_ID:
namespace.identities[i] = _get_resource_id(cmd.cli_ctx, namespace.identities[i],
namespace.resource_group_name,
'userAssignedIdentities',
'Microsoft.ManagedIdentity')
# TODO move to its own command module https://github.com/Azure/azure-cli/issues/5105
def process_msi_namespace(cmd, namespace):
get_default_location_from_resource_group(cmd, namespace)
validate_tags(namespace)
def process_gallery_image_version_namespace(cmd, namespace):
TargetRegion = cmd.get_models('TargetRegion')
if namespace.target_regions:
regions_info = []
for t in namespace.target_regions:
parts = t.split('=', 1)
if len(parts) == 1:
regions_info.append(TargetRegion(name=parts[0]))
else:
try:
replica_count = int(parts[1])
except ValueError:
raise CLIError("usage error: {}'s replica count must be an integer".format(parts[0]))
regions_info.append(TargetRegion(name=parts[0], regional_replica_count=replica_count))
namespace.target_regions = regions_info
# endregion
| en | 0.660559 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # pylint:disable=too-many-lines # pylint: disable=import-error Fetch resource group from vault name :param str vault_name: name of the key vault :return: resource group name or None :rtype: str Validates a parsed JSON array containing secrets for use in VM Creation Secrets JSON structure [{ "sourceVault": { "id": "value" }, "vaultCertificates": [{ "certificateUrl": "value", "certificateStore": "cert store name (only on windows)" }] }] :param dict secrets: Dict fitting the JSON description above :param string os_type: the type of OS (linux or windows) :return: errors if any were found :rtype: list # region VM Create Validators Systematically determines what type is supplied for the --image parameter. Updates the namespace and returns the type for subsequent processing. # 1 - check if a fully-qualified ID (assumes it is an image ID) # 2 - attempt to match an URN pattern # 3 - unmanaged vhd based images? # 4 - attempt to match an URN alias (most likely) # 5 - check if an existing managed disk image resource # pylint: disable=no-member # pylint: disable=inconsistent-return-statements # For Stack (compute - 2017-03-30), Resource_sku doesn't implement location_info property # pylint: disable=too-many-branches, too-many-statements # use minimal parameters to resolve the expected storage profile # STORAGE PROFILE #3 # STORAGE PROFILE #6 # STORAGE PROFILE #2 # STORAGE PROFILE #5 # STORAGE PROFILE #1 # STORAGE PROFILE #4 # did not specify image XOR attach-os-disk # perform parameter validation for the specific storage profile # start with the required/forbidden parameters for VM # VMSS lacks some parameters, so scrub these out # set default storage SKU if not provided and using an image based OS # pylint: disable=line-too-long # Now verify that the status of required and forbidden parameters # extract additional information from a managed custom image # pylint: disable=no-member # accept disk name or ID # attach_data_disks are not exposed yet for VMSS, so use 'getattr' to avoid crash # 1 - existing storage account specified # 2 - params for new storage account specified # find storage account in target resource group that matches the VM's location # 3 - nothing specified - find viable storage account in target resource group # 4 - nothing specified - create a new storage account # if nothing specified, try to find an existing vnet and subnet in the target resource group # find VNET in target resource group that matches the VM's location with a matching subnet # 1 - find a suitable existing vnet/subnet # 2 - user specified existing vnet/subnet # 3 - create a new vnet/subnet # '2' are the reserved broadcasting addresses # '*1.5' so we have enough leeway for over-provision # to refresh the list, run 'az vm create --accelerated-networking --size Standard_DS1_v2' and # get it from the error # VMs need to be a supported image in the marketplace # Ubuntu 16.04, SLES 12 SP3, RHEL 7.4, CentOS 7.4, CoreOS Linux, Debian "Stretch" with backports kernel # Oracle Linux 7.4, Windows Server 2016, Windows Server 2012R2 # extract vnet information needed to verify the defaults we are coming out # overflows? 
# try the other way around # format back to the cidr # Public-IP SKU is only exposed for VM. VMSS has no such needs so far # apply default auth type (password for Windows, ssh for Linux) by examining the OS type # validate proper arguments supplied based on the authentication type # validate password # pylint: disable=line-too-long #()!A-Z]+' if is_linux else r'[\\\/"\[\]:|<>+=;,?*@]+') #()! or start with $ or -' # or ends with .' #$%^&*()=+_\[\]{}\|;:.\/\'\",<>?]+', password) # pylint: disable=line-too-long # figure out appropriate file names: # 'base_name'(with private keys), and 'base_name.pub'(with public keys) # keep 'identity_role' for output as logical name is more readable # retrieve role id # endregion # region VMSS Create Validators # Resolve the type of balancer (if any) being used # needed for Stack profile 2017_03_09 # AppGateway frontend # LoadBalancer frontend # pylint: disable=line-too-long # Associated scaleset will be missing ssh/rdp, so warn here. # endregion # region disk, snapshot, image validators # try capturing from VM, a most common scenario # pylint: disable=no-member # pylint: disable=line-too-long # a uri? # pylint: disable=no-member # TODO move to its own command module https://github.com/Azure/azure-cli/issues/5105 # endregion | 1.946675 | 2 |
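For reference, the password rule enforced by _validate_admin_password above reduces to a length window (12-72 characters on Linux, 12-123 on Windows) plus at least three of four character classes. The sketch below is a standalone, illustrative restatement of that rule, not part of azure-cli, and it uses a simplified special-character class.

import re

def password_meets_complexity(password, is_linux=True):
    # Illustrative restatement: length bounds plus >= 3 of 4 character classes.
    max_length = 72 if is_linux else 123
    if not 12 <= len(password) <= max_length:
        return False
    classes = [
        re.search(r'[a-z]', password),         # lower case
        re.search(r'[A-Z]', password),         # upper case
        re.search(r'[0-9]', password),         # digit
        re.search(r'[^a-zA-Z0-9]', password),  # simplified special-character class
    ]
    return sum(1 for match in classes if match) >= 3

assert password_meets_complexity('Sample1234Pass')        # lower + upper + digit present
assert not password_meets_complexity('alllowercaseonly')  # only one character class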
scraping/faqscraper.py | ednihs-yahska/unibrowser | 0 | 242 | <reponame>ednihs-yahska/unibrowser
import re
import httplib2
from bs4 import BeautifulSoup
from scraping.faqscrapperutil import stripExtra, removeDuplicates, removeBlackListedQuestions, getBlackListedQuestions, convertToJsonList, saveToMongo
from scraping.Constants import ENABLE_CUSTOM_QUESTIONS_FILTER, FAQ_LINKS, COLLECTION_NAME
def cleanQuestions(questions):
questionList = []
for question in questions:
questionList.append(stripExtra(question.lstrip().rstrip()))
return removeDuplicates(questionList)
def getLastAnswer(question, bodyText):
start = bodyText.index(question) + len(question)
text = bodyText[start : -1].lstrip()
# print(text.lstrip())
whitespaceCount = 0
# print(answerLength)
for i in range(0, len(text)):
# print(answer[i], ' isSpace : ', answer[i].isspace())
if text[i].isspace():
whitespaceCount = whitespaceCount + 1
if whitespaceCount >= 3:
# print(0 + i - 3)
# print(text[0 : 0 + i - 2])
return text[0 : 0 + i - 2]
else :
if whitespaceCount != 0:
whitespaceCount = 0
def cleanAnswer(answer):
answerLength = len(answer)
whitespaceCount = 0
# print(answerLength)
for i in range(0, answerLength):
# print(answer[i], ' isSpace : ', answer[i].isspace())
if answer[i].isspace():
whitespaceCount = whitespaceCount + 1
if whitespaceCount >= 3:
# print(0 + i - 3)
return answer[0 : 0 + i - 2].lstrip()
else :
if whitespaceCount != 0:
whitespaceCount = 0
return answer.rstrip()
def getAnswers(body, questions):
bodyText = body.getText()
# answerTag = getAnswerTag(body, bodyText, questions)
# print(bodyText)
questionCount = len(questions)
answerList = []
for i in range(0, questionCount):
print('Q: ', questions[i])
if i == questionCount - 1:
#Last element
answer = getLastAnswer(questions[i], bodyText)
else :
start = bodyText.index(questions[i]) + len(questions[i])
end = bodyText.index(questions[i + 1], start, -1)
print("Start : ", start , " End : ", end)
soup1 = BeautifulSoup(bodyText[start : end], 'html.parser')
# print(soup1)
answer = soup1.getText().lstrip()
answer = cleanAnswer(answer)
answerList.append(answer)
print('A: ', answer)
return answerList
def processWithCustomQuestions(questions):
# isCustomQuestionsEnabled = checkConfigForFlag(ENABLE_CUSTOM_QUESTIONS_FILTER)
# print("isCustomQuestionsEnabled : ", isCustomQuestionsEnabled)
if ENABLE_CUSTOM_QUESTIONS_FILTER == False:
return
blackListedQuestions = getBlackListedQuestions()
removeBlackListedQuestions(questions, blackListedQuestions)
print(questions)
def getFaqOfLink(link):
# print("LINK : ", link)
http = httplib2.Http()
status, html = http.request(link)
soup = BeautifulSoup(html, 'html.parser')
body = soup.body
questions = cleanQuestions(soup(text=re.compile(r'\s*((?:how|How|Can|can|what|What|where|Where|describe|Describe|Who|who|When|when|Why|why|Should|should|is|Is|I|Do|do|Are|are|Will|will)[^.<>?]*?\s*\?)')))
# print(questions)
processWithCustomQuestions(questions)
answerList = getAnswers(body, questions)
return questions, answerList
# link = "https://transportation.oregonstate.edu/aabc/frequently-asked-questions"
# questions, answerList = getFaqOfLink(link)
if __name__== "__main__":
with open(FAQ_LINKS, 'r') as myfile:
FAQ_LINKS = myfile.read().split('\n')
faqJsonList = []
for i in range(0, len(FAQ_LINKS)):
link = FAQ_LINKS[i]
questions, answerList = getFaqOfLink(link)
jsonList = convertToJsonList(link, questions, answerList)
faqJsonList.extend(jsonList)
# saveJsonToFile(faqJsonList, "output.txt")
saveToMongo(faqJsonList, COLLECTION_NAME) | import re
import httplib2
from bs4 import BeautifulSoup
from scraping.faqscrapperutil import stripExtra, removeDuplicates, removeBlackListedQuestions, getBlackListedQuestions, convertToJsonList, saveToMongo
from scraping.Constants import ENABLE_CUSTOM_QUESTIONS_FILTER, FAQ_LINKS, COLLECTION_NAME
def cleanQuestions(questions):
questionList = []
for question in questions:
questionList.append(stripExtra(question.lstrip().rstrip()))
return removeDuplicates(questionList)
def getLastAnswer(question, bodyText):
start = bodyText.index(question) + len(question)
text = bodyText[start : -1].lstrip()
# print(text.lstrip())
whitespaceCount = 0
# print(answerLength)
for i in range(0, len(text)):
# print(answer[i], ' isSpace : ', answer[i].isspace())
if text[i].isspace():
whitespaceCount = whitespaceCount + 1
if whitespaceCount >= 3:
# print(0 + i - 3)
# print(text[0 : 0 + i - 2])
return text[0 : 0 + i - 2]
else :
if whitespaceCount != 0:
whitespaceCount = 0
def cleanAnswer(answer):
answerLength = len(answer)
whitespaceCount = 0
# print(answerLength)
for i in range(0, answerLength):
# print(answer[i], ' isSpace : ', answer[i].isspace())
if answer[i].isspace():
whitespaceCount = whitespaceCount + 1
if whitespaceCount >= 3:
# print(0 + i - 3)
return answer[0 : 0 + i - 2].lstrip()
else :
if whitespaceCount != 0:
whitespaceCount = 0
return answer.rstrip()
def getAnswers(body, questions):
bodyText = body.getText()
# answerTag = getAnswerTag(body, bodyText, questions)
# print(bodyText)
questionCount = len(questions)
answerList = []
for i in range(0, questionCount):
print('Q: ', questions[i])
if i == questionCount - 1:
#Last element
answer = getLastAnswer(questions[i], bodyText)
else :
start = bodyText.index(questions[i]) + len(questions[i])
end = bodyText.index(questions[i + 1], start, -1)
print("Start : ", start , " End : ", end)
soup1 = BeautifulSoup(bodyText[start : end], 'html.parser')
# print(soup1)
answer = soup1.getText().lstrip()
answer = cleanAnswer(answer)
answerList.append(answer)
print('A: ', answer)
return answerList
def processWithCustomQuestions(questions):
# isCustomQuestionsEnabled = checkConfigForFlag(ENABLE_CUSTOM_QUESTIONS_FILTER)
# print("isCustomQuestionsEnabled : ", isCustomQuestionsEnabled)
if ENABLE_CUSTOM_QUESTIONS_FILTER == False:
return
blackListedQuestions = getBlackListedQuestions()
removeBlackListedQuestions(questions, blackListedQuestions)
print(questions)
def getFaqOfLink(link):
# print("LINK : ", link)
http = httplib2.Http()
status, html = http.request(link)
soup = BeautifulSoup(html, 'html.parser')
body = soup.body
questions = cleanQuestions(soup(text=re.compile(r'\s*((?:how|How|Can|can|what|What|where|Where|describe|Describe|Who|who|When|when|Why|why|Should|should|is|Is|I|Do|do|Are|are|Will|will)[^.<>?]*?\s*\?)')))
# print(questions)
processWithCustomQuestions(questions)
answerList = getAnswers(body, questions)
return questions, answerList
# link = "https://transportation.oregonstate.edu/aabc/frequently-asked-questions"
# questions, answerList = getFaqOfLink(link)
if __name__== "__main__":
with open(FAQ_LINKS, 'r') as myfile:
FAQ_LINKS = myfile.read().split('\n')
faqJsonList = []
for i in range(0, len(FAQ_LINKS)):
link = FAQ_LINKS[i]
questions, answerList = getFaqOfLink(link)
jsonList = convertToJsonList(link, questions, answerList)
faqJsonList.extend(jsonList)
# saveJsonToFile(faqJsonList, "output.txt")
saveToMongo(faqJsonList, COLLECTION_NAME) | en | 0.484064 | # print(text.lstrip()) # print(answerLength) # print(answer[i], ' isSpace : ', answer[i].isspace()) # print(0 + i - 3) # print(text[0 : 0 + i - 2]) # print(answerLength) # print(answer[i], ' isSpace : ', answer[i].isspace()) # print(0 + i - 3) # answerTag = getAnswerTag(body, bodyText, questions) # print(bodyText) #Last element # print(soup1) # isCustomQuestionsEnabled = checkConfigForFlag(ENABLE_CUSTOM_QUESTIONS_FILTER) # print("isCustomQuestionsEnabled : ", isCustomQuestionsEnabled) # print("LINK : ", link) # print(questions) # link = "https://transportation.oregonstate.edu/aabc/frequently-asked-questions" # questions, answerList = getFaqOfLink(link) # saveJsonToFile(faqJsonList, "output.txt") | 2.941205 | 3 |
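A small illustration of the question-matching idea used in getFaqOfLink above: a regular expression keyed on interrogative lead words and a trailing question mark pulls candidate FAQ questions out of page text. The pattern and sample text below are trimmed-down stand-ins for demonstration, not the scraper's exact inputs.

import re

# Trimmed-down variant of the scraper's question pattern (illustrative only).
QUESTION_RE = re.compile(r'\s*((?:How|Can|What|Where|Who|When|Why|Is|Do|Are|Will)[^.<>?]*\?)')

sample_text = ("Welcome to parking services. How do I get a permit? "
               "Visit the office with your ID. Where can I pay fines? Online or in person.")

questions = [q.strip() for q in QUESTION_RE.findall(sample_text)]
print(questions)  # ['How do I get a permit?', 'Where can I pay fines?']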
messages/term_utils.py | ckousoulis/macos-messages | 0 | 243 | <gh_stars>0
"""Terminal utilities specific to message archives.
Creates colored text and helps write Messages output.
"""
from contextlib import contextmanager
import itertools
import readline
FG_COLORS = dict(itertools.chain(
zip(("black",
"red",
"green",
"yellow",
"blue",
"magenta",
"cyan",
"white",
), range(30, 38)),
zip(("bright_black",
"bright_red",
"bright_green",
"bright_yellow",
"bright_blue",
"bright_magenta",
"bright_cyan",
"bright_white",
), range(90, 98))))
BG_COLORS = dict((f"on_{key}", val + 10) for key, val in FG_COLORS.items())
ATTRIBUTES = dict(
zip(("bold",
"faint",
"italic",
"underline",
"slow_blink",
"rapid_blink",
"reverse",
"conceal",
"strikethrough",
), range(1, 10)))
def colored(text, color=None, on_color=None, attrs=None, escape=False):
"""Wraps text with ANSI escape codes to achieve the desired look.
Args:
color: The foreground color.
on_color: The background color.
attrs: A list of effects.
escape: True to escape invisibles (for readline); else False.
Returns:
A string with the original text wrapped by escape codes.
"""
def sgr(*codes):
return "\x1b[%sm" % ";".join(map(str, codes))
def esc(text):
return "\x01%s\x02" % text
codes = []
if color:
codes.append(FG_COLORS[color])
if on_color:
codes.append(BG_COLORS[on_color])
if attrs:
codes.extend(ATTRIBUTES[attr] for attr in attrs)
if not escape:
esc = lambda n: n
return "%s%s%s" % (esc(sgr(*codes)), text, esc(sgr(0)))
@contextmanager
def readline_disabled():
"""Context manager to temporarily disable readline features.
"""
readline.set_auto_history(False)
try:
yield
finally:
readline.set_auto_history(True)
def confirm(text):
"""Presents a yes/no prompt to the user and handles replies.
Args:
text: A message string to present before confirmation.
Returns:
True if the user confirmed the prompt; else False.
"""
replies = {
"yes": True,
"no": False,
}
prompt = "%s (yes/no): " % colored("Are you sure?", "red",
attrs=["bold"], escape=True)
reply = ""
with readline_disabled():
print(text)
while reply not in replies:
try:
reply = input(prompt).casefold()
except (EOFError, KeyboardInterrupt):
reply = "no"
print(reply)
return replies[reply]
| """Terminal utilities specific to message archives.
Creates colored text and helps write Messages output.
"""
from contextlib import contextmanager
import itertools
import readline
FG_COLORS = dict(itertools.chain(
zip(("black",
"red",
"green",
"yellow",
"blue",
"magenta",
"cyan",
"white",
), range(30, 38)),
zip(("bright_black",
"bright_red",
"bright_green",
"bright_yellow",
"bright_blue",
"bright_magenta",
"bright_cyan",
"bright_white",
), range(90, 98))))
BG_COLORS = dict((f"on_{key}", val + 10) for key, val in FG_COLORS.items())
ATTRIBUTES = dict(
zip(("bold",
"faint",
"italic",
"underline",
"slow_blink",
"rapid_blink",
"reverse",
"conceal",
"strikethrough",
), range(1, 10)))
def colored(text, color=None, on_color=None, attrs=None, escape=False):
"""Wraps text with ANSI escape codes to achieve the desired look.
Args:
color: The foreground color.
on_color: The background color.
attrs: A list of effects.
escape: True to escape invisibles (for readline); else False.
Returns:
A string with the original text wrapped by escape codes.
"""
def sgr(*codes):
return "\x1b[%sm" % ";".join(map(str, codes))
def esc(text):
return "\x01%s\x02" % text
codes = []
if color:
codes.append(FG_COLORS[color])
if on_color:
codes.append(BG_COLORS[on_color])
if attrs:
codes.extend(ATTRIBUTES[attr] for attr in attrs)
if not escape:
esc = lambda n: n
return "%s%s%s" % (esc(sgr(*codes)), text, esc(sgr(0)))
@contextmanager
def readline_disabled():
"""Context manager to temporarily disable readline features.
"""
readline.set_auto_history(False)
try:
yield
finally:
readline.set_auto_history(True)
def confirm(text):
"""Presents a yes/no prompt to the user and handles replies.
Args:
text: A message string to present before confirmation.
Returns:
True if the user confirmed the prompt; else False.
"""
replies = {
"yes": True,
"no": False,
}
prompt = "%s (yes/no): " % colored("Are you sure?", "red",
attrs=["bold"], escape=True)
reply = ""
with readline_disabled():
print(text)
while reply not in replies:
try:
reply = input(prompt).casefold()
except (EOFError, KeyboardInterrupt):
reply = "no"
print(reply)
return replies[reply] | en | 0.666137 | Terminal utilities specific to message archives. Creates colored text and helps write Messages output. Wraps text with ANSI escape codes to achieve the desired look. Args: color: The foreground color. on_color: The background color. attrs: A list of effects. escape: True to escape invisibles (for readline); else False. Returns: A string with the original text wrapped by escape codes. Context manager to temporarily disable readline features. Presents a yes/no prompt to the user and handles replies. Args: text: A message string to present before confirmation. Returns: True if the user confirmed the prompt; else False. | 3.427016 | 3 |
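As a usage note for the colored helper above: with color='red' and attrs=['bold'] it wraps the text in the SGR sequences ESC[31;1m ... ESC[0m. The snippet below re-creates that encoding locally rather than importing the module (whose package layout may differ), just to show the byte-level result.

FG_RED = 31   # value of FG_COLORS['red'] above
BOLD = 1      # value of ATTRIBUTES['bold'] above

def sgr(*codes):
    # Same Select Graphic Rendition encoding used by colored().
    return "\x1b[%sm" % ";".join(map(str, codes))

wrapped = "%s%s%s" % (sgr(FG_RED, BOLD), "alert", sgr(0))
print(repr(wrapped))  # '\x1b[31;1malert\x1b[0m'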
tests/test_subpixel_upsample.py | Project-MONAI/MONAI | 2,971 | 244 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.blocks import SubpixelUpsample
from monai.networks.layers.factories import Conv
TEST_CASE_SUBPIXEL = []
for inch in range(1, 5):
for dim in range(1, 4):
for factor in range(1, 3):
test_case = [
{"dimensions": dim, "in_channels": inch, "scale_factor": factor},
(2, inch, *([8] * dim)),
(2, inch, *([8 * factor] * dim)),
]
TEST_CASE_SUBPIXEL.append(test_case)
TEST_CASE_SUBPIXEL_2D_EXTRA = [
{"dimensions": 2, "in_channels": 2, "scale_factor": 3},
(2, 2, 8, 4), # different size for H and W
(2, 2, 24, 12),
]
TEST_CASE_SUBPIXEL_3D_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
conv_block = nn.Sequential(
Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1)
)
TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)
# add every test back with the pad/pool sequential component omitted
for tests in list(TEST_CASE_SUBPIXEL):
args: dict = tests[0] # type: ignore
args = dict(args)
args["apply_pad_pool"] = False
TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]])
class TestSUBPIXEL(unittest.TestCase):
@parameterized.expand(TEST_CASE_SUBPIXEL)
def test_subpixel_shape(self, input_param, input_shape, expected_shape):
net = SubpixelUpsample(**input_param)
with eval_mode(net):
result = net.forward(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
import torch.nn as nn
from parameterized import parameterized
from monai.networks import eval_mode
from monai.networks.blocks import SubpixelUpsample
from monai.networks.layers.factories import Conv
TEST_CASE_SUBPIXEL = []
for inch in range(1, 5):
for dim in range(1, 4):
for factor in range(1, 3):
test_case = [
{"dimensions": dim, "in_channels": inch, "scale_factor": factor},
(2, inch, *([8] * dim)),
(2, inch, *([8 * factor] * dim)),
]
TEST_CASE_SUBPIXEL.append(test_case)
TEST_CASE_SUBPIXEL_2D_EXTRA = [
{"dimensions": 2, "in_channels": 2, "scale_factor": 3},
(2, 2, 8, 4), # different size for H and W
(2, 2, 24, 12),
]
TEST_CASE_SUBPIXEL_3D_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
conv_block = nn.Sequential(
Conv[Conv.CONV, 3](1, 4, kernel_size=1), Conv[Conv.CONV, 3](4, 8, kernel_size=3, stride=1, padding=1)
)
TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA = [
{"dimensions": 3, "in_channels": 1, "scale_factor": 2, "conv_block": conv_block},
(2, 1, 16, 8, 4), # different size for H, W and D
(2, 1, 32, 16, 8),
]
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_2D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_3D_EXTRA)
TEST_CASE_SUBPIXEL.append(TEST_CASE_SUBPIXEL_CONV_BLOCK_EXTRA)
# add every test back with the pad/pool sequential component omitted
for tests in list(TEST_CASE_SUBPIXEL):
args: dict = tests[0] # type: ignore
args = dict(args)
args["apply_pad_pool"] = False
TEST_CASE_SUBPIXEL.append([args, tests[1], tests[2]])
class TestSUBPIXEL(unittest.TestCase):
@parameterized.expand(TEST_CASE_SUBPIXEL)
def test_subpixel_shape(self, input_param, input_shape, expected_shape):
net = SubpixelUpsample(**input_param)
with eval_mode(net):
result = net.forward(torch.randn(input_shape))
self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| en | 0.83666 | # Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # different size for H and W # different size for H, W and D # different size for H, W and D # add every test back with the pad/pool sequential component omitted # type: ignore | 1.618434 | 2 |
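The shape rule those parameterized cases assert is that SubpixelUpsample keeps the batch and channel dimensions and multiplies every spatial dimension by scale_factor. A plain-Python restatement of the expected-shape calculation (no torch required), checked against one of the cases above:

def expected_subpixel_shape(input_shape, scale_factor):
    # Batch and channels stay fixed; each spatial dim is multiplied by scale_factor.
    batch, channels, *spatial = input_shape
    return (batch, channels, *[d * scale_factor for d in spatial])

# Mirrors TEST_CASE_SUBPIXEL_3D_EXTRA: (2, 1, 16, 8, 4) at scale 2 -> (2, 1, 32, 16, 8).
assert expected_subpixel_shape((2, 1, 16, 8, 4), 2) == (2, 1, 32, 16, 8)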
gen-cfg.py | magetron/secure-flow-prototype | 0 | 245 | <filename>gen-cfg.py
from staticfg import CFGBuilder
userCfg = CFGBuilder().build_from_file('user.py', './auction/user.py')
bidCfg = CFGBuilder().build_from_file('bid.py', './auction/bid.py')
auctionCfg = CFGBuilder().build_from_file('auction.py','./auction/auction.py')
#auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py')
bidCfg.build_visual('bidCfg', 'pdf')
auctionCfg.build_visual('auctionCfg', 'pdf')
#auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf')
| <filename>gen-cfg.py
from staticfg import CFGBuilder
userCfg = CFGBuilder().build_from_file('user.py', './auction/user.py')
bidCfg = CFGBuilder().build_from_file('bid.py', './auction/bid.py')
auctionCfg = CFGBuilder().build_from_file('auction.py','./auction/auction.py')
#auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py')
bidCfg.build_visual('bidCfg', 'pdf')
auctionCfg.build_visual('auctionCfg', 'pdf')
#auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf')
| en | 0.381468 | #auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py') #auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf') | 1.525246 | 2 |
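The same pattern as the script above extends to any other module: build a control-flow graph from a source file, then render it. The file names below are placeholders, and graphviz must be installed for the rendering step.

from staticfg import CFGBuilder

# Placeholder paths; substitute any module in the project.
ordersCfg = CFGBuilder().build_from_file('orders.py', './auction/orders.py')
ordersCfg.build_visual('ordersCfg', 'pdf')  # renders the graph to PDF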
CodeForces/A2OJ Ladder/softuni_problem.py | dimitrov-dimitar/competitive-programming | 0 | 246 | <filename>CodeForces/A2OJ Ladder/softuni_problem.py
total_budget = 0
while True:
destination = input()
if destination == "End":
break
minimal_budget = float(input())
while True:
command = input()
if command == "End":
break
money = float(command)
total_budget += money
if total_budget >= minimal_budget:
print(f"Going to {destination}!")
total_budget = 0
break
| <filename>CodeForces/A2OJ Ladder/softuni_problem.py
total_budget = 0
while True:
destination = input()
if destination == "End":
break
minimal_budget = float(input())
while True:
command = input()
if command == "End":
break
money = float(command)
total_budget += money
if total_budget >= minimal_budget:
print(f"Going to {destination}!")
total_budget = 0
break
| none | 1 | 3.767604 | 4 |
|
footmark/ram/regioninfo.py | rockzhu/footmark | 0 | 247 | from footmark.regioninfo import RegionInfo
class RAMRegionInfo(RegionInfo):
"""
    Represents a RAM region
"""
def __init__(self, connection=None, name=None, id=None,
connection_cls=None):
from footmark.ram.connection import RAMConnection
super(RAMRegionInfo, self).__init__(connection, name, id,
RAMConnection)
| from footmark.regioninfo import RegionInfo
class RAMRegionInfo(RegionInfo):
"""
    Represents a RAM region
"""
def __init__(self, connection=None, name=None, id=None,
connection_cls=None):
from footmark.ram.connection import RAMConnection
super(RAMRegionInfo, self).__init__(connection, name, id,
RAMConnection)
 | en | 0.830251 | Represents a RAM region | 2.45882 | 2 |
glue/plugins/export_d3po.py | sergiopasra/glue | 1 | 248 | from __future__ import absolute_import, division, print_function
import os
import json
from glue.core import Subset
DISPATCH = {}
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
result['plots'] = list(map(save_plot, page, range(len(page))))
return result
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
typ = type(plot)
return DISPATCH[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
:class:`~glue.viewers.scatter.qt.ScatterViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
range=[float(plot.state.x_min), float(plot.state.x_max)])
result['yAxis'] = dict(columnName=plot.state.y_att.label,
range=[float(plot.state.y_min), float(plot.state.y_max)])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.viewers.histogram.qt.HistogramViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
bins=int(plot.state.hist_n_bin),
range=[float(plot.state.hist_x_min), float(plot.state.hist_x_max)])
    # XXX normed, cumulative, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
or None if the tab has no subset
If more than one subset is used per stage/tab, returns None
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, tuple(DISPATCH.keys())):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
from astropy.table import Table, Column
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path, launch=True):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
    :param application: Glue application to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
result['states'] = list(map(save_page, application.viewers,
range(len(viewers)),
application.tab_names,
subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2, sort_keys=True)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
if launch:
launch_d3po(path)
def launch_d3po(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The TLD of the bundle
"""
from glue.external.six.moves.socketserver import TCPServer
from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
print('Serving D3PO on port 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
thread.setDaemon(True) # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
def setup():
from glue.config import exporters
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
<!-- Javscript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
try:
from glue.viewers.scatter.qt import ScatterViewer
from glue.viewers.histogram.qt import HistogramViewer
except ImportError:
pass
else:
DISPATCH[ScatterViewer] = save_scatter
DISPATCH[HistogramViewer] = save_histogram
| from __future__ import absolute_import, division, print_function
import os
import json
from glue.core import Subset
DISPATCH = {}
def save_page(page, page_number, label, subset):
""" Convert a tab of a glue session into a D3PO page
:param page: Tuple of data viewers to save
:param label: Tab label
"""
result = {}
# layout settings
result['grid'] = {'nRows': 1, 'nColumns': len(page)}
result['name'] = str(label)
result['caption'] = 'Generated by Glue'
# style settings
d = page[0]._data[0]
unselected = dict(opacity=d.style.alpha,
size=d.style.markersize / 2,
color=d.style.color)
result['markerStyle'] = dict(unselected=unselected)
if subset is not None:
s = subset.style
selected = dict(opacity=s.alpha, size=s.markersize / 2, color=s.color)
result['markerStyle']['selected'] = selected
result['selection'] = {'type': 'booleanColumn',
'columnName': 'selection_%i' % page_number}
result['histogramStyle'] = result['markerStyle']
# save each plot
result['plots'] = list(map(save_plot, page, range(len(page))))
return result
def save_plot_base(plot, index):
result = {}
result['gridPosition'] = [0, index]
return result
def save_plot(plot, index):
typ = type(plot)
return DISPATCH[typ](plot, index)
def save_scatter(plot, index):
""" Convert a single glue scatter plot to a D3PO plot
:param plot: Glue scatter plot
:class:`~glue.viewers.scatter.qt.ScatterViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'scatter'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
range=[float(plot.state.x_min), float(plot.state.x_max)])
result['yAxis'] = dict(columnName=plot.state.y_att.label,
range=[float(plot.state.y_min), float(plot.state.y_max)])
# XXX log scales
return result
def save_histogram(plot, index):
""" Convert a single histogram to a D3PO plot
:param plot: Glue histogram
:type plot: :class:`~glue.viewers.histogram.qt.HistogramViewer`
:param index: 1D index of plot on the page
:type index: int
:rtype: json-serializable dict
"""
result = save_plot_base(plot, index)
result['type'] = 'histogram'
result['xAxis'] = dict(columnName=plot.state.x_att.label,
bins=int(plot.state.hist_n_bin),
range=[float(plot.state.hist_x_min), float(plot.state.hist_x_max)])
    # XXX normed, cumulative, log
return result
def stage_subsets(application):
"""
Return a tuple of the subset to use for each stage/tab,
or None if the tab has no subset
If more than one subset is used per stage/tab, returns None
"""
result = []
for page in application.viewers:
subset = None
for viewer in page:
for layer_artist in viewer.layers:
if not layer_artist.visible:
continue
s = layer_artist.layer
if not isinstance(s, Subset):
continue
if subset is not None and s is not subset:
return None
if subset is None:
subset = s
result.append(subset)
return tuple(result)
def can_save_d3po(application):
"""
Check whether an application can be exported to D3PO.
Raises an exception if not
"""
dc = application.session.data_collection
if len(dc) != 1:
raise ValueError("D3PO Export only supports a single dataset")
for tab in application.viewers:
for viewer in tab:
if not isinstance(viewer, tuple(DISPATCH.keys())):
raise ValueError("D3PO Export only supports scatter "
"and histogram plots")
if sum(len(tab) for tab in application.viewers) == 0:
raise ValueError("D3PO Export requires at least one scatterplot "
"or histogram")
if stage_subsets(application) is None:
raise ValueError("D3PO Export restricted to 0 or 1 subsets visible "
"in each tab")
def make_data_file(data, subsets, path):
"""
Create the data.csv file, given Data and tuple of subsets
"""
from astropy.table import Table, Column
data_path = os.path.join(path, 'data.csv')
t = Table([data[c] for c in data.components],
names=[c.label for c in data.components])
for i, subset in enumerate(subsets):
if subset is None:
continue
c = Column(data=subset.to_mask().astype('i'), name='selection_%i' % i)
t.add_column(c)
t.write(data_path, format='ascii', delimiter=',')
def save_d3po(application, path, launch=True):
"""Save a Glue session to a D3PO bundle.
Currently, this has the following restrictions:
- The Glue session must have only one dataset open, and 0 or 1 subsets
- Only scatter plots or histograms are present
- At least one plot is present
    :param application: Glue application to save
:param path: Path to directory to save in. Will be created if needed
"""
if os.path.exists(path) and not os.path.isdir(path):
os.unlink(path)
if not os.path.exists(path):
os.mkdir(path)
data = application.session.data_collection[0]
subsets = stage_subsets(application)
viewers = application.viewers
# data.csv
make_data_file(data, subsets, path)
# states.json
result = {}
result['filename'] = 'data.csv' # XXX don't think this is needed?
result['title'] = "Glue export of %s" % data.label
result['states'] = list(map(save_page, application.viewers,
range(len(viewers)),
application.tab_names,
subsets))
state_path = os.path.join(path, 'states.json')
with open(state_path, 'w') as outfile:
json.dump(result, outfile, indent=2, sort_keys=True)
# index.html
html_path = os.path.join(path, 'index.html')
with open(html_path, 'w') as outfile:
outfile.write(HTML)
# show the result
if launch:
launch_d3po(path)
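# Minimal usage sketch (assumes `application` is a live Glue application object; the
# output directory name is arbitrary):
#
#     can_save_d3po(application)                       # raises if the session can't be exported
#     save_d3po(application, '/tmp/d3po_bundle', launch=False)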
def launch_d3po(path):
"""Start a server to view an exported D3PO bundle, and open a browser.
:param path: The TLD of the bundle
"""
from glue.external.six.moves.socketserver import TCPServer
from glue.external.six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from random import randrange
from socket import error
import webbrowser
from threading import Thread
os.chdir(path)
while True:
try:
PORT = randrange(8000, 9000)
server = TCPServer(("", PORT), SimpleHTTPRequestHandler, False)
server.allow_reuse_address = True
server.server_bind()
break
except error: # port already taken
pass
print('Serving D3PO on port 0.0.0.0:%i' % PORT)
server.server_activate()
thread = Thread(target=server.serve_forever)
thread.setDaemon(True) # do not prevent shutdown
thread.start()
webbrowser.open('http://0.0.0.0:%i' % PORT)
def setup():
from glue.config import exporters
exporters.add('D3PO', save_d3po, can_save_d3po, outmode='directory')
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8" />
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css">
<link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css">
<link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'>
<style>
#footer {
position: fixed;
bottom: 0;
right: 0;
}
</style>
<!-- not to be confused with Planet Telex -->
<!-- JavaScript dependencies -->
<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
<script src="http://d3po.org/static/js/util.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
<script src="http://d3po.org/static/js/d3po.js"></script>
<script src="http://d3po.org/static/js/d3po.init.js"></script>
</head>
<body>
<div id="svg"><svg></svg></div>
<div id="controls">
<ul class="navigation">
</ul>
</div>
<div id="caption"></div>
<div id="footer">
More information: <a href="http://d3po.org">d3po.org</a>
</div>
<script type="text/javascript">
$(document).ready(function() {
initialize('states.json', 'data.csv');
}
);
</script>
</body>
</html>
"""
try:
from glue.viewers.scatter.qt import ScatterViewer
from glue.viewers.histogram.qt import HistogramViewer
except ImportError:
pass
else:
DISPATCH[ScatterViewer] = save_scatter
DISPATCH[HistogramViewer] = save_histogram
| en | 0.482933 | Convert a tab of a glue session into a D3PO page :param page: Tuple of data viewers to save :param label: Tab label # layout settings # style settings # save each plot Convert a single glue scatter plot to a D3PO plot :param plot: Glue scatter plot :class:`~glue.viewers.scatter.qt.ScatterViewer` :param index: 1D index of plot on the page :type index: int :rtype: json-serializable dict # XXX log scales Convert a single histogram to a D3PO plot :param plot: Glue histogram :type plot: :class:`~glue.viewers.histogram.qt.HistogramViewer` :param index: 1D index of plot on the page :type index: int :rtype: json-serializable dict # XXX normed, cumultive, log Return a tuple of the subset to use for each stage/tab, or None if the tab has no subset If more than one subset is used per stage/tab, returns None Check whether an application can be exported to D3PO. Raises an exception if not Create the data.csv file, given Data and tuple of subsets Save a Glue session to a D3PO bundle. Currently, this has the following restrictions: - The Glue session must have only one dataset open, and 0 or 1 subsets - Only scatter plots or histograms are present - At least one plot is present :param application: Glue appication to save :param path: Path to directory to save in. Will be created if needed # data.csv # states.json # XXX don't think this is needed? # index.html # show the result Start a server to view an exported D3PO bundle, and open a browser. :param path: The TLD of the bundle # port already taken # do not prevent shutdown <!DOCTYPE html> <html> <head> <meta charset="utf-8" /> <link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/style.css"> <link rel="stylesheet" type="text/css" href="http://d3po.org/static/css/d3po.css"> <link href='http://fonts.googleapis.com/css?family=Source+Sans+Pro:100,200,300,400,700' rel='stylesheet' type='text/css'> <style> #footer { position: fixed; bottom: 0; right: 0; } </style> <!-- not to be confused with Planet Telex --> <!-- Javscript dependencies --> <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script> <script src="http://d3po.org/static/js/util.js"></script> <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script> <script src="http://d3po.org/static/js/d3po.js"></script> <script src="http://d3po.org/static/js/d3po.init.js"></script> </head> <body> <div id="svg"><svg></svg></div> <div id="controls"> <ul class="navigation"> </ul> </div> <div id="caption"></div> <div id="footer"> More information: <a href="http://d3po.org">d3po.org</a> </div> <script type="text/javascript"> $(document).ready(function() { initialize('states.json', 'data.csv'); } ); </script> </body> </html> | 2.499443 | 2 |
exercicios/ex 061 a 070/ex061.py | CarlosWillian/python | 0 | 249 | <gh_stars>0
print('Crie sua P.A. de 10 termos')
n1 = int(input('Digite o primeiro termo da P.A.: '))
r = int(input('Digite a razão: '))
termo = n1
c = 1
print('A P.A. é (', end='')
while c <= 10:
print('{}'.format(termo), end='')
print(', ' if c < 10 else '', end='')
termo += r
c += 1
print(')')
| print('Crie sua P.A. de 10 termos')
n1 = int(input('Digite o primeiro termo da P.A.: '))
r = int(input('Digite a razão: '))
termo = n1
c = 1
print('A P.A. é (', end='')
while c <= 10:
print('{}'.format(termo), end='')
print(', ' if c < 10 else '', end='')
termo += r
c += 1
print(')') | none | 1 | 3.846659 | 4 |
|
src/pyrqlite/connections.py | zmedico/pyrqlite | 2 | 250 | <reponame>zmedico/pyrqlite
from __future__ import unicode_literals
import codecs
import logging
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
# pylint: disable=import-error
from httplib import HTTPConnection, HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
# pylint: disable=import-error
from urlparse import urlparse
from .constants import (
UNLIMITED_REDIRECTS,
)
from .cursors import Cursor
from ._ephemeral import EphemeralRqlited as _EphemeralRqlited
from .extensions import PARSE_DECLTYPES, PARSE_COLNAMES
class Connection(object):
from .exceptions import (
Warning,
Error,
InterfaceError,
DatabaseError,
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
)
def __init__(self, scheme='http', host='localhost', port=4001,
user=None, password=None, connect_timeout=None,
detect_types=0, max_redirects=UNLIMITED_REDIRECTS):
self.messages = []
self.scheme = scheme
self.host = host
self.port = port
self._headers = {}
if not (user is None or password is None):
self._headers['Authorization'] = 'Basic ' + \
codecs.encode('{}:{}'.format(user, password).encode('utf-8'),
'base64').decode('utf-8').rstrip('\n')
self.connect_timeout = connect_timeout
self.max_redirects = max_redirects
self.detect_types = detect_types
self.parse_decltypes = detect_types & PARSE_DECLTYPES
self.parse_colnames = detect_types & PARSE_COLNAMES
self._ephemeral = None
if scheme == ':memory:':
self._ephemeral = _EphemeralRqlited().__enter__()
self.host, self.port = self._ephemeral.http
self._connection = self._init_connection()
def _init_connection(self):
if self.scheme in ('http', ':memory:'):
cls = HTTPConnection
elif self.scheme == 'https':
cls = HTTPSConnection
else:
raise Connection.ProgrammingError('Unsupported scheme %r' % self.scheme)
return cls(self.host, port=self.port,
timeout=None if self.connect_timeout is None else float(self.connect_timeout))
def _retry_request(self, method, uri, body=None, headers={}):
tries = 10
while tries:
tries -= 1
try:
self._connection.request(method, uri, body=body,
headers=dict(self._headers, **headers))
return self._connection.getresponse()
except Exception:
if not tries:
raise
self._connection.close()
self._connection = self._init_connection()
def _fetch_response(self, method, uri, body=None, headers={}):
"""
Fetch a response, handling redirection.
"""
response = self._retry_request(method, uri, body=body, headers=headers)
redirects = 0
while response.status == 301 and \
response.getheader('Location') is not None and \
(self.max_redirects == UNLIMITED_REDIRECTS or redirects < self.max_redirects):
redirects += 1
uri = response.getheader('Location')
location = urlparse(uri)
logging.getLogger(__name__).debug("status: %s reason: '%s' location: '%s'",
response.status, response.reason, uri)
if self.host != location.hostname or self.port != location.port:
self._connection.close()
self.host = location.hostname
self.port = location.port
self._connection = self._init_connection()
response = self._retry_request(method, uri, body=body, headers=headers)
return response
def close(self):
"""Close the connection now (rather than whenever .__del__() is
called).
The connection will be unusable from this point forward; an
Error (or subclass) exception will be raised if any operation
is attempted with the connection. The same applies to all
cursor objects trying to use the connection. Note that closing
a connection without committing the changes first will cause an
implicit rollback to be performed."""
self._connection.close()
if self._ephemeral is not None:
self._ephemeral.__exit__(None, None, None)
self._ephemeral = None
def __del__(self):
self.close()
def commit(self):
"""Database modules that do not support transactions should
implement this method with void functionality."""
pass
def rollback(self):
"""This method is optional since not all databases provide
transaction support. """
pass
def cursor(self, factory=None):
"""Return a new Cursor Object using the connection."""
if factory:
return factory(self)
else:
return Cursor(self)
def execute(self, *args, **kwargs):
return self.cursor().execute(*args, **kwargs)
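# Rough usage sketch (hypothetical host/port; assumes an rqlite node is reachable and
# that Cursor exposes the usual DB-API style execute/fetch methods):
#
#     conn = Connection(host='localhost', port=4001)
#     cur = conn.cursor()
#     cur.execute('SELECT 1')
#     conn.close()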
| from __future__ import unicode_literals
import codecs
import logging
try:
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
# pylint: disable=import-error
from httplib import HTTPConnection, HTTPSConnection
try:
from urllib.parse import urlparse
except ImportError:
# pylint: disable=import-error
from urlparse import urlparse
from .constants import (
UNLIMITED_REDIRECTS,
)
from .cursors import Cursor
from ._ephemeral import EphemeralRqlited as _EphemeralRqlited
from .extensions import PARSE_DECLTYPES, PARSE_COLNAMES
class Connection(object):
from .exceptions import (
Warning,
Error,
InterfaceError,
DatabaseError,
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
)
def __init__(self, scheme='http', host='localhost', port=4001,
user=None, password=None, connect_timeout=None,
detect_types=0, max_redirects=UNLIMITED_REDIRECTS):
self.messages = []
self.scheme = scheme
self.host = host
self.port = port
self._headers = {}
if not (user is None or password is None):
self._headers['Authorization'] = 'Basic ' + \
codecs.encode('{}:{}'.format(user, password).encode('utf-8'),
'base64').decode('utf-8').rstrip('\n')
self.connect_timeout = connect_timeout
self.max_redirects = max_redirects
self.detect_types = detect_types
self.parse_decltypes = detect_types & PARSE_DECLTYPES
self.parse_colnames = detect_types & PARSE_COLNAMES
self._ephemeral = None
if scheme == ':memory:':
self._ephemeral = _EphemeralRqlited().__enter__()
self.host, self.port = self._ephemeral.http
self._connection = self._init_connection()
def _init_connection(self):
if self.scheme in ('http', ':memory:'):
cls = HTTPConnection
elif self.scheme == 'https':
cls = HTTPSConnection
else:
raise Connection.ProgrammingError('Unsupported scheme %r' % self.scheme)
return cls(self.host, port=self.port,
timeout=None if self.connect_timeout is None else float(self.connect_timeout))
def _retry_request(self, method, uri, body=None, headers={}):
tries = 10
while tries:
tries -= 1
try:
self._connection.request(method, uri, body=body,
headers=dict(self._headers, **headers))
return self._connection.getresponse()
except Exception:
if not tries:
raise
self._connection.close()
self._connection = self._init_connection()
def _fetch_response(self, method, uri, body=None, headers={}):
"""
Fetch a response, handling redirection.
"""
response = self._retry_request(method, uri, body=body, headers=headers)
redirects = 0
while response.status == 301 and \
response.getheader('Location') is not None and \
(self.max_redirects == UNLIMITED_REDIRECTS or redirects < self.max_redirects):
redirects += 1
uri = response.getheader('Location')
location = urlparse(uri)
logging.getLogger(__name__).debug("status: %s reason: '%s' location: '%s'",
response.status, response.reason, uri)
if self.host != location.hostname or self.port != location.port:
self._connection.close()
self.host = location.hostname
self.port = location.port
self._connection = self._init_connection()
response = self._retry_request(method, uri, body=body, headers=headers)
return response
def close(self):
"""Close the connection now (rather than whenever .__del__() is
called).
The connection will be unusable from this point forward; an
Error (or subclass) exception will be raised if any operation
is attempted with the connection. The same applies to all
cursor objects trying to use the connection. Note that closing
a connection without committing the changes first will cause an
implicit rollback to be performed."""
self._connection.close()
if self._ephemeral is not None:
self._ephemeral.__exit__(None, None, None)
self._ephemeral = None
def __del__(self):
self.close()
def commit(self):
"""Database modules that do not support transactions should
implement this method with void functionality."""
pass
def rollback(self):
"""This method is optional since not all databases provide
transaction support. """
pass
def cursor(self, factory=None):
"""Return a new Cursor Object using the connection."""
if factory:
return factory(self)
else:
return Cursor(self)
def execute(self, *args, **kwargs):
return self.cursor().execute(*args, **kwargs) | en | 0.824377 | # pylint: disable=import-error # pylint: disable=import-error Fetch a response, handling redirection. Close the connection now (rather than whenever .__del__() is called). The connection will be unusable from this point forward; an Error (or subclass) exception will be raised if any operation is attempted with the connection. The same applies to all cursor objects trying to use the connection. Note that closing a connection without committing the changes first will cause an implicit rollback to be performed. Database modules that do not support transactions should implement this method with void functionality. This method is optional since not all databases provide transaction support. Return a new Cursor Object using the connection. | 2.233063 | 2 |
PythonBasics/ConditionalStatements/Exercise/toy_shop.py | achoraev/SoftUni | 0 | 251 | <filename>PythonBasics/ConditionalStatements/Exercise/toy_shop.py<gh_stars>0
price = float(input())
puzzles = int(input())
dolls = int(input())
bears = int(input())
minions = int(input())
trucks = int(input())
total_toys = puzzles + dolls + bears + minions + trucks
price_puzzles = puzzles * 2.6
price_dolls = dolls * 3
price_bears = bears * 4.1
price_minions = minions * 8.2
price_trucks = trucks * 2
total_price = price_puzzles + price_dolls + price_bears + price_minions + price_trucks
if total_toys >= 50:
total_price = total_price - (total_price * 0.25)
rent = total_price * 0.1
total_price = total_price - rent
if total_price >= price:
print(f"Yes! {(total_price - price):.2f} lv left.")
else:
print(f"Not enough money! {(price - total_price):.2f} lv needed.")
| <filename>PythonBasics/ConditionalStatements/Exercise/toy_shop.py<gh_stars>0
price = float(input())
puzzles = int(input())
dolls = int(input())
bears = int(input())
minions = int(input())
trucks = int(input())
total_toys = puzzles + dolls + bears + minions + trucks
price_puzzles = puzzles * 2.6
price_dolls = dolls * 3
price_bears = bears * 4.1
price_minions = minions * 8.2
price_trucks = trucks * 2
total_price = price_puzzles + price_dolls + price_bears + price_minions + price_trucks
if total_toys >= 50:
total_price = total_price - (total_price * 0.25)
rent = total_price * 0.1
total_price = total_price - rent
if total_price >= price:
print(f"Yes! {(total_price - price):.2f} lv left.")
else:
print(f"Not enough money! {(price - total_price):.2f} lv needed.")
| none | 1 | 3.81125 | 4 |
|
ironic/tests/api/utils.py | citrix-openstack-build/ironic | 0 | 252 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for testing the API service.
"""
import datetime
import json
ADMIN_TOKEN = '<PASSWORD>'
MEMBER_TOKEN = '<PASSWORD>'
class FakeMemcache(object):
"""Fake cache that is used for keystone tokens lookup."""
_cache = {
'tokens/%s' % ADMIN_TOKEN: {
'access': {
'token': {'id': ADMIN_TOKEN},
'user': {'id': 'user_id1',
'name': 'user_name1',
'tenantId': '123i2910',
'tenantName': 'mytenant',
'roles': [{'name': 'admin'}]
},
}
},
'tokens/%s' % MEMBER_TOKEN: {
'access': {
'token': {'id': MEMBER_TOKEN},
'user': {'id': 'user_id2',
'name': 'user-good',
'tenantId': 'project-good',
'tenantName': 'goodies',
'roles': [{'name': 'Member'}]
}
}
}
}
def __init__(self):
self.set_key = None
self.set_value = None
self.token_expiration = None
def get(self, key):
dt = datetime.datetime.now() + datetime.timedelta(minutes=5)
return json.dumps((self._cache.get(key), dt.strftime('%s')))
def set(self, key, value, timeout=None):
self.set_value = value
self.set_key = key
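# Usage sketch: get() returns a JSON-encoded (entry, expiration) pair for the fake
# tokens defined above, mirroring what the keystone token lookup would decode:
#
#     cache = FakeMemcache()
#     entry, expires = json.loads(cache.get('tokens/%s' % ADMIN_TOKEN))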
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utils for testing the API service.
"""
import datetime
import json
ADMIN_TOKEN = '<PASSWORD>'
MEMBER_TOKEN = '<PASSWORD>'
class FakeMemcache(object):
"""Fake cache that is used for keystone tokens lookup."""
_cache = {
'tokens/%s' % ADMIN_TOKEN: {
'access': {
'token': {'id': ADMIN_TOKEN},
'user': {'id': 'user_id1',
'name': 'user_name1',
'tenantId': '123i2910',
'tenantName': 'mytenant',
'roles': [{'name': 'admin'}]
},
}
},
'tokens/%s' % MEMBER_TOKEN: {
'access': {
'token': {'id': MEMBER_TOKEN},
'user': {'id': 'user_id2',
'name': 'user-good',
'tenantId': 'project-good',
'tenantName': 'goodies',
'roles': [{'name': 'Member'}]
}
}
}
}
def __init__(self):
self.set_key = None
self.set_value = None
self.token_expiration = None
def get(self, key):
dt = datetime.datetime.now() + datetime.timedelta(minutes=5)
return json.dumps((self._cache.get(key), dt.strftime('%s')))
def set(self, key, value, timeout=None):
self.set_value = value
self.set_key = key
| en | 0.822265 | # vim: tabstop=4 shiftwidth=4 softtabstop=4 # -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Utils for testing the API service. Fake cache that is used for keystone tokens lookup. | 1.963549 | 2 |
bookshelf/main/forms.py | thewordisbird/bookshelf | 0 | 253 | <reponame>thewordisbird/bookshelf
import datetime
from flask_wtf import FlaskForm
from wtforms import (
StringField,
TextAreaField,
DateTimeField,
HiddenField,
PasswordField,
)
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
class NullableDateTimeField(DateTimeField):
"""Modify DateField to allow for Null values"""
def process_formdata(self, valuelist):
# Bypasses wtForms validation for blank datetime field.
if valuelist:
date_str = " ".join(valuelist).strip()
if date_str == "":
self.data = None
return
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext("Not a valid date value"))
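# Behaviour sketch for the field above (illustrative only): with format "%m/%d/%Y",
# an empty submission leaves `data` as None instead of raising, while a well-formed
# date parses normally, e.g.
#
#     field.process_formdata([''])            # field.data -> None
#     field.process_formdata(['07/04/2021'])  # field.data -> datetime(2021, 7, 4, 0, 0)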
class SearchForm(FlaskForm):
search = StringField("Search", validators=[DataRequired()])
class ReviewForm(FlaskForm):
rating = HiddenField("Rating", validators=[DataRequired()])
review_title = StringField("Headline")
review_content = TextAreaField("Review")
date_started = NullableDateTimeField("Date Started", format="%m/%d/%Y")
date_finished = NullableDateTimeField("Date Finished", format="%m/%d/%Y")
def validate_date_finished(self, date_finished):
if self.date_started.data and date_finished.data:
if self.date_started.data > date_finished.data:
print("Date finished must be greater than or equal to date started")
raise ValidationError(
"Date finished must be greater than or equal to date started."
)
elif self.date_started.data or date_finished.data:
print("missing date")
raise ValidationError("If setting read dates, both dates are required.")
class EditProfileForm(FlaskForm):
display_name = StringField("Name", validators=[])
email = StringField("Email", validators=[Email(message="Invalid Email Address.")])
password = PasswordField(
"Password",
validators=[EqualTo("confirm_password", message="Passwords must match.")],
)
confirm_password = PasswordField("Confirm Password", validators=[])
| import datetime
from flask_wtf import FlaskForm
from wtforms import (
StringField,
TextAreaField,
DateTimeField,
HiddenField,
PasswordField,
)
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
class NullableDateTimeField(DateTimeField):
"""Modify DateField to allow for Null values"""
def process_formdata(self, valuelist):
# Bypasses wtForms validation for blank datetime field.
if valuelist:
date_str = " ".join(valuelist).strip()
if date_str == "":
self.data = None
return
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext("Not a valid date value"))
class SearchForm(FlaskForm):
search = StringField("Search", validators=[DataRequired()])
class ReviewForm(FlaskForm):
rating = HiddenField("Rating", validators=[DataRequired()])
review_title = StringField("Headline")
review_content = TextAreaField("Review")
date_started = NullableDateTimeField("Date Started", format="%m/%d/%Y")
date_finished = NullableDateTimeField("Date Finished", format="%m/%d/%Y")
def validate_date_finished(self, date_finished):
if self.date_started.data and date_finished.data:
if self.date_started.data > date_finished.data:
print("Date finished must be greater than or equal to date started")
raise ValidationError(
"Date finished must be greater than or equal to date started."
)
elif self.date_started.data or date_finished.data:
print("missing date")
raise ValidationError("If setting read dates, both dates are required.")
class EditProfileForm(FlaskForm):
display_name = StringField("Name", validators=[])
email = StringField("Email", validators=[Email(message="Invalid Email Address.")])
password = PasswordField(
"Password",
validators=[EqualTo("confirm_password", message="Passwords must match.")],
)
confirm_password = PasswordField("Confirm Password", validators=[]) | en | 0.487782 | Modify DateField to allow for Null values # Bypasses wtForms validation for blank datetime field. | 3.106429 | 3 |
main.py | DanielM24/Romanian-sub-dialect-identificator | 0 | 254 | # -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Import the libraries
import numpy as np
import pandas as pd # pandas for reading the files
from sklearn import preprocessing
from sklearn import svm # import the model
from sklearn.feature_extraction.text import TfidfVectorizer # model the data to obtain numeric values from the text
from sklearn.metrics import classification_report, confusion_matrix
# Load the data
train_labels = pd.read_csv('train_labels.txt', sep='\t', header=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame into an array
train_labels = train_labels[:,1] # keep only the labels
train_samples = pd.read_csv('train_samples.txt', sep='\t', header=None, engine='python')
train_samples = train_samples.to_numpy()
train_samples = train_samples[:,1] # keep only the words
validation_samples = pd.read_csv('validation_samples.txt', sep='\t', header=None, engine='python')
validation_samples = validation_samples.to_numpy()
validation_samples = validation_samples[:,1] # save the words
validation_labels = pd.read_csv('validation_labels.txt', sep='\t', header=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1] # keep only the labels
test_samples = pd.read_csv('test_samples.txt', sep='\t', header=None, engine='python')
test_samples = test_samples.to_numpy()
label = test_samples[:,0] # save the labels
test_samples = test_samples[:,1] # save the words
def normalize_data(train_data, test_data, type='l2'): # function that returns the normalized data
# the normalization type is set to l2 by default
scaler = None
if type == 'standard':
scaler = preprocessing.StandardScaler()
elif type == 'min_max':
scaler = preprocessing.MinMaxScaler()
elif type == 'l1' or type == 'l2':
scaler = preprocessing.Normalizer(norm = type)
if scaler is not None:
scaler.fit(train_data)
scaled_train_data = scaler.transform(train_data)
scaled_test_data = scaler.transform(test_data)
return scaled_train_data, scaled_test_data
else:
return train_data, test_data
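# Usage sketch: the helper also supports the other scaler types defined above, e.g. L1
# normalization of the TF-IDF features built below (illustrative only; the script keeps
# the default L2):
#
#     norm_train_l1, norm_test_l1 = normalize_data(training_features, testing_features, type='l1')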
# Model the data
vectorizer = TfidfVectorizer()
training_features = vectorizer.fit_transform(train_samples)
validation_features = vectorizer.transform(validation_samples)
testing_features = vectorizer.transform(test_samples)
# Normalize the data
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Apply the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110) # define the model
model_svm.fit(norm_train, train_labels) # the training process
test_predictions = model_svm.predict(norm_test) # prediction on the test data
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Export the data in CSV format
test_export = {'id':label,'label':test_predictions}
data_f = pd.DataFrame(test_export)
data_f.to_csv('test_submission.csv',index=False) | # -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Import the libraries
import numpy as np
import pandas as pd # pandas for reading the files
from sklearn import preprocessing
from sklearn import svm # import the model
from sklearn.feature_extraction.text import TfidfVectorizer # model the data to obtain numeric values from the text
from sklearn.metrics import classification_report, confusion_matrix
# Load the data
train_labels = pd.read_csv('train_labels.txt', sep='\t', header=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame into an array
train_labels = train_labels[:,1] # keep only the labels
train_samples = pd.read_csv('train_samples.txt', sep='\t', header=None, engine='python')
train_samples = train_samples.to_numpy()
train_samples = train_samples[:,1] # keep only the words
validation_samples = pd.read_csv('validation_samples.txt', sep='\t', header=None, engine='python')
validation_samples = validation_samples.to_numpy()
validation_samples = validation_samples[:,1] # save the words
validation_labels = pd.read_csv('validation_labels.txt', sep='\t', header=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1] # keep only the labels
test_samples = pd.read_csv('test_samples.txt', sep='\t', header=None, engine='python')
test_samples = test_samples.to_numpy()
label = test_samples[:,0] # save the labels
test_samples = test_samples[:,1] # save the words
def normalize_data(train_data, test_data, type='l2'): # function that returns the normalized data
# the normalization type is set to l2 by default
scaler = None
if type == 'standard':
scaler = preprocessing.StandardScaler()
elif type == 'min_max':
scaler = preprocessing.MinMaxScaler()
elif type == 'l1' or type == 'l2':
scaler = preprocessing.Normalizer(norm = type)
if scaler is not None:
scaler.fit(train_data)
scaled_train_data = scaler.transform(train_data)
scaled_test_data = scaler.transform(test_data)
return scaled_train_data, scaled_test_data
else:
return train_data, test_data
# Model the data
vectorizer = TfidfVectorizer()
training_features = vectorizer.fit_transform(train_samples)
validation_features = vectorizer.transform(validation_samples)
testing_features = vectorizer.transform(test_samples)
# Normalize the data
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Apply the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110) # define the model
model_svm.fit(norm_train, train_labels) # the training process
test_predictions = model_svm.predict(norm_test) # prediction on the test data
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Export the data in CSV format
test_export = {'id':label,'label':test_predictions}
data_f = pd.DataFrame(test_export)
data_f.to_csv('test_submission.csv',index=False) | ro | 0.493865 | # -*- coding: utf-8 -*- Proiect.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw # Importarea librariilor # pandas pentru citirea fisierelor # importarea modelului # modelarea datelor pentru a obtine valori numerice din text # Incarcarea datelor # convertim data frame-ul intr-un vector # pastram doar etichetele # pastram doar cuvintele # salvam cuvintele # pastram doar etichetele # salvam etichetele # salvam cuvintele # functia care intoarce datele normalizate #tipul de normalizare este setat implicit la l2 # Modelarea datelor # Normalizarea datelor # Aplicam modelul SVM # definim modelul # procesul de invatare # predictie pe datele de test # Exportarea datelor in format CSV | 2.596723 | 3 |
logger/__init__.py | remmyzen/nqs-tensorflow2 | 4 | 255 | from .logger import Logger
from .logger_supervised import LoggerSupervised
| from .logger import Logger
from .logger_supervised import LoggerSupervised
| none | 1 | 1.037343 | 1 |
|
flytekit/core/workflow.py | milton0825/flytekit | 0 | 256 | from __future__ import annotations
import collections
import inspect
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from flytekit.common import constants as _common_constants
from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException
from flytekit.core.base_task import PythonTask
from flytekit.core.class_based_resolver import ClassStorageTaskResolver
from flytekit.core.condition import ConditionalSection
from flytekit.core.context_manager import (
BranchEvalMode,
CompilationState,
ExecutionState,
FlyteContext,
FlyteContextManager,
FlyteEntities,
)
from flytekit.core.interface import (
Interface,
transform_inputs_to_parameters,
transform_interface_to_typed_interface,
transform_signature_to_interface,
)
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.node import Node
from flytekit.core.promise import (
NodeOutput,
Promise,
VoidPromise,
binding_from_python_std,
create_and_link_node,
create_native_named_tuple,
create_task_output,
translate_inputs_to_literals,
)
from flytekit.core.python_auto_container import PythonAutoContainerTask
from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference
from flytekit.core.type_engine import TypeEngine
from flytekit.loggers import logger
from flytekit.models import interface as _interface_models
from flytekit.models import literals as _literal_models
from flytekit.models.core import workflow as _workflow_model
GLOBAL_START_NODE = Node(
id=_common_constants.GLOBAL_INPUT_NODE_ID,
metadata=None,
bindings=[],
upstream_nodes=[],
flyte_entity=None,
)
class WorkflowFailurePolicy(Enum):
FAIL_IMMEDIATELY = _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_IMMEDIATELY
FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = (
_workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
)
@dataclass
class WorkflowMetadata(object):
on_failure: WorkflowFailurePolicy
def __post_init__(self):
if (
self.on_failure != WorkflowFailurePolicy.FAIL_IMMEDIATELY
and self.on_failure != WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
):
raise FlyteValidationException(f"Failure policy {self.on_failure} not acceptable")
def to_flyte_model(self):
if self.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY:
on_failure = 0
else:
on_failure = 1
return _workflow_model.WorkflowMetadata(on_failure=on_failure)
@dataclass
class WorkflowMetadataDefaults(object):
"""
This class is similarly named to the one above. Please see the IDL for more information but essentially, this
WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas
WorkflowMetadata represents metadata about the workflow itself.
"""
interruptible: bool
def __post_init__(self):
if self.interruptible is not True and self.interruptible is not False:
raise FlyteValidationException(f"Interruptible must be boolean, {self.interruptible} invalid")
def to_flyte_model(self):
return _workflow_model.WorkflowMetadataDefaults(interruptible=self.interruptible)
def construct_input_promises(inputs: List[str]):
return {
input_name: Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
for input_name in inputs
}
def get_promise(binding_data: _literal_models.BindingData, outputs_cache: Dict[Node, Dict[str, Promise]]) -> Promise:
"""
This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see
get_promise_map for the rest of the details.
"""
if binding_data.promise is not None:
if not isinstance(binding_data.promise, NodeOutput):
raise FlyteValidationException(
f"Binding data Promises have to be of the NodeOutput type {type(binding_data.promise)} found"
)
# b.var is the name of the input to the task
# binding_data.promise.var is the name of the upstream node's output we want
return outputs_cache[binding_data.promise.node][binding_data.promise.var]
elif binding_data.scalar is not None:
return Promise(var="placeholder", val=_literal_models.Literal(scalar=binding_data.scalar))
elif binding_data.collection is not None:
literals = []
for bd in binding_data.collection.bindings:
p = get_promise(bd, outputs_cache)
literals.append(p.val)
return Promise(
var="placeholder",
val=_literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals)),
)
elif binding_data.map is not None:
literals = {}
for k, bd in binding_data.map.bindings.items():
p = get_promise(bd, outputs_cache)
literals[k] = p.val
return Promise(
var="placeholder", val=_literal_models.Literal(map=_literal_models.LiteralMap(literals=literals))
)
raise FlyteValidationException("Binding type unrecognized.")
def get_promise_map(
bindings: List[_literal_models.Binding], outputs_cache: Dict[Node, Dict[str, Promise]]
) -> Dict[str, Promise]:
"""
Local execution of imperatively defined workflows is done node by node. This function will fill in the node's
entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs.
Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed
nodes and filling in the necessary inputs.
"""
entity_kwargs = {}
for b in bindings:
entity_kwargs[b.var] = get_promise(b.binding, outputs_cache)
return entity_kwargs
class WorkflowBase(object):
def __init__(
self,
name: str,
workflow_metadata: WorkflowMetadata,
workflow_metadata_defaults: WorkflowMetadataDefaults,
python_interface: Interface,
**kwargs,
):
self._name = name
self._workflow_metadata = workflow_metadata
self._workflow_metadata_defaults = workflow_metadata_defaults
self._python_interface = python_interface
self._interface = transform_interface_to_typed_interface(python_interface)
self._inputs = {}
self._unbound_inputs = set()
self._nodes = []
self._output_bindings: Optional[List[_literal_models.Binding]] = []
FlyteEntities.entities.append(self)
super().__init__(**kwargs)
@property
def name(self) -> str:
return self._name
@property
def short_name(self) -> str:
return self._name.split(".")[-1]
@property
def workflow_metadata(self) -> Optional[WorkflowMetadata]:
return self._workflow_metadata
@property
def workflow_metadata_defaults(self):
return self._workflow_metadata_defaults
@property
def python_interface(self) -> Interface:
return self._python_interface
@property
def interface(self) -> _interface_models.TypedInterface:
return self._interface
@property
def output_bindings(self) -> List[_literal_models.Binding]:
return self._output_bindings
@property
def nodes(self) -> List[Node]:
return self._nodes
def __repr__(self):
return (
f"WorkflowBase - {self._name} && "
f"Inputs ({len(self._python_interface.inputs)}): {self._python_interface.inputs} && "
f"Outputs ({len(self._python_interface.outputs)}): {self._python_interface.outputs} && "
f"Output bindings: {self._output_bindings} && "
)
def __call__(self, *args, **kwargs):
"""
The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution,
it goes
__call__ -> _local_execute -> execute
From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python
function is run, for the ImperativeWorkflow, each node is run one at a time.
"""
if len(args) > 0:
raise AssertionError("Only Keyword Arguments are supported for Workflow executions")
ctx = FlyteContextManager.current_context()
# Get default arguments and override with kwargs passed in
input_kwargs = self.python_interface.default_inputs_as_kwargs
input_kwargs.update(kwargs)
# The first condition is compilation.
if ctx.compilation_state is not None:
return create_and_link_node(ctx, entity=self, interface=self.python_interface, **input_kwargs)
# This condition is hit when this workflow (self) is being called as part of a parent's workflow local run.
# The context specifying the local workflow execution has already been set.
elif (
ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION
):
if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED:
if self.python_interface and self.python_interface.output_tuple_name:
variables = [k for k in self.python_interface.outputs.keys()]
output_tuple = collections.namedtuple(self.python_interface.output_tuple_name, variables)
nones = [None for _ in self.python_interface.outputs.keys()]
return output_tuple(*nones)
else:
return None
# We are already in a local execution, just continue the execution context
return self._local_execute(ctx, **input_kwargs)
# Last is starting a local workflow execution
else:
# Run some sanity checks
# Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the
# conversion here in this loop. The reason is because we don't prevent users from specifying inputs
# as direct scalars, which means there's another Promise-generating loop inside _local_execute too
for k, v in input_kwargs.items():
if k not in self.interface.inputs:
raise ValueError(f"Received unexpected keyword argument {k}")
if isinstance(v, Promise):
raise ValueError(f"Received a promise for a workflow call, when expecting a native value for {k}")
with FlyteContextManager.with_context(
ctx.with_execution_state(
ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION)
)
) as child_ctx:
result = self._local_execute(child_ctx, **input_kwargs)
expected_outputs = len(self.python_interface.outputs)
if expected_outputs == 0:
if result is None or isinstance(result, VoidPromise):
return None
else:
raise Exception(f"Workflow local execution expected 0 outputs but something received {result}")
if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1):
return create_native_named_tuple(ctx, result, self.python_interface)
raise ValueError("expected outputs and actual outputs do not match")
def execute(self, **kwargs):
raise Exception("Should not be called")
def _local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]:
# This is done to support the invariant that Workflow local executions always work with Promise objects
# holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value.
for k, v in kwargs.items():
if not isinstance(v, Promise):
t = self.python_interface.inputs[k]
kwargs[k] = Promise(var=k, val=TypeEngine.to_literal(ctx, v, t, self.interface.inputs[k].type))
# The output of this will always be a combination of Python native values and Promises containing Flyte
# Literals.
function_outputs = self.execute(**kwargs)
# First handle the empty return case.
# A workflow function may return a task that doesn't return anything
# def wf():
# return t1()
# or it may not return at all
# def wf():
# t1()
# In the former case we get the task's VoidPromise, in the latter we get None
if isinstance(function_outputs, VoidPromise) or function_outputs is None:
if len(self.python_interface.outputs) != 0:
raise FlyteValueException(
function_outputs,
f"{function_outputs} received but interface has {len(self.python_interface.outputs)} outputs.",
)
return VoidPromise(self.name)
# Because we should've already returned in the above check, we just raise an error here.
if len(self.python_interface.outputs) == 0:
raise FlyteValueException(
function_outputs, f"{function_outputs} received but should've been VoidPromise or None."
)
expected_output_names = list(self.python_interface.outputs.keys())
if len(expected_output_names) == 1:
# Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of
# length one. That convention is used for naming outputs - and single-length-NamedTuples are
# particularly troublesome but elegant handling of them is not a high priority
# Again, we're using the output_tuple_name as a proxy.
if self.python_interface.output_tuple_name and isinstance(function_outputs, tuple):
wf_outputs_as_map = {expected_output_names[0]: function_outputs[0]}
else:
wf_outputs_as_map = {expected_output_names[0]: function_outputs}
else:
wf_outputs_as_map = {expected_output_names[i]: function_outputs[i] for i, _ in enumerate(function_outputs)}
# Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's
# interface. We do that by extracting out the literals, and creating new Promises
wf_outputs_as_literal_dict = translate_inputs_to_literals(
ctx,
wf_outputs_as_map,
flyte_interface_types=self.interface.outputs,
native_types=self.python_interface.outputs,
)
# Recreate new promises that use the workflow's output names.
new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names]
return create_task_output(new_promises, self.python_interface)
class ImperativeWorkflow(WorkflowBase):
def __init__(
self,
name: str,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
self._compilation_state = CompilationState(prefix="")
self._inputs = {}
# This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just
# keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. This
# is an error that Admin would return at compile time anyways, but this allows flytekit to raise
# the error earlier.
self._unbound_inputs = set()
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=workflow_metadata_defaults,
python_interface=Interface(),
)
@property
def compilation_state(self) -> CompilationState:
"""
Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow
class has to keep track of its own compilation state.
"""
return self._compilation_state
@property
def nodes(self) -> List[Node]:
return self._compilation_state.nodes
@property
def inputs(self) -> Dict[str, Promise]:
"""
This holds the input promises to the workflow. The nodes in these Promise objects should always point to
the global start node.
"""
return self._inputs
def __repr__(self):
return super().__repr__() + f"Nodes ({len(self.compilation_state.nodes)}): {self.compilation_state.nodes}"
def execute(self, **kwargs):
"""
Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an
entity is added using the add_entity function, all inputs to that entity should've been already declared, we
can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force
the user to declare entities already in a topological sort. To keep track of outputs, we create a map to
start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored
in this map.
After all nodes are run, we fill in workflow level outputs the same way as any other previous node.
"""
if not self.ready():
raise FlyteValidationException(f"Workflow not ready, wf is currently {self}")
# Create a map that holds the outputs of each node.
intermediate_node_outputs = {GLOBAL_START_NODE: {}} # type: Dict[Node, Dict[str, Promise]]
# Start things off with the outputs of the global input node, i.e. the inputs to the workflow.
# _local_execute should've already ensured that all the values in kwargs are Promise objects
for k, v in kwargs.items():
intermediate_node_outputs[GLOBAL_START_NODE][k] = v
# Next iterate through the nodes in order.
for node in self.compilation_state.nodes:
if node not in intermediate_node_outputs.keys():
intermediate_node_outputs[node] = {}
# Retrieve the entity from the node, and call it by looking up the promises the node's bindings require,
# and then fill them in using the node output tracker map we have.
entity = node.flyte_entity
entity_kwargs = get_promise_map(node.bindings, intermediate_node_outputs)
# Handle the calling and outputs of each node's entity
results = entity(**entity_kwargs)
expected_output_names = list(entity.python_interface.outputs.keys())
if isinstance(results, VoidPromise) or results is None:
continue # pragma: no cover # Move along, nothing to assign
# Because we should've already returned in the above check, we just raise an Exception here.
if len(entity.python_interface.outputs) == 0:
raise FlyteValueException(results, f"{results} received but should've been VoidPromise or None.")
# if there's only one output,
if len(expected_output_names) == 1:
if entity.python_interface.output_tuple_name and isinstance(results, tuple):
intermediate_node_outputs[node][expected_output_names[0]] = results[0]
else:
intermediate_node_outputs[node][expected_output_names[0]] = results
else:
if len(results) != len(expected_output_names):
raise FlyteValueException(results, f"Different lengths {results} {expected_output_names}")
for idx, r in enumerate(results):
intermediate_node_outputs[node][expected_output_names[idx]] = r
# The rest of this function looks like the above but now we're doing it for the workflow as a whole rather
# than just one node at a time.
if len(self.python_interface.outputs) == 0:
return VoidPromise(self.name)
# The values that we return below from the output have to be pulled by fulfilling all of the
# workflow's output bindings.
# The return style here has to match what 1) what the workflow would've returned had it been declared
# functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it
# should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple,
# if it's a single element then we return a single element
if len(self.output_bindings) == 1:
# Again use presence of output_tuple_name to understand that we're dealing with a one-element
# named tuple
if self.python_interface.output_tuple_name:
return (get_promise(self.output_bindings[0].binding, intermediate_node_outputs),)
# Just a normal single element
return get_promise(self.output_bindings[0].binding, intermediate_node_outputs)
return tuple([get_promise(b.binding, intermediate_node_outputs) for b in self.output_bindings])
def add_entity(self, entity: Union[PythonTask, LaunchPlan, WorkflowBase], **kwargs) -> Node:
"""
Anytime you add an entity, all the inputs to the entity must be bound.
"""
# circular import
from flytekit.core.node_creation import create_node
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
n = create_node(entity=entity, **kwargs)
def get_input_values(input_value):
if isinstance(input_value, list):
input_promises = []
for x in input_value:
input_promises.extend(get_input_values(x))
return input_promises
if isinstance(input_value, dict):
input_promises = []
for _, v in input_value.items():
input_promises.extend(get_input_values(v))
return input_promises
else:
return [input_value]
# Every time an entity is added, mark it as used. The above function though will gather all the input
# values but we're only interested in the ones that are Promises so let's filter for those.
# There's probably a way to clean this up, maybe key off of the name instead of value?
all_input_values = get_input_values(kwargs)
for input_value in filter(lambda x: isinstance(x, Promise), all_input_values):
if input_value in self._unbound_inputs:
self._unbound_inputs.remove(input_value)
return n
def add_workflow_input(self, input_name: str, python_type: Type) -> Interface:
"""
Adds an input to the workflow.
"""
if input_name in self._inputs:
raise FlyteValidationException(f"Input {input_name} has already been specified for wf {self.name}.")
self._python_interface = self._python_interface.with_inputs(extra_inputs={input_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
self._inputs[input_name] = Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
self._unbound_inputs.add(self._inputs[input_name])
return self._inputs[input_name]
def add_workflow_output(
self, output_name: str, p: Union[Promise, List[Promise], Dict[str, Promise]], python_type: Optional[Type] = None
):
"""
Add an output with the given name from the given node output.
"""
if output_name in self._python_interface.outputs:
raise FlyteValidationException(f"Output {output_name} already exists in workflow {self.name}")
if python_type is None:
if type(p) == list or type(p) == dict:
raise FlyteValidationException(
f"If specifying a list or dict of Promises, you must specify the python_type type for {output_name}"
f" starting with the container type (e.g. List[int]"
)
python_type = p.ref.node.flyte_entity.python_interface.outputs[p.var]
logger.debug(f"Inferring python type for wf output {output_name} from Promise provided {python_type}")
flyte_type = TypeEngine.to_literal_type(python_type=python_type)
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
b = binding_from_python_std(
ctx, output_name, expected_literal_type=flyte_type, t_value=p, t_value_type=python_type
)
self._output_bindings.append(b)
self._python_interface = self._python_interface.with_outputs(extra_outputs={output_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
def add_task(self, task: PythonTask, **kwargs) -> Node:
return self.add_entity(task, **kwargs)
def add_launch_plan(self, launch_plan: LaunchPlan, **kwargs) -> Node:
return self.add_entity(launch_plan, **kwargs)
def add_subwf(self, sub_wf: WorkflowBase, **kwargs) -> Node:
return self.add_entity(sub_wf, **kwargs)
def ready(self) -> bool:
"""
This function returns whether or not the workflow is in a ready state, which means
* Has at least one node
* All workflow inputs are bound
These conditions assume that all nodes and workflow i/o changes were done with the functions above, which
do additional checking.
"""
if len(self.compilation_state.nodes) == 0:
return False
if len(self._unbound_inputs) > 0:
return False
return True
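# Rough usage sketch for the imperative API above (assumes a @task-decorated function
# `t1(a: str) -> str` exists; all names are placeholders):
#
#     wb = ImperativeWorkflow(name="my.imperative.wf")
#     wb.add_workflow_input("in1", str)
#     node = wb.add_entity(t1, a=wb.inputs["in1"])
#     wb.add_workflow_output("wf_out", node.outputs["o0"])
#
# Once at least one node exists and every declared input is consumed, ready() returns
# True and the workflow can be run locally with wb(in1="hello world").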
class PythonFunctionWorkflow(WorkflowBase, ClassStorageTaskResolver):
"""
Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte.
This Python object represents a workflow defined by a function and decorated with the
:py:func:`@workflow <flytekit.workflow>` decorator. Please see notes on that object for additional information.
"""
def __init__(
self,
workflow_function: Callable,
metadata: Optional[WorkflowMetadata],
default_metadata: Optional[WorkflowMetadataDefaults],
):
name = f"{workflow_function.__module__}.{workflow_function.__name__}"
self._workflow_function = workflow_function
native_interface = transform_signature_to_interface(inspect.signature(workflow_function))
# TODO do we need this - can this not be in launchplan only?
# This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or
# we can re-evaluate.
self._input_parameters = None
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=default_metadata,
python_interface=native_interface,
)
@property
def function(self):
return self._workflow_function
def task_name(self, t: PythonAutoContainerTask) -> str:
return f"{self.name}.{t.__module__}.{t.name}"
def compile(self, **kwargs):
"""
Supply static Python native values in the kwargs if you want them to be used in the compilation. This mimics
a 'closure' in the traditional sense of the word.
"""
ctx = FlyteContextManager.current_context()
self._input_parameters = transform_inputs_to_parameters(ctx, self.python_interface)
all_nodes = []
prefix = f"{ctx.compilation_state.prefix}-{self.short_name}-" if ctx.compilation_state is not None else ""
with FlyteContextManager.with_context(
ctx.with_compilation_state(CompilationState(prefix=prefix, task_resolver=self))
) as comp_ctx:
# Construct the default input promise bindings, but then override with the provided inputs, if any
input_kwargs = construct_input_promises([k for k in self.interface.inputs.keys()])
input_kwargs.update(kwargs)
workflow_outputs = self._workflow_function(**input_kwargs)
all_nodes.extend(comp_ctx.compilation_state.nodes)
# This little loop was added as part of the task resolver change. The task resolver interface itself is
# more or less stateless (the future-proofing get_all_tasks function notwithstanding). However the
# implementation of the TaskResolverMixin that this workflow class inherits from (ClassStorageTaskResolver)
# does store state. This loop adds Tasks that are defined within the body of the workflow to the workflow
# object itself.
for n in comp_ctx.compilation_state.nodes:
if isinstance(n.flyte_entity, PythonAutoContainerTask) and n.flyte_entity.task_resolver == self:
logger.debug(f"WF {self.name} saving task {n.flyte_entity.name}")
self.add(n.flyte_entity)
# Iterate through the workflow outputs
bindings = []
output_names = list(self.interface.outputs.keys())
# The reason the length 1 case is separate is because the one output might be a list. We don't want to
# iterate through the list here, instead we should let the binding creation unwrap it and make a binding
# collection/map out of it.
if len(output_names) == 1:
if isinstance(workflow_outputs, tuple):
if len(workflow_outputs) != 1:
raise AssertionError(
f"The Workflow specification indicates only one return value, received {len(workflow_outputs)}"
)
if self.python_interface.output_tuple_name is None:
raise AssertionError(
"Outputs specification for Workflow does not define a tuple, but return value is a tuple"
)
workflow_outputs = workflow_outputs[0]
t = self.python_interface.outputs[output_names[0]]
b = binding_from_python_std(
ctx,
output_names[0],
self.interface.outputs[output_names[0]].type,
workflow_outputs,
t,
)
bindings.append(b)
elif len(output_names) > 1:
if not isinstance(workflow_outputs, tuple):
raise AssertionError("The Workflow specification indicates multiple return values, received only one")
if len(output_names) != len(workflow_outputs):
raise Exception(f"Length mismatch {len(output_names)} vs {len(workflow_outputs)}")
for i, out in enumerate(output_names):
if isinstance(workflow_outputs[i], ConditionalSection):
raise AssertionError("A Conditional block (if-else) should always end with an `else_()` clause")
t = self.python_interface.outputs[out]
b = binding_from_python_std(
ctx,
out,
self.interface.outputs[out].type,
workflow_outputs[i],
t,
)
bindings.append(b)
# Save all the things necessary to create an SdkWorkflow, except for the missing project and domain
self._nodes = all_nodes
self._output_bindings = bindings
if not output_names:
return None
if len(output_names) == 1:
return bindings[0]
return tuple(bindings)
def execute(self, **kwargs):
"""
This function is here only to try to streamline the pattern between workflows and tasks. Since tasks
call execute from dispatch_execute which is in _local_execute, workflows should also call an execute inside
_local_execute. This makes mocking cleaner.
"""
return self._workflow_function(**kwargs)
def workflow(
_workflow_function=None,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
"""
This decorator declares a function to be a Flyte workflow. Workflows are declarative entities that construct a DAG
of tasks using the data flow between tasks.
Unlike a task, the function body of a workflow is evaluated at serialization-time (aka compile-time). This is because
while we can determine the entire structure of a task by looking at the function's signature,
workflows need to run through the function itself because the body of the function is what expresses the workflow structure.
It's also important to note that, local execution notwithstanding, it is not evaluated again when the workflow runs on Flyte.
That is, workflows should not call non-Flyte entities since they are only run once (again, this is with respect to
the platform, local runs notwithstanding).
Please see the :std:doc:`cookbook:sphx_glr_auto_core_flyte_basics_basic_workflow.py` for more usage examples.
:param _workflow_function: This argument is implicitly passed and represents the decorated function.
:param failure_policy: Use the options in flytekit.WorkflowFailurePolicy
:param interruptible: Whether or not tasks launched from this workflow are by default interruptible
"""
def wrapper(fn):
workflow_metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
workflow_instance = PythonFunctionWorkflow(
fn, metadata=workflow_metadata, default_metadata=workflow_metadata_defaults
)
workflow_instance.compile()
return workflow_instance
if _workflow_function:
return wrapper(_workflow_function)
else:
return wrapper
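# Illustrative sketch (not part of the original file): the decorator above is
# normally paired with @task-decorated callables; the names below are hypothetical.
#
#     @workflow
#     def my_wf(a: int) -> int:
#         return my_task(a=a)
#
# Because the body runs at compile time, `a` inside my_wf is a Promise rather
# than a plain int, so it should only be handed to Flyte entities (tasks,
# sub-workflows, launch plans), not to arbitrary Python code.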
class ReferenceWorkflow(ReferenceEntity, PythonFunctionWorkflow):
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def __init__(
self, project: str, domain: str, name: str, version: str, inputs: Dict[str, Type], outputs: Dict[str, Type]
):
super().__init__(WorkflowReference(project, domain, name, version), inputs, outputs)
def reference_workflow(
project: str,
domain: str,
name: str,
version: str,
) -> Callable[[Callable[..., Any]], ReferenceWorkflow]:
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def wrapper(fn) -> ReferenceWorkflow:
interface = transform_signature_to_interface(inspect.signature(fn))
return ReferenceWorkflow(project, domain, name, version, interface.inputs, interface.outputs)
return wrapper
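# Illustrative sketch (not part of the original file): a hedged usage example for
# reference_workflow; the project/domain/name/version values are placeholders.
#
#     @reference_workflow(project="proj", domain="development",
#                         name="app.workflows.my_wf", version="v1")
#     def my_wf(a: int) -> int:
#         ...
#
# The decorated body is never executed; only its signature is used to declare
# the interface that the remote workflow is expected to have.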
| from __future__ import annotations
import collections
import inspect
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from flytekit.common import constants as _common_constants
from flytekit.common.exceptions.user import FlyteValidationException, FlyteValueException
from flytekit.core.base_task import PythonTask
from flytekit.core.class_based_resolver import ClassStorageTaskResolver
from flytekit.core.condition import ConditionalSection
from flytekit.core.context_manager import (
BranchEvalMode,
CompilationState,
ExecutionState,
FlyteContext,
FlyteContextManager,
FlyteEntities,
)
from flytekit.core.interface import (
Interface,
transform_inputs_to_parameters,
transform_interface_to_typed_interface,
transform_signature_to_interface,
)
from flytekit.core.launch_plan import LaunchPlan
from flytekit.core.node import Node
from flytekit.core.promise import (
NodeOutput,
Promise,
VoidPromise,
binding_from_python_std,
create_and_link_node,
create_native_named_tuple,
create_task_output,
translate_inputs_to_literals,
)
from flytekit.core.python_auto_container import PythonAutoContainerTask
from flytekit.core.reference_entity import ReferenceEntity, WorkflowReference
from flytekit.core.type_engine import TypeEngine
from flytekit.loggers import logger
from flytekit.models import interface as _interface_models
from flytekit.models import literals as _literal_models
from flytekit.models.core import workflow as _workflow_model
GLOBAL_START_NODE = Node(
id=_common_constants.GLOBAL_INPUT_NODE_ID,
metadata=None,
bindings=[],
upstream_nodes=[],
flyte_entity=None,
)
class WorkflowFailurePolicy(Enum):
FAIL_IMMEDIATELY = _workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_IMMEDIATELY
FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = (
_workflow_model.WorkflowMetadata.OnFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
)
@dataclass
class WorkflowMetadata(object):
on_failure: WorkflowFailurePolicy
def __post_init__(self):
if (
self.on_failure != WorkflowFailurePolicy.FAIL_IMMEDIATELY
and self.on_failure != WorkflowFailurePolicy.FAIL_AFTER_EXECUTABLE_NODES_COMPLETE
):
raise FlyteValidationException(f"Failure policy {self.on_failure} not acceptable")
def to_flyte_model(self):
if self.on_failure == WorkflowFailurePolicy.FAIL_IMMEDIATELY:
on_failure = 0
else:
on_failure = 1
return _workflow_model.WorkflowMetadata(on_failure=on_failure)
@dataclass
class WorkflowMetadataDefaults(object):
"""
This class is similarly named to the one above. Please see the IDL for more information but essentially, this
WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas
WorkflowMetadata represents metadata about the workflow itself.
"""
interruptible: bool
def __post_init__(self):
if self.interruptible is not True and self.interruptible is not False:
raise FlyteValidationException(f"Interruptible must be boolean, {self.interruptible} invalid")
def to_flyte_model(self):
return _workflow_model.WorkflowMetadataDefaults(interruptible=self.interruptible)
def construct_input_promises(inputs: List[str]):
return {
input_name: Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
for input_name in inputs
}
def get_promise(binding_data: _literal_models.BindingData, outputs_cache: Dict[Node, Dict[str, Promise]]) -> Promise:
"""
This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see
get_promise_map for the rest of the details.
"""
if binding_data.promise is not None:
if not isinstance(binding_data.promise, NodeOutput):
raise FlyteValidationException(
f"Binding data Promises have to be of the NodeOutput type {type(binding_data.promise)} found"
)
# b.var is the name of the input to the task
# binding_data.promise.var is the name of the upstream node's output we want
return outputs_cache[binding_data.promise.node][binding_data.promise.var]
elif binding_data.scalar is not None:
return Promise(var="placeholder", val=_literal_models.Literal(scalar=binding_data.scalar))
elif binding_data.collection is not None:
literals = []
for bd in binding_data.collection.bindings:
p = get_promise(bd, outputs_cache)
literals.append(p.val)
return Promise(
var="placeholder",
val=_literal_models.Literal(collection=_literal_models.LiteralCollection(literals=literals)),
)
elif binding_data.map is not None:
literals = {}
for k, bd in binding_data.map.bindings.items():
p = get_promise(bd, outputs_cache)
literals[k] = p.val
return Promise(
var="placeholder", val=_literal_models.Literal(map=_literal_models.LiteralMap(literals=literals))
)
raise FlyteValidationException("Binding type unrecognized.")
def get_promise_map(
bindings: List[_literal_models.Binding], outputs_cache: Dict[Node, Dict[str, Promise]]
) -> Dict[str, Promise]:
"""
Local execution of imperatively defined workflows is done node by node. This function will fill in the node's
entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs.
Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed
nodes and filling in the necessary inputs.
"""
entity_kwargs = {}
for b in bindings:
entity_kwargs[b.var] = get_promise(b.binding, outputs_cache)
return entity_kwargs
class WorkflowBase(object):
def __init__(
self,
name: str,
workflow_metadata: WorkflowMetadata,
workflow_metadata_defaults: WorkflowMetadataDefaults,
python_interface: Interface,
**kwargs,
):
self._name = name
self._workflow_metadata = workflow_metadata
self._workflow_metadata_defaults = workflow_metadata_defaults
self._python_interface = python_interface
self._interface = transform_interface_to_typed_interface(python_interface)
self._inputs = {}
self._unbound_inputs = set()
self._nodes = []
self._output_bindings: Optional[List[_literal_models.Binding]] = []
FlyteEntities.entities.append(self)
super().__init__(**kwargs)
@property
def name(self) -> str:
return self._name
@property
def short_name(self) -> str:
return self._name.split(".")[-1]
@property
def workflow_metadata(self) -> Optional[WorkflowMetadata]:
return self._workflow_metadata
@property
def workflow_metadata_defaults(self):
return self._workflow_metadata_defaults
@property
def python_interface(self) -> Interface:
return self._python_interface
@property
def interface(self) -> _interface_models.TypedInterface:
return self._interface
@property
def output_bindings(self) -> List[_literal_models.Binding]:
return self._output_bindings
@property
def nodes(self) -> List[Node]:
return self._nodes
def __repr__(self):
return (
f"WorkflowBase - {self._name} && "
f"Inputs ({len(self._python_interface.inputs)}): {self._python_interface.inputs} && "
f"Outputs ({len(self._python_interface.outputs)}): {self._python_interface.outputs} && "
f"Output bindings: {self._output_bindings} && "
)
def __call__(self, *args, **kwargs):
"""
The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution,
it goes
__call__ -> _local_execute -> execute
From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python
function is run, for the ImperativeWorkflow, each node is run one at a time.
"""
if len(args) > 0:
raise AssertionError("Only Keyword Arguments are supported for Workflow executions")
ctx = FlyteContextManager.current_context()
        # Get default arguments and override with kwargs passed in
input_kwargs = self.python_interface.default_inputs_as_kwargs
input_kwargs.update(kwargs)
# The first condition is compilation.
if ctx.compilation_state is not None:
return create_and_link_node(ctx, entity=self, interface=self.python_interface, **input_kwargs)
# This condition is hit when this workflow (self) is being called as part of a parent's workflow local run.
# The context specifying the local workflow execution has already been set.
elif (
ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION
):
if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED:
if self.python_interface and self.python_interface.output_tuple_name:
variables = [k for k in self.python_interface.outputs.keys()]
output_tuple = collections.namedtuple(self.python_interface.output_tuple_name, variables)
nones = [None for _ in self.python_interface.outputs.keys()]
return output_tuple(*nones)
else:
return None
# We are already in a local execution, just continue the execution context
return self._local_execute(ctx, **input_kwargs)
# Last is starting a local workflow execution
else:
# Run some sanity checks
# Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the
# conversion here in this loop. The reason is because we don't prevent users from specifying inputs
# as direct scalars, which means there's another Promise-generating loop inside _local_execute too
for k, v in input_kwargs.items():
if k not in self.interface.inputs:
raise ValueError(f"Received unexpected keyword argument {k}")
if isinstance(v, Promise):
raise ValueError(f"Received a promise for a workflow call, when expecting a native value for {k}")
with FlyteContextManager.with_context(
ctx.with_execution_state(
ctx.new_execution_state().with_params(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION)
)
) as child_ctx:
result = self._local_execute(child_ctx, **input_kwargs)
expected_outputs = len(self.python_interface.outputs)
if expected_outputs == 0:
if result is None or isinstance(result, VoidPromise):
return None
else:
raise Exception(f"Workflow local execution expected 0 outputs but something received {result}")
if (1 < expected_outputs == len(result)) or (result is not None and expected_outputs == 1):
return create_native_named_tuple(ctx, result, self.python_interface)
raise ValueError("expected outputs and actual outputs do not match")
def execute(self, **kwargs):
raise Exception("Should not be called")
def _local_execute(self, ctx: FlyteContext, **kwargs) -> Union[Tuple[Promise], Promise, VoidPromise]:
# This is done to support the invariant that Workflow local executions always work with Promise objects
# holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value.
for k, v in kwargs.items():
if not isinstance(v, Promise):
t = self.python_interface.inputs[k]
kwargs[k] = Promise(var=k, val=TypeEngine.to_literal(ctx, v, t, self.interface.inputs[k].type))
# The output of this will always be a combination of Python native values and Promises containing Flyte
# Literals.
function_outputs = self.execute(**kwargs)
# First handle the empty return case.
# A workflow function may return a task that doesn't return anything
# def wf():
# return t1()
# or it may not return at all
# def wf():
# t1()
# In the former case we get the task's VoidPromise, in the latter we get None
if isinstance(function_outputs, VoidPromise) or function_outputs is None:
if len(self.python_interface.outputs) != 0:
raise FlyteValueException(
function_outputs,
f"{function_outputs} received but interface has {len(self.python_interface.outputs)} outputs.",
)
return VoidPromise(self.name)
# Because we should've already returned in the above check, we just raise an error here.
if len(self.python_interface.outputs) == 0:
raise FlyteValueException(
function_outputs, f"{function_outputs} received but should've been VoidPromise or None."
)
expected_output_names = list(self.python_interface.outputs.keys())
if len(expected_output_names) == 1:
# Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of
# length one. That convention is used for naming outputs - and single-length-NamedTuples are
# particularly troublesome but elegant handling of them is not a high priority
# Again, we're using the output_tuple_name as a proxy.
if self.python_interface.output_tuple_name and isinstance(function_outputs, tuple):
wf_outputs_as_map = {expected_output_names[0]: function_outputs[0]}
else:
wf_outputs_as_map = {expected_output_names[0]: function_outputs}
else:
wf_outputs_as_map = {expected_output_names[i]: function_outputs[i] for i, _ in enumerate(function_outputs)}
# Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's
# interface. We do that by extracting out the literals, and creating new Promises
wf_outputs_as_literal_dict = translate_inputs_to_literals(
ctx,
wf_outputs_as_map,
flyte_interface_types=self.interface.outputs,
native_types=self.python_interface.outputs,
)
# Recreate new promises that use the workflow's output names.
new_promises = [Promise(var, wf_outputs_as_literal_dict[var]) for var in expected_output_names]
return create_task_output(new_promises, self.python_interface)
class ImperativeWorkflow(WorkflowBase):
def __init__(
self,
name: str,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
self._compilation_state = CompilationState(prefix="")
self._inputs = {}
# This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just
# keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. This
# is an error that Admin would return at compile time anyways, but this allows flytekit to raise
# the error earlier.
self._unbound_inputs = set()
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=workflow_metadata_defaults,
python_interface=Interface(),
)
@property
def compilation_state(self) -> CompilationState:
"""
Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow
class has to keep track of its own compilation state.
"""
return self._compilation_state
@property
def nodes(self) -> List[Node]:
return self._compilation_state.nodes
@property
def inputs(self) -> Dict[str, Promise]:
"""
This holds the input promises to the workflow. The nodes in these Promise objects should always point to
the global start node.
"""
return self._inputs
def __repr__(self):
return super().__repr__() + f"Nodes ({len(self.compilation_state.nodes)}): {self.compilation_state.nodes}"
def execute(self, **kwargs):
"""
Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an
entity is added using the add_entity function, all inputs to that entity should've been already declared, we
can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force
the user to declare entities already in a topological sort. To keep track of outputs, we create a map to
start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored
in this map.
After all nodes are run, we fill in workflow level outputs the same way as any other previous node.
"""
if not self.ready():
raise FlyteValidationException(f"Workflow not ready, wf is currently {self}")
# Create a map that holds the outputs of each node.
intermediate_node_outputs = {GLOBAL_START_NODE: {}} # type: Dict[Node, Dict[str, Promise]]
# Start things off with the outputs of the global input node, i.e. the inputs to the workflow.
# _local_execute should've already ensured that all the values in kwargs are Promise objects
for k, v in kwargs.items():
intermediate_node_outputs[GLOBAL_START_NODE][k] = v
# Next iterate through the nodes in order.
for node in self.compilation_state.nodes:
if node not in intermediate_node_outputs.keys():
intermediate_node_outputs[node] = {}
# Retrieve the entity from the node, and call it by looking up the promises the node's bindings require,
# and then fill them in using the node output tracker map we have.
entity = node.flyte_entity
entity_kwargs = get_promise_map(node.bindings, intermediate_node_outputs)
# Handle the calling and outputs of each node's entity
results = entity(**entity_kwargs)
expected_output_names = list(entity.python_interface.outputs.keys())
if isinstance(results, VoidPromise) or results is None:
continue # pragma: no cover # Move along, nothing to assign
# Because we should've already returned in the above check, we just raise an Exception here.
if len(entity.python_interface.outputs) == 0:
raise FlyteValueException(results, f"{results} received but should've been VoidPromise or None.")
# if there's only one output,
if len(expected_output_names) == 1:
if entity.python_interface.output_tuple_name and isinstance(results, tuple):
intermediate_node_outputs[node][expected_output_names[0]] = results[0]
else:
intermediate_node_outputs[node][expected_output_names[0]] = results
else:
if len(results) != len(expected_output_names):
raise FlyteValueException(results, f"Different lengths {results} {expected_output_names}")
for idx, r in enumerate(results):
intermediate_node_outputs[node][expected_output_names[idx]] = r
# The rest of this function looks like the above but now we're doing it for the workflow as a whole rather
# than just one node at a time.
if len(self.python_interface.outputs) == 0:
return VoidPromise(self.name)
# The values that we return below from the output have to be pulled by fulfilling all of the
# workflow's output bindings.
        # The return style here has to match 1) what the workflow would've returned had it been declared
# functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it
# should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple,
# if it's a single element then we return a single element
if len(self.output_bindings) == 1:
# Again use presence of output_tuple_name to understand that we're dealing with a one-element
# named tuple
if self.python_interface.output_tuple_name:
return (get_promise(self.output_bindings[0].binding, intermediate_node_outputs),)
# Just a normal single element
return get_promise(self.output_bindings[0].binding, intermediate_node_outputs)
return tuple([get_promise(b.binding, intermediate_node_outputs) for b in self.output_bindings])
def add_entity(self, entity: Union[PythonTask, LaunchPlan, WorkflowBase], **kwargs) -> Node:
"""
Anytime you add an entity, all the inputs to the entity must be bound.
"""
# circular import
from flytekit.core.node_creation import create_node
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
n = create_node(entity=entity, **kwargs)
def get_input_values(input_value):
if isinstance(input_value, list):
input_promises = []
for x in input_value:
input_promises.extend(get_input_values(x))
return input_promises
if isinstance(input_value, dict):
input_promises = []
for _, v in input_value.items():
input_promises.extend(get_input_values(v))
return input_promises
else:
return [input_value]
# Every time an entity is added, mark it as used. The above function though will gather all the input
# values but we're only interested in the ones that are Promises so let's filter for those.
# There's probably a way to clean this up, maybe key off of the name instead of value?
all_input_values = get_input_values(kwargs)
for input_value in filter(lambda x: isinstance(x, Promise), all_input_values):
if input_value in self._unbound_inputs:
self._unbound_inputs.remove(input_value)
return n
def add_workflow_input(self, input_name: str, python_type: Type) -> Interface:
"""
Adds an input to the workflow.
"""
if input_name in self._inputs:
raise FlyteValidationException(f"Input {input_name} has already been specified for wf {self.name}.")
self._python_interface = self._python_interface.with_inputs(extra_inputs={input_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
self._inputs[input_name] = Promise(var=input_name, val=NodeOutput(node=GLOBAL_START_NODE, var=input_name))
self._unbound_inputs.add(self._inputs[input_name])
return self._inputs[input_name]
def add_workflow_output(
self, output_name: str, p: Union[Promise, List[Promise], Dict[str, Promise]], python_type: Optional[Type] = None
):
"""
Add an output with the given name from the given node output.
"""
if output_name in self._python_interface.outputs:
raise FlyteValidationException(f"Output {output_name} already exists in workflow {self.name}")
if python_type is None:
if type(p) == list or type(p) == dict:
raise FlyteValidationException(
f"If specifying a list or dict of Promises, you must specify the python_type type for {output_name}"
f" starting with the container type (e.g. List[int]"
)
python_type = p.ref.node.flyte_entity.python_interface.outputs[p.var]
logger.debug(f"Inferring python type for wf output {output_name} from Promise provided {python_type}")
flyte_type = TypeEngine.to_literal_type(python_type=python_type)
ctx = FlyteContext.current_context()
if ctx.compilation_state is not None:
raise Exception("Can't already be compiling")
with FlyteContextManager.with_context(ctx.with_compilation_state(self.compilation_state)) as ctx:
b = binding_from_python_std(
ctx, output_name, expected_literal_type=flyte_type, t_value=p, t_value_type=python_type
)
self._output_bindings.append(b)
self._python_interface = self._python_interface.with_outputs(extra_outputs={output_name: python_type})
self._interface = transform_interface_to_typed_interface(self._python_interface)
def add_task(self, task: PythonTask, **kwargs) -> Node:
return self.add_entity(task, **kwargs)
def add_launch_plan(self, launch_plan: LaunchPlan, **kwargs) -> Node:
return self.add_entity(launch_plan, **kwargs)
def add_subwf(self, sub_wf: WorkflowBase, **kwargs) -> Node:
return self.add_entity(sub_wf, **kwargs)
def ready(self) -> bool:
"""
This function returns whether or not the workflow is in a ready state, which means
* Has at least one node
* All workflow inputs are bound
These conditions assume that all nodes and workflow i/o changes were done with the functions above, which
do additional checking.
"""
if len(self.compilation_state.nodes) == 0:
return False
if len(self._unbound_inputs) > 0:
return False
return True
class PythonFunctionWorkflow(WorkflowBase, ClassStorageTaskResolver):
"""
Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte.
This Python object represents a workflow defined by a function and decorated with the
:py:func:`@workflow <flytekit.workflow>` decorator. Please see notes on that object for additional information.
"""
def __init__(
self,
workflow_function: Callable,
metadata: Optional[WorkflowMetadata],
default_metadata: Optional[WorkflowMetadataDefaults],
):
name = f"{workflow_function.__module__}.{workflow_function.__name__}"
self._workflow_function = workflow_function
native_interface = transform_signature_to_interface(inspect.signature(workflow_function))
# TODO do we need this - can this not be in launchplan only?
# This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or
# we can re-evaluate.
self._input_parameters = None
super().__init__(
name=name,
workflow_metadata=metadata,
workflow_metadata_defaults=default_metadata,
python_interface=native_interface,
)
@property
def function(self):
return self._workflow_function
def task_name(self, t: PythonAutoContainerTask) -> str:
return f"{self.name}.{t.__module__}.{t.name}"
def compile(self, **kwargs):
"""
Supply static Python native values in the kwargs if you want them to be used in the compilation. This mimics
a 'closure' in the traditional sense of the word.
"""
ctx = FlyteContextManager.current_context()
self._input_parameters = transform_inputs_to_parameters(ctx, self.python_interface)
all_nodes = []
prefix = f"{ctx.compilation_state.prefix}-{self.short_name}-" if ctx.compilation_state is not None else ""
with FlyteContextManager.with_context(
ctx.with_compilation_state(CompilationState(prefix=prefix, task_resolver=self))
) as comp_ctx:
# Construct the default input promise bindings, but then override with the provided inputs, if any
input_kwargs = construct_input_promises([k for k in self.interface.inputs.keys()])
input_kwargs.update(kwargs)
workflow_outputs = self._workflow_function(**input_kwargs)
all_nodes.extend(comp_ctx.compilation_state.nodes)
# This little loop was added as part of the task resolver change. The task resolver interface itself is
# more or less stateless (the future-proofing get_all_tasks function notwithstanding). However the
# implementation of the TaskResolverMixin that this workflow class inherits from (ClassStorageTaskResolver)
# does store state. This loop adds Tasks that are defined within the body of the workflow to the workflow
# object itself.
for n in comp_ctx.compilation_state.nodes:
if isinstance(n.flyte_entity, PythonAutoContainerTask) and n.flyte_entity.task_resolver == self:
logger.debug(f"WF {self.name} saving task {n.flyte_entity.name}")
self.add(n.flyte_entity)
# Iterate through the workflow outputs
bindings = []
output_names = list(self.interface.outputs.keys())
# The reason the length 1 case is separate is because the one output might be a list. We don't want to
# iterate through the list here, instead we should let the binding creation unwrap it and make a binding
# collection/map out of it.
if len(output_names) == 1:
if isinstance(workflow_outputs, tuple):
if len(workflow_outputs) != 1:
raise AssertionError(
f"The Workflow specification indicates only one return value, received {len(workflow_outputs)}"
)
if self.python_interface.output_tuple_name is None:
raise AssertionError(
"Outputs specification for Workflow does not define a tuple, but return value is a tuple"
)
workflow_outputs = workflow_outputs[0]
t = self.python_interface.outputs[output_names[0]]
b = binding_from_python_std(
ctx,
output_names[0],
self.interface.outputs[output_names[0]].type,
workflow_outputs,
t,
)
bindings.append(b)
elif len(output_names) > 1:
if not isinstance(workflow_outputs, tuple):
raise AssertionError("The Workflow specification indicates multiple return values, received only one")
if len(output_names) != len(workflow_outputs):
raise Exception(f"Length mismatch {len(output_names)} vs {len(workflow_outputs)}")
for i, out in enumerate(output_names):
if isinstance(workflow_outputs[i], ConditionalSection):
raise AssertionError("A Conditional block (if-else) should always end with an `else_()` clause")
t = self.python_interface.outputs[out]
b = binding_from_python_std(
ctx,
out,
self.interface.outputs[out].type,
workflow_outputs[i],
t,
)
bindings.append(b)
# Save all the things necessary to create an SdkWorkflow, except for the missing project and domain
self._nodes = all_nodes
self._output_bindings = bindings
if not output_names:
return None
if len(output_names) == 1:
return bindings[0]
return tuple(bindings)
def execute(self, **kwargs):
"""
This function is here only to try to streamline the pattern between workflows and tasks. Since tasks
call execute from dispatch_execute which is in _local_execute, workflows should also call an execute inside
_local_execute. This makes mocking cleaner.
"""
return self._workflow_function(**kwargs)
def workflow(
_workflow_function=None,
failure_policy: Optional[WorkflowFailurePolicy] = None,
interruptible: Optional[bool] = False,
):
"""
This decorator declares a function to be a Flyte workflow. Workflows are declarative entities that construct a DAG
of tasks using the data flow between tasks.
Unlike a task, the function body of a workflow is evaluated at serialization-time (aka compile-time). This is because
while we can determine the entire structure of a task by looking at the function's signature,
workflows need to run through the function itself because the body of the function is what expresses the workflow structure.
It's also important to note that, local execution notwithstanding, it is not evaluated again when the workflow runs on Flyte.
That is, workflows should not call non-Flyte entities since they are only run once (again, this is with respect to
the platform, local runs notwithstanding).
Please see the :std:doc:`cookbook:sphx_glr_auto_core_flyte_basics_basic_workflow.py` for more usage examples.
:param _workflow_function: This argument is implicitly passed and represents the decorated function.
:param failure_policy: Use the options in flytekit.WorkflowFailurePolicy
:param interruptible: Whether or not tasks launched from this workflow are by default interruptible
"""
def wrapper(fn):
workflow_metadata = WorkflowMetadata(on_failure=failure_policy or WorkflowFailurePolicy.FAIL_IMMEDIATELY)
workflow_metadata_defaults = WorkflowMetadataDefaults(interruptible)
workflow_instance = PythonFunctionWorkflow(
fn, metadata=workflow_metadata, default_metadata=workflow_metadata_defaults
)
workflow_instance.compile()
return workflow_instance
if _workflow_function:
return wrapper(_workflow_function)
else:
return wrapper
class ReferenceWorkflow(ReferenceEntity, PythonFunctionWorkflow):
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def __init__(
self, project: str, domain: str, name: str, version: str, inputs: Dict[str, Type], outputs: Dict[str, Type]
):
super().__init__(WorkflowReference(project, domain, name, version), inputs, outputs)
def reference_workflow(
project: str,
domain: str,
name: str,
version: str,
) -> Callable[[Callable[..., Any]], ReferenceWorkflow]:
"""
A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This
object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface.
If at registration time the interface provided causes an issue with compilation, an error will be returned.
"""
def wrapper(fn) -> ReferenceWorkflow:
interface = transform_signature_to_interface(inspect.signature(fn))
return ReferenceWorkflow(project, domain, name, version, interface.inputs, interface.outputs)
return wrapper
| en | 0.907511 | This class is similarly named to the one above. Please see the IDL for more information but essentially, this WorkflowMetadataDefaults class represents the defaults that are handed down to a workflow's tasks, whereas WorkflowMetadata represents metadata about the workflow itself. This is a helper function that will turn a binding into a Promise object, using a lookup map. Please see get_promise_map for the rest of the details. # b.var is the name of the input to the task # binding_data.promise.var is the name of the upstream node's output we want Local execution of imperatively defined workflows is done node by node. This function will fill in the node's entity's input arguments, which are specified using the bindings list, and a map of nodes to its outputs. Basically this takes the place of propeller in resolving bindings, pulling in outputs from previously completed nodes and filling in the necessary inputs. The call pattern for Workflows is close to, but not exactly, the call pattern for Tasks. For local execution, it goes __call__ -> _local_execute -> execute From execute, different things happen for the two Workflow styles. For PythonFunctionWorkflows, the Python function is run, for the ImperativeWorkflow, each node is run one at a time. # Get default agruements and override with kwargs passed in # The first condition is compilation. # This condition is hit when this workflow (self) is being called as part of a parent's workflow local run. # The context specifying the local workflow execution has already been set. # We are already in a local execution, just continue the execution context # Last is starting a local workflow execution # Run some sanity checks # Even though the _local_execute call generally expects inputs to be Promises, we don't have to do the # conversion here in this loop. The reason is because we don't prevent users from specifying inputs # as direct scalars, which means there's another Promise-generating loop inside _local_execute too # This is done to support the invariant that Workflow local executions always work with Promise objects # holding Flyte literal values. Even in a wf, a user can call a sub-workflow with a Python native value. # The output of this will always be a combination of Python native values and Promises containing Flyte # Literals. # First handle the empty return case. # A workflow function may return a task that doesn't return anything # def wf(): # return t1() # or it may not return at all # def wf(): # t1() # In the former case we get the task's VoidPromise, in the latter we get None # Because we should've already returned in the above check, we just raise an error here. # Here we have to handle the fact that the wf could've been declared with a typing.NamedTuple of # length one. That convention is used for naming outputs - and single-length-NamedTuples are # particularly troublesome but elegant handling of them is not a high priority # Again, we're using the output_tuple_name as a proxy. # Basically we need to repackage the promises coming from the tasks into Promises that match the workflow's # interface. We do that by extracting out the literals, and creating new Promises # Recreate new promises that use the workflow's output names. # This unbound inputs construct is just here to help workflow authors detect issues a bit earlier. It just # keeps track of workflow inputs that you've declared with add_workflow_input but haven't yet consumed. 
This # is an error that Admin would return at compile time anyways, but this allows flytekit to raise # the error earlier. Compilation is done a bit at a time, one task or other entity call at a time. This is why this workflow class has to keep track of its own compilation state. This holds the input promises to the workflow. The nodes in these Promise objects should always point to the global start node. Called by _local_execute. This function is how local execution for imperative workflows runs. Because when an entity is added using the add_entity function, all inputs to that entity should've been already declared, we can just iterate through the nodes in order and we shouldn't run into any dependency issues. That is, we force the user to declare entities already in a topological sort. To keep track of outputs, we create a map to start things off, filled in only with the workflow inputs (if any). As things are run, their outputs are stored in this map. After all nodes are run, we fill in workflow level outputs the same way as any other previous node. # Create a map that holds the outputs of each node. # type: Dict[Node, Dict[str, Promise]] # Start things off with the outputs of the global input node, i.e. the inputs to the workflow. # _local_execute should've already ensured that all the values in kwargs are Promise objects # Next iterate through the nodes in order. # Retrieve the entity from the node, and call it by looking up the promises the node's bindings require, # and then fill them in using the node output tracker map we have. # Handle the calling and outputs of each node's entity # pragma: no cover # Move along, nothing to assign # Because we should've already returned in the above check, we just raise an Exception here. # if there's only one output, # The rest of this function looks like the above but now we're doing it for the workflow as a whole rather # than just one node at a time. # The values that we return below from the output have to be pulled by fulfilling all of the # workflow's output bindings. # The return style here has to match what 1) what the workflow would've returned had it been declared # functionally, and 2) what a user would return in mock function. That is, if it's a tuple, then it # should be a tuple here, if it's a one element named tuple, then we do a one-element non-named tuple, # if it's a single element then we return a single element # Again use presence of output_tuple_name to understand that we're dealing with a one-element # named tuple # Just a normal single element Anytime you add an entity, all the inputs to the entity must be bound. # circular import # Every time an entity is added, mark it as used. The above function though will gather all the input # values but we're only interested in the ones that are Promises so let's filter for those. # There's probably a way to clean this up, maybe key off of the name instead of value? Adds an input to the workflow. Add an output with the given name from the given node output. This function returns whether or not the workflow is in a ready state, which means * Has at least one node * All workflow inputs are bound These conditions assume that all nodes and workflow i/o changes were done with the functions above, which do additional checking. Please read :std:ref:`flyte:divedeep-workflows` first for a high-level understanding of what workflows are in Flyte. This Python object represents a workflow defined by a function and decorated with the :py:func:`@workflow <flytekit.workflow>` decorator. 
Please see notes on that object for additional information. # TODO do we need this - can this not be in launchplan only? # This can be in launch plan only, but is here only so that we don't have to re-evaluate. Or # we can re-evaluate. Supply static Python native values in the kwargs if you want them to be used in the compilation. This mimics a 'closure' in the traditional sense of the word. # Construct the default input promise bindings, but then override with the provided inputs, if any # This little loop was added as part of the task resolver change. The task resolver interface itself is # more or less stateless (the future-proofing get_all_tasks function notwithstanding). However the # implementation of the TaskResolverMixin that this workflow class inherits from (ClassStorageTaskResolver) # does store state. This loop adds Tasks that are defined within the body of the workflow to the workflow # object itself. # Iterate through the workflow outputs # The reason the length 1 case is separate is because the one output might be a list. We don't want to # iterate through the list here, instead we should let the binding creation unwrap it and make a binding # collection/map out of it. # Save all the things necessary to create an SdkWorkflow, except for the missing project and domain This function is here only to try to streamline the pattern between workflows and tasks. Since tasks call execute from dispatch_execute which is in _local_execute, workflows should also call an execute inside _local_execute. This makes mocking cleaner. This decorator declares a function to be a Flyte workflow. Workflows are declarative entities that construct a DAG of tasks using the data flow between tasks. Unlike a task, the function body of a workflow is evaluated at serialization-time (aka compile-time). This is because while we can determine the entire structure of a task by looking at the function's signature, workflows need to run through the function itself because the body of the function is what expresses the workflow structure. It's also important to note that, local execution notwithstanding, it is not evaluated again when the workflow runs on Flyte. That is, workflows should not call non-Flyte entities since they are only run once (again, this is with respect to the platform, local runs notwithstanding). Please see the :std:doc:`cookbook:sphx_glr_auto_core_flyte_basics_basic_workflow.py` for more usage examples. :param _workflow_function: This argument is implicitly passed and represents the decorated function. :param failure_policy: Use the options in flytekit.WorkflowFailurePolicy :param interruptible: Whether or not tasks launched from this workflow are by default interruptible A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface. If at registration time the interface provided causes an issue with compilation, an error will be returned. A reference workflow is a pointer to a workflow that already exists on your Flyte installation. This object will not initiate a network call to Admin, which is why the user is asked to provide the expected interface. If at registration time the interface provided causes an issue with compilation, an error will be returned. | 1.692711 | 2 |
nemo/collections/asr/parts/numba/rnnt_loss/rnnt_numpy.py | madhukarkm/NeMo | 4,145 | 257 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
check_dim(lengths, 1, "lenghts")
check_dim(label_lengths, 1, "label_lenghts")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
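# Note added for clarity (not in the original file): the loops above implement the
# standard RNN-T forward recursion
#     alpha(t, u) = logaddexp( alpha(t-1, u) + log P(blank | t-1, u),
#                              alpha(t, u-1) + log P(y_u   | t, u-1) ),
# with alpha(0, 0) = 0, and the returned log-likelihood is
#     log P(y | x) = alpha(T-1, U-1) + log P(blank | T-1, U-1).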
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
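# Note added for clarity (not in the original file): for a valid log-probability
# lattice the two passes agree, i.e. betas[0, 0] equals
# alphas[T-1, U-1] + log_probs[T-1, U-1, blank] up to floating point error, since
# both marginalize over the same set of monotonic alignments.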
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
"""
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
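# Note added for clarity (not in the original file): the expression above is the
# usual RNN-T gradient of the negative log-likelihood w.r.t. the log-probabilities,
#     d(-log P(y|x)) / d log_probs[t, u, v]
#         = -exp( alpha(t, u) + beta_next + log_probs[t, u, v] - log P(y|x) ),
# where beta_next is beta(t+1, u) for the blank symbol and beta(t, u+1) for the
# correct next label; entries left at -inf become exp(-inf) = 0. The FastEmit
# branch then scales only the label-transition gradients by (1 + lambda).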
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
            unnormalized input activations
        2d array: Alphas matrix (TxU)
2d array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
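# Illustrative sketch (not part of the original file): calling the numpy batch
# routine directly with hypothetical sizes; `acts` stands in for raw activations.
#
#     B, T, U, V = 1, 2, 3, 4                      # V includes the blank symbol
#     acts = np.random.randn(B, T, U, V)
#     log_probs = acts - np.log(np.exp(acts).sum(-1, keepdims=True))
#     labels = np.array([[1, 2]], dtype=np.int32)  # U - 1 target labels
#     costs, grads = transduce_batch(log_probs, labels,
#                                    flen=np.array([T]), glen=np.array([U - 1]),
#                                    blank=0)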
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
return ctx.grads, None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
        `blank` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32)
act_lens = torch.tensor([2], dtype=torch.int32)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int32)
loss_val = loss(acts, labels, act_lens, label_lens)
| # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018-2019, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch.autograd import Function, Variable
from torch.nn import Module
def check_type(var, t, name):
if var.dtype is not t:
raise TypeError("{} must be {}".format(name, t))
def check_contiguous(var, name):
if not var.is_contiguous():
raise ValueError("{} must be contiguous".format(name))
def check_dim(var, dim, name):
if len(var.shape) != dim:
raise ValueError("{} must be {}D".format(name, dim))
def certify_inputs(log_probs, labels, lengths, label_lengths):
# check_type(log_probs, torch.float32, "log_probs")
check_type(labels, torch.int32, "labels")
check_type(label_lengths, torch.int32, "label_lengths")
check_type(lengths, torch.int32, "lengths")
check_contiguous(log_probs, "log_probs")
check_contiguous(labels, "labels")
check_contiguous(label_lengths, "label_lengths")
check_contiguous(lengths, "lengths")
if lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
f"Must have a length per example. "
f"Given lengths dim: {lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
if label_lengths.shape[0] != log_probs.shape[0]:
raise ValueError(
"Must have a label length per example. "
f"Given label lengths dim : {label_lengths.shape[0]}, "
f"Log probs dim : {log_probs.shape[0]}"
)
check_dim(log_probs, 4, "log_probs")
check_dim(labels, 2, "labels")
    check_dim(lengths, 1, "lengths")
    check_dim(label_lengths, 1, "label_lengths")
max_T = torch.max(lengths)
max_U = torch.max(label_lengths)
T, U = log_probs.shape[1:3]
if T != max_T:
raise ValueError(f"Input length mismatch! Given T: {T}, Expected max T from input lengths: {max_T}")
if U != max_U + 1:
raise ValueError(f"Output length mismatch! Given U: {U}, Expected max U from target lengths: {max_U} + 1")
def _assert_no_grad(tensor):
assert not tensor.requires_grad, (
"gradients only computed for log_probs - please " "mark other tensors as not requiring gradients"
)
def forward_pass(log_probs, labels, blank):
"""
Computes probability of the forward variable alpha.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the forward variable probabilities - alpha of shape [T, U]
and the log likelihood of this forward step.
"""
T, U, _ = log_probs.shape
alphas = np.zeros((T, U), dtype='f')
for t in range(1, T):
alphas[t, 0] = alphas[t - 1, 0] + log_probs[t - 1, 0, blank]
for u in range(1, U):
alphas[0, u] = alphas[0, u - 1] + log_probs[0, u - 1, labels[u - 1]]
for t in range(1, T):
for u in range(1, U):
no_emit = alphas[t - 1, u] + log_probs[t - 1, u, blank]
emit = alphas[t, u - 1] + log_probs[t, u - 1, labels[u - 1]]
alphas[t, u] = np.logaddexp(emit, no_emit)
loglike = alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank]
return alphas, loglike
def backward_pass(log_probs, labels, blank):
"""
Computes probability of the backward variable beta.
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
A tuple of the backward variable probabilities - beta of shape [T, U]
and the log likelihood of this backward step.
"""
T, U, _ = log_probs.shape
betas = np.zeros((T, U), dtype='f')
betas[T - 1, U - 1] = log_probs[T - 1, U - 1, blank]
for t in reversed(range(T - 1)):
betas[t, U - 1] = betas[t + 1, U - 1] + log_probs[t, U - 1, blank]
for u in reversed(range(U - 1)):
betas[T - 1, u] = betas[T - 1, u + 1] + log_probs[T - 1, u, labels[u]]
for t in reversed(range(T - 1)):
for u in reversed(range(U - 1)):
no_emit = betas[t + 1, u] + log_probs[t, u, blank]
emit = betas[t, u + 1] + log_probs[t, u, labels[u]]
betas[t, u] = np.logaddexp(emit, no_emit)
return betas, betas[0, 0]
def compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda):
"""
    Computes the gradients of the log_probs with respect to the log probability of this step occurring.
    Args:
log_probs: Tensor of shape [T, U, V+1]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Tensor of shape [T, U] which represents the backward variable.
labels: Labels of shape [B, U]
blank: Index of the blank token.
Returns:
Gradients of shape [T, U, V+1] with respect to the forward log probability
"""
T, U, _ = log_probs.shape
grads = np.full(log_probs.shape, -float("inf"))
log_like = betas[0, 0] # == alphas[T - 1, U - 1] + betas[T - 1, U - 1]
# // grad to last blank transition
grads[T - 1, U - 1, blank] = alphas[T - 1, U - 1]
grads[: T - 1, :, blank] = alphas[: T - 1, :] + betas[1:, :]
# // grad to label transition
for u, l in enumerate(labels):
grads[:, u, l] = alphas[:, u] + betas[:, u + 1]
grads = -np.exp(grads + log_probs - log_like)
if fastemit_lambda > 0.0:
for u, l in enumerate(labels):
grads[:, u, l] = (1.0 + fastemit_lambda) * grads[:, u, l]
return grads
def fastemit_regularization(log_probs, labels, alphas, betas, blank, fastemit_lambda):
"""
Describes the computation of FastEmit regularization from the paper -
[FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148)
Args:
log_probs: Tensor of shape [T, U, V+1]
labels: Unused. Labels of shape [B, U]
alphas: Tensor of shape [T, U] which represents the forward variable.
betas: Unused. Tensor of shape [T, U] which represents the backward variable.
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
The regularized negative log likelihood - lambda * P˜(At, u|x)
"""
# General calculation of the fastemit regularization alignments
T, U, _ = log_probs.shape
# alignment = np.zeros((T, U), dtype='float32')
#
# for t in range(0, T):
# alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1]
#
# for t in range(0, T):
# for u in range(0, U - 1):
# emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1]
# alignment[t, u] = emit
# reg = fastemit_lambda * (alignment[T - 1, U - 1])
# The above is equivalent to below, without need of computing above
# reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1])
# The above is also equivalent to below, without need of computing the betas alignment matrix
reg = fastemit_lambda * (alphas[T - 1, U - 1] + log_probs[T - 1, U - 1, blank])
return -reg
def transduce(log_probs, labels, blank=0, fastemit_lambda=0.0):
"""
Args:
log_probs: 3D array with shape
[input len, output len + 1, vocab size]
labels: 1D array with shape [output time steps]
blank: Index of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
float: The negative log-likelihood
3D array: Gradients with respect to the
            unnormalized input activations
2d arrays: Alphas matrix (TxU)
2d array: Betas matrix (TxU)
"""
alphas, ll_forward = forward_pass(log_probs, labels, blank)
betas, ll_backward = backward_pass(log_probs, labels, blank)
grads = compute_gradient(log_probs, alphas, betas, labels, blank, fastemit_lambda)
return -ll_forward, grads, alphas, betas
def transduce_batch(log_probs, labels, flen, glen, blank=0, fastemit_lambda=0.0):
"""
Compute the transducer loss of the batch.
Args:
log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax.
labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning.
flen: Length vector of the acoustic sequence.
glen: Length vector of the target sequence.
blank: Id of the blank token.
fastemit_lambda: Float scaling factor for FastEmit regularization.
Returns:
Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix.
"""
grads = np.zeros_like(log_probs)
costs = []
for b in range(log_probs.shape[0]):
t = int(flen[b])
u = int(glen[b]) + 1
ll, g, alphas, betas = transduce(log_probs[b, :t, :u, :], labels[b, : u - 1], blank, fastemit_lambda)
grads[b, :t, :u, :] = g
reg = fastemit_regularization(
log_probs[b, :t, :u, :], labels[b, : u - 1], alphas, betas, blank, fastemit_lambda
)
ll += reg
costs.append(ll)
return costs, grads
class _RNNT(Function):
@staticmethod
def forward(ctx, acts, labels, act_lens, label_lens, blank, fastemit_lambda):
costs, grads = transduce_batch(
acts.detach().cpu().numpy(),
labels.cpu().numpy(),
act_lens.cpu().numpy(),
label_lens.cpu().numpy(),
blank,
fastemit_lambda,
)
costs = torch.FloatTensor([sum(costs)])
grads = torch.Tensor(grads).to(acts)
ctx.grads = grads
return costs
@staticmethod
def backward(ctx, grad_output):
return ctx.grads, None, None, None, None, None
class RNNTLoss(Module):
"""
Parameters:
`blank_label` (int): default 0 - label index of blank token
fastemit_lambda: Float scaling factor for FastEmit regularization.
"""
def __init__(self, blank: int = 0, fastemit_lambda: float = 0.0):
super(RNNTLoss, self).__init__()
self.blank = blank
self.fastemit_lambda = fastemit_lambda
self.rnnt = _RNNT.apply
def forward(self, acts, labels, act_lens, label_lens):
assert len(labels.size()) == 2
_assert_no_grad(labels)
_assert_no_grad(act_lens)
_assert_no_grad(label_lens)
certify_inputs(acts, labels, act_lens, label_lens)
acts = torch.nn.functional.log_softmax(acts, -1)
return self.rnnt(acts, labels, act_lens, label_lens, self.blank, self.fastemit_lambda)
if __name__ == '__main__':
loss = RNNTLoss(fastemit_lambda=0.01)
torch.manual_seed(0)
acts = torch.randn(1, 2, 5, 3)
labels = torch.tensor([[0, 2, 1, 2]], dtype=torch.int32)
act_lens = torch.tensor([2], dtype=torch.int32)
label_lens = torch.tensor([len(labels[0])], dtype=torch.int32)
loss_val = loss(acts, labels, act_lens, label_lens)
| en | 0.763438 | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright 2018-2019, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # check_type(log_probs, torch.float32, "log_probs") Computes probability of the forward variable alpha. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the forward variable probabilities - alpha of shape [T, U] and the log likelihood of this forward step. Computes probability of the backward variable beta. Args: log_probs: Tensor of shape [T, U, V+1] labels: Labels of shape [B, U] blank: Index of the blank token. Returns: A tuple of the backward variable probabilities - beta of shape [T, U] and the log likelihood of this backward step. Computes the gradients of the log_probs with respect to the log probability of this step occuring. Args: Args: log_probs: Tensor of shape [T, U, V+1] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Tensor of shape [T, U] which represents the backward variable. labels: Labels of shape [B, U] blank: Index of the blank token. Returns: Gradients of shape [T, U, V+1] with respect to the forward log probability # == alphas[T - 1, U - 1] + betas[T - 1, U - 1] # // grad to last blank transition # // grad to label transition Describes the computation of FastEmit regularization from the paper - [FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization](https://arxiv.org/abs/2010.11148) Args: log_probs: Tensor of shape [T, U, V+1] labels: Unused. Labels of shape [B, U] alphas: Tensor of shape [T, U] which represents the forward variable. betas: Unused. Tensor of shape [T, U] which represents the backward variable. blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. 
Returns: The regularized negative log likelihood - lambda * P˜(At, u|x) # General calculation of the fastemit regularization alignments # alignment = np.zeros((T, U), dtype='float32') # # for t in range(0, T): # alignment[t, U - 1] = alphas[t, U - 1] + betas[t, U - 1] # # for t in range(0, T): # for u in range(0, U - 1): # emit = alphas[t, u] + log_probs[t, u, labels[u]] + betas[t, u + 1] # alignment[t, u] = emit # reg = fastemit_lambda * (alignment[T - 1, U - 1]) # The above is equivalent to below, without need of computing above # reg = fastemit_lambda * (alphas[T - 1, U - 1] + betas[T - 1, U - 1]) # The above is also equivalent to below, without need of computing the betas alignment matrix Args: log_probs: 3D array with shape [input len, output len + 1, vocab size] labels: 1D array with shape [output time steps] blank: Index of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: float: The negative log-likelihood 3D array: Gradients with respect to the unnormalized input actications 2d arrays: Alphas matrix (TxU) 2d array: Betas matrix (TxU) Compute the transducer loss of the batch. Args: log_probs: [B, T, U, V+1]. Activation matrix normalized with log-softmax. labels: [B, U+1] - ground truth labels with <SOS> padded as blank token in the beginning. flen: Length vector of the acoustic sequence. glen: Length vector of the target sequence. blank: Id of the blank token. fastemit_lambda: Float scaling factor for FastEmit regularization. Returns: Batch of transducer forward log probabilities (loss) and the gradients of the activation matrix. Parameters: `blank_label` (int): default 0 - label index of blank token fastemit_lambda: Float scaling factor for FastEmit regularization. | 2.033377 | 2 |
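A reading aid for the record above (not part of the original file): the nested loops in forward_pass compute the standard RNN-T forward recursion in the log domain. Writing p_{t,u}(k) for the probability of emitting symbol k at lattice node (t, u) and \varnothing for the blank token (notation assumed here), the recursion, the loss, and the FastEmit term are:

\alpha_{t,u} = \operatorname{logaddexp}\big(\alpha_{t-1,u} + \log p_{t-1,u}(\varnothing),\; \alpha_{t,u-1} + \log p_{t,u-1}(y_u)\big)

\log P(\mathbf{y} \mid \mathbf{x}) = \alpha_{T-1,U-1} + \log p_{T-1,U-1}(\varnothing), \qquad \mathrm{loss} = -\log P(\mathbf{y} \mid \mathbf{x})

fastemit_regularization then contributes an extra -\lambda\,\big(\alpha_{T-1,U-1} + \log p_{T-1,U-1}(\varnothing)\big) to each per-utterance cost in transduce_batch, which is exactly the shortcut noted in its commented-out alignment derivation.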
dataset_specifications/swirls.py | joeloskarsson/CGAN-regression | 12 | 258 |
import numpy as np
import math
from dataset_specifications.dataset import Dataset
class SwirlsSet(Dataset):
def __init__(self):
super().__init__()
self.name = "swirls"
self.n_samples = {
"train": 2000,
"val": 1000,
"test": 1000,
}
self.y_dim = 2
# 2D heteroskedastic Gaussian mixture model with 2 components
def sample_ys(self, xs):
n = xs.shape[0]
components = np.random.randint(2, size=n) # uniform 0,1
angles = math.pi*components + (math.pi/2.)*xs[:,0] # Angles to centers
means = np.stack((np.cos(angles), np.sin(angles)), axis=1)
        noise = np.random.randn(n, 2) # samples from a 2D Gaussian
std = 0.3 - 0.2*np.abs(xs-1.)
ys = means + std*noise
return ys
def sample(self, n):
xs = np.random.uniform(low=0., high=2., size=(n,1))
ys = self.sample_ys(xs)
return np.concatenate((xs, ys), axis=1)
| import numpy as np
import math
from dataset_specifications.dataset import Dataset
class SwirlsSet(Dataset):
def __init__(self):
super().__init__()
self.name = "swirls"
self.n_samples = {
"train": 2000,
"val": 1000,
"test": 1000,
}
self.y_dim = 2
# 2D heteroskedastic Gaussian mixture model with 2 components
def sample_ys(self, xs):
n = xs.shape[0]
components = np.random.randint(2, size=n) # uniform 0,1
angles = math.pi*components + (math.pi/2.)*xs[:,0] # Angles to centers
means = np.stack((np.cos(angles), np.sin(angles)), axis=1)
        noise = np.random.randn(n, 2) # samples from a 2D Gaussian
std = 0.3 - 0.2*np.abs(xs-1.)
ys = means + std*noise
return ys
def sample(self, n):
xs = np.random.uniform(low=0., high=2., size=(n,1))
ys = self.sample_ys(xs)
        return np.concatenate((xs, ys), axis=1) | en | 0.779509 | # 2D heteroskedastic Gaussian mixture model with 2 components # uniform 0,1 # Angles to centers # samples from a 2D Gaussian | 3.198799 | 3 |
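A minimal usage sketch for the SwirlsSet record above (the import path follows the record's repo path; how the array is consumed downstream is an assumption):

from dataset_specifications.swirls import SwirlsSet

dataset = SwirlsSet()
train = dataset.sample(dataset.n_samples["train"])   # array of shape (2000, 3)
xs, ys = train[:, :1], train[:, 1:]                  # x in [0, 2], y is 2-dimensional
# Each y lies near one of two centers at angle pi*component + (pi/2)*x on the unit circle,
# with noise std 0.3 - 0.2*|x - 1|, i.e. a heteroskedastic two-component mixture.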
pytorch-frontend/tools/code_coverage/oss_coverage.py | AndreasKaratzas/stonne | 40 | 259 |
#!/usr/bin/env python
import time
from package.oss.cov_json import get_json_report
from package.oss.init import initialization
from package.tool.summarize_jsons import summarize_jsons
from package.util.setting import TestPlatform
def report_coverage() -> None:
start_time = time.time()
(options, test_list, interested_folders) = initialization()
# run cpp tests
get_json_report(test_list, options)
# collect coverage data from json profiles
if options.need_summary:
summarize_jsons(
test_list, interested_folders, [""], TestPlatform.OSS, start_time
)
if __name__ == "__main__":
report_coverage()
| #!/usr/bin/env python
import time
from package.oss.cov_json import get_json_report
from package.oss.init import initialization
from package.tool.summarize_jsons import summarize_jsons
from package.util.setting import TestPlatform
def report_coverage() -> None:
start_time = time.time()
(options, test_list, interested_folders) = initialization()
# run cpp tests
get_json_report(test_list, options)
# collect coverage data from json profiles
if options.need_summary:
summarize_jsons(
test_list, interested_folders, [""], TestPlatform.OSS, start_time
)
if __name__ == "__main__":
report_coverage() | en | 0.516494 | #!/usr/bin/env python # run cpp tests # collect coverage data from json profiles | 1.993815 | 2 |
twitoff/predict.py | dscohen75/twitoff | 0 | 260 | import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likely to say a given Tweet.
Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
Returns 1 corresponding to 1st user passed in, or 0 for second.
"""
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_vect = np.array([tweet.vect for tweet in user1.tweets])
user2_vect = np.array([tweet.vect for tweet in user2.tweets])
vects = np.vstack([user1_vect, user2_vect])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(vects, labels)
# We've done the model fitting, now to predict...
hypo_tweet_vect = vectorize_tweet(tweet_text)
return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
| import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user1_name, user2_name, tweet_text):
"""
Determine and return which user is more likely to say a given Tweet.
Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!')
Returns 1 corresponding to 1st user passed in, or 0 for second.
"""
user1 = User.query.filter(User.name == user1_name).one()
user2 = User.query.filter(User.name == user2_name).one()
user1_vect = np.array([tweet.vect for tweet in user1.tweets])
user2_vect = np.array([tweet.vect for tweet in user2.tweets])
vects = np.vstack([user1_vect, user2_vect])
labels = np.concatenate([np.ones(len(user1.tweets)),
np.zeros(len(user2.tweets))])
log_reg = LogisticRegression().fit(vects, labels)
# We've done the model fitting, now to predict...
hypo_tweet_vect = vectorize_tweet(tweet_text)
return log_reg.predict(np.array(hypo_tweet_vect).reshape(1,-1))
| en | 0.860636 | Determine and return which user is more likely to say a given Tweet. Example: predict_user('ausen', 'elonmusk', 'Lambda School Rocks!') Returns 1 corresponding to 1st user passed in, or 0 for second. # We've done the model fitting, now to predict... | 3.278383 | 3 |
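The record above reduces to a standard scikit-learn pattern: stack the two users' tweet vectors, fit a logistic regression, and score one new vector. A self-contained sketch of that core step with random stand-in vectors (the dimensions and variable names are made up; only the numpy/sklearn calls mirror the code above):

import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
user1_vects = rng.random((20, 5))   # stand-in for user1's stored tweet embeddings
user2_vects = rng.random((15, 5))   # stand-in for user2's stored tweet embeddings

vects = np.vstack([user1_vects, user2_vects])
labels = np.concatenate([np.ones(len(user1_vects)), np.zeros(len(user2_vects))])

log_reg = LogisticRegression().fit(vects, labels)

hypo_tweet_vect = rng.random(5)     # stand-in for vectorize_tweet(tweet_text)
prediction = log_reg.predict(hypo_tweet_vect.reshape(1, -1))   # 1.0 -> first user, 0.0 -> second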
groupthink/version.py | emanuelfeld/groupthink | 1 | 261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of groupthink.
# https://github.com/emanuelfeld/groupthink
# This project is in the public domain within the United States.
# Additionally, the Government of the District of Columbia waives
# copyright and related rights in the work worldwide through the CC0 1.0
# Universal public domain dedication.
__version__ = '1.0.0' # NOQA
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of groupthink.
# https://github.com/emanuelfeld/groupthink
# This project is in the public domain within the United States.
# Additionally, the Government of the District of Columbia waives
# copyright and related rights in the work worldwide through the CC0 1.0
# Universal public domain dedication.
__version__ = '1.0.0' # NOQA
| en | 0.857192 | #!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of groupthink. # https://github.com/emanuelfeld/groupthink # This project is in the public domain within the United States. # Additionally, the Government of the District of Columbia waives # copyright and related rights in the work worldwide through the CC0 1.0 # Universal public domain dedication. # NOQA | 1.645718 | 2 |
feeder/feeder_ucf.py | George-Polya/st-gcn | 0 | 262 | # sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
class Feeder_UCF(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition in kinetics-skeleton dataset
Arguments:
data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
label_path: the path to label
random_choose: If true, randomly choose a portion of the input sequence
        random_shift: If true, randomly pad zeros at the beginning or end of sequence
random_move: If true, perform randomly but continuously changed transformation to input sequence
window_size: The length of the output sequence
        pose_matching: If true, match the pose between two frames
num_person_in: The number of people the feeder can observe in the input sequence
        num_person_out: The number of people the feeder keeps in the output sequence
debug: If true, only use the first 100 samples
"""
def __init__(self,
data_path,
label_path,
ignore_empty_sample=True,
random_choose=False,
random_shift=False,
random_move=False,
window_size=-1,
pose_matching=False,
num_person_in=5,
num_person_out=2,
debug=False):
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.num_person_in = num_person_in
self.num_person_out = num_person_out
self.pose_matching = pose_matching
self.ignore_empty_sample = ignore_empty_sample
self.load_data()
def load_data(self):
# load file list
self.sample_name = os.listdir(self.data_path)
if self.debug:
self.sample_name = self.sample_name[0:2]
# load label
label_path = self.label_path
with open(label_path) as f:
label_info = json.load(f)
sample_id = [name.split('.')[0] for name in self.sample_name]
self.label = np.array(
[label_info[id]['label_index'] for id in sample_id])
has_skeleton = np.array(
[label_info[id]['has_skeleton'] for id in sample_id])
        # ignore the samples which do not have a skeleton sequence
if self.ignore_empty_sample:
self.sample_name = [
s for h, s in zip(has_skeleton, self.sample_name) if h
]
self.label = self.label[has_skeleton]
# output data shape (N, C, T, V, M)
self.N = len(self.sample_name) #sample
self.C = 3 #channel
self.T = 90000 #frame
self.V = 18 #joint
self.M = self.num_person_out #person
def __len__(self):
return len(self.sample_name)
def __iter__(self):
return self
def __getitem__(self, index):
# output shape (C, T, V, M)
# get data
sample_name = self.sample_name[index]
sample_path = os.path.join(self.data_path, sample_name)
with open(sample_path, 'r') as f:
video_info = json.load(f)
# fill data_numpy
data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in))
count = 0
for frame_info in video_info['data']:
frame_index = frame_info['frame_index']
for m, skeleton_info in enumerate(frame_info["skeleton"]):
if m >= self.num_person_in:
break
pose = skeleton_info['pose']
score = skeleton_info['score']
frame_index = int(frame_index)
# print(frame_index)
data_numpy[0, frame_index, :, m] = pose[0::2]
data_numpy[1, frame_index, :, m] = pose[1::2]
data_numpy[2, frame_index, :, m] = score
# count += 1
# print(" ",count, " ")
# centralization
data_numpy[0:2] = data_numpy[0:2] - 0.5
data_numpy[0][data_numpy[2] == 0] = 0
data_numpy[1][data_numpy[2] == 0] = 0
# get & check label index
label = video_info['label_index']
assert (self.label[index] == label)
# data augmentation
if self.random_shift:
data_numpy = tools.random_shift(data_numpy)
if self.random_choose:
data_numpy = tools.random_choose(data_numpy, self.window_size)
elif self.window_size > 0:
data_numpy = tools.auto_pading(data_numpy, self.window_size)
if self.random_move:
data_numpy = tools.random_move(data_numpy)
# sort by score
sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
for t, s in enumerate(sort_index):
data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2,
0))
data_numpy = data_numpy[:, :, :, 0:self.num_person_out]
# match poses between 2 frames
if self.pose_matching:
data_numpy = tools.openpose_match(data_numpy)
return data_numpy, label
def top_k(self, score, top_k):
assert (all(self.label >= 0))
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def top_k_by_category(self, score, top_k):
assert (all(self.label >= 0))
return tools.top_k_by_category(self.label, score, top_k)
def calculate_recall_precision(self, score):
assert (all(self.label >= 0))
return tools.calculate_recall_precision(self.label, score)
| # sys
import os
import sys
import numpy as np
import random
import pickle
import json
# torch
import torch
import torch.nn as nn
from torchvision import datasets, transforms
# operation
from . import tools
class Feeder_UCF(torch.utils.data.Dataset):
""" Feeder for skeleton-based action recognition in kinetics-skeleton dataset
Arguments:
data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M)
label_path: the path to label
random_choose: If true, randomly choose a portion of the input sequence
        random_shift: If true, randomly pad zeros at the beginning or end of sequence
random_move: If true, perform randomly but continuously changed transformation to input sequence
window_size: The length of the output sequence
        pose_matching: If true, match the pose between two frames
num_person_in: The number of people the feeder can observe in the input sequence
        num_person_out: The number of people the feeder keeps in the output sequence
debug: If true, only use the first 100 samples
"""
def __init__(self,
data_path,
label_path,
ignore_empty_sample=True,
random_choose=False,
random_shift=False,
random_move=False,
window_size=-1,
pose_matching=False,
num_person_in=5,
num_person_out=2,
debug=False):
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.num_person_in = num_person_in
self.num_person_out = num_person_out
self.pose_matching = pose_matching
self.ignore_empty_sample = ignore_empty_sample
self.load_data()
def load_data(self):
# load file list
self.sample_name = os.listdir(self.data_path)
if self.debug:
self.sample_name = self.sample_name[0:2]
# load label
label_path = self.label_path
with open(label_path) as f:
label_info = json.load(f)
sample_id = [name.split('.')[0] for name in self.sample_name]
self.label = np.array(
[label_info[id]['label_index'] for id in sample_id])
has_skeleton = np.array(
[label_info[id]['has_skeleton'] for id in sample_id])
        # ignore the samples which do not have a skeleton sequence
if self.ignore_empty_sample:
self.sample_name = [
s for h, s in zip(has_skeleton, self.sample_name) if h
]
self.label = self.label[has_skeleton]
# output data shape (N, C, T, V, M)
self.N = len(self.sample_name) #sample
self.C = 3 #channel
self.T = 90000 #frame
self.V = 18 #joint
self.M = self.num_person_out #person
def __len__(self):
return len(self.sample_name)
def __iter__(self):
return self
def __getitem__(self, index):
# output shape (C, T, V, M)
# get data
sample_name = self.sample_name[index]
sample_path = os.path.join(self.data_path, sample_name)
with open(sample_path, 'r') as f:
video_info = json.load(f)
# fill data_numpy
data_numpy = np.zeros((self.C, self.T, self.V, self.num_person_in))
count = 0
for frame_info in video_info['data']:
frame_index = frame_info['frame_index']
for m, skeleton_info in enumerate(frame_info["skeleton"]):
if m >= self.num_person_in:
break
pose = skeleton_info['pose']
score = skeleton_info['score']
frame_index = int(frame_index)
# print(frame_index)
data_numpy[0, frame_index, :, m] = pose[0::2]
data_numpy[1, frame_index, :, m] = pose[1::2]
data_numpy[2, frame_index, :, m] = score
# count += 1
# print(" ",count, " ")
# centralization
data_numpy[0:2] = data_numpy[0:2] - 0.5
data_numpy[0][data_numpy[2] == 0] = 0
data_numpy[1][data_numpy[2] == 0] = 0
# get & check label index
label = video_info['label_index']
assert (self.label[index] == label)
# data augmentation
if self.random_shift:
data_numpy = tools.random_shift(data_numpy)
if self.random_choose:
data_numpy = tools.random_choose(data_numpy, self.window_size)
elif self.window_size > 0:
data_numpy = tools.auto_pading(data_numpy, self.window_size)
if self.random_move:
data_numpy = tools.random_move(data_numpy)
# sort by score
sort_index = (-data_numpy[2, :, :, :].sum(axis=1)).argsort(axis=1)
for t, s in enumerate(sort_index):
data_numpy[:, t, :, :] = data_numpy[:, t, :, s].transpose((1, 2,
0))
data_numpy = data_numpy[:, :, :, 0:self.num_person_out]
# match poses between 2 frames
if self.pose_matching:
data_numpy = tools.openpose_match(data_numpy)
return data_numpy, label
def top_k(self, score, top_k):
assert (all(self.label >= 0))
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def top_k_by_category(self, score, top_k):
assert (all(self.label >= 0))
return tools.top_k_by_category(self.label, score, top_k)
def calculate_recall_precision(self, score):
assert (all(self.label >= 0))
return tools.calculate_recall_precision(self.label, score)
| en | 0.69298 | # sys # torch # operation Feeder for skeleton-based action recognition in kinetics-skeleton dataset Arguments: data_path: the path to '.npy' data, the shape of data should be (N, C, T, V, M) label_path: the path to label random_choose: If true, randomly choose a portion of the input sequence random_shift: If true, randomly pad zeros at the begining or end of sequence random_move: If true, perform randomly but continuously changed transformation to input sequence window_size: The length of the output sequence pose_matching: If ture, match the pose between two frames num_person_in: The number of people the feeder can observe in the input sequence num_person_out: The number of people the feeder in the output sequence debug: If true, only use the first 100 samples # load file list # load label # ignore the samples which does not has skeleton sequence # output data shape (N, C, T, V, M) #sample #channel #frame #joint #person # output shape (C, T, V, M) # get data # fill data_numpy # print(frame_index) # count += 1 # print(" ",count, " ") # centralization # get & check label index # data augmentation # sort by score # match poses between 2 frames | 2.710792 | 3 |
apps/core/migrations/0001_initial.py | Visualway/Vitary | 4 | 263 |
# Generated by Django 4.0.2 on 2022-03-02 03:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('color', models.CharField(choices=[('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'), ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'), ('dark', 'Black'), ('white', 'White')], max_length=50)),
('special', models.BooleanField(default=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Requirments',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.badge')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Abuse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abuse_type', models.CharField(choices=[('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'), ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other')], max_length=50)),
('description', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('to_vit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vit.vit')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Abuses',
'ordering': ['-date'],
},
),
]
| # Generated by Django 4.0.2 on 2022-03-02 03:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('color', models.CharField(choices=[('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'), ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'), ('dark', 'Black'), ('white', 'White')], max_length=50)),
('special', models.BooleanField(default=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Requirments',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.badge')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Abuse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abuse_type', models.CharField(choices=[('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'), ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other')], max_length=50)),
('description', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('to_vit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vit.vit')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Abuses',
'ordering': ['-date'],
},
),
] | en | 0.854728 | # Generated by Django 4.0.2 on 2022-03-02 03:29 | 1.787997 | 2 |
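For readability, a sketch of the models.py this initial migration would correspond to, reconstructed only from the fields listed above (the Vit model name behind to='vit.vit' and the import layout are assumptions):

from django.conf import settings
from django.db import models


class Badge(models.Model):
    COLOR_CHOICES = [
        ('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'),
        ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'),
        ('dark', 'Black'), ('white', 'White'),
    ]
    name = models.CharField(max_length=50)
    description = models.TextField()
    color = models.CharField(max_length=50, choices=COLOR_CHOICES)
    special = models.BooleanField(default=False)

    class Meta:
        ordering = ['name']


class Requirments(models.Model):   # spelling kept as in the migration
    name = models.CharField(max_length=50)
    description = models.TextField()
    badge = models.ForeignKey(Badge, on_delete=models.CASCADE)

    class Meta:
        ordering = ['name']


class Abuse(models.Model):
    ABUSE_CHOICES = [
        ('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'),
        ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other'),
    ]
    abuse_type = models.CharField(max_length=50, choices=ABUSE_CHOICES)
    description = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    to_vit = models.ForeignKey('vit.Vit', on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

    class Meta:
        verbose_name_plural = 'Abuses'
        ordering = ['-date']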
tests/common/schema_registry.py | epiphany-platform/cdl-temporary | 8 | 264 | import os
import subprocess
import time
import grpc
import tests.rpc.proto.schema_registry_pb2 as pb2
import tests.rpc.proto.schema_registry_pb2_grpc as pb2_grpc
from tests.common.postgres import PostgresConfig
EXE = os.getenv('SCHEMA_REGISTRY_EXE') or 'schema-registry'
class SchemaRegistry:
def __init__(self,
edge_registry_addr,
kafka_brokers,
postgres_config: PostgresConfig,
kafka_group_id='schema_registry',
input_port='50101',
initial_schema=None):
self.edge_registry_addr = edge_registry_addr
self.kafka_brokers = kafka_brokers
self.kafka_group_id = kafka_group_id
self.input_port = input_port
self.postgres_config = postgres_config
self.initial_schema = initial_schema
self.svc = None
def start(self):
env = {
"SCHEMA_REGISTRY_COMMUNICATION_METHOD": 'kafka',
"SCHEMA_REGISTRY_KAFKA__BROKERS": self.kafka_brokers,
"SCHEMA_REGISTRY_KAFKA__GROUP_ID": self.kafka_group_id,
"SCHEMA_REGISTRY_INPUT_PORT": self.input_port,
"SCHEMA_REGISTRY_MONITORING__OTEL_SERVICE_NAME": 'schema-registry',
"SCHEMA_REGISTRY_MONITORING__STATUS_PORT": '0',
"SCHEMA_REGISTRY_SERVICES__EDGE_REGISTRY_URL": self.edge_registry_addr,
**self.postgres_config.to_dict("SCHEMA_REGISTRY")
}
if self.initial_schema is not None:
env.update(SCHEMA_REGISTRY_IMPORT_FILE=self.initial_schema)
self.svc = subprocess.Popen([EXE], env=env)
time.sleep(3)
return self
def stop(self):
self.svc.kill()
def create_schema(self, name, destination, query, body, schema_type):
with grpc.insecure_channel(f"localhost:{self.input_port}") as channel:
stub = pb2_grpc.SchemaRegistryStub(channel)
resp = stub.AddSchema(
pb2.NewSchema(
definition=bytes(body, 'utf-8'),
name=name,
insert_destination=destination,
query_address=query,
schema_type=pb2.SchemaType(schema_type=schema_type)))
return resp.id
| import os
import subprocess
import time
import grpc
import tests.rpc.proto.schema_registry_pb2 as pb2
import tests.rpc.proto.schema_registry_pb2_grpc as pb2_grpc
from tests.common.postgres import PostgresConfig
EXE = os.getenv('SCHEMA_REGISTRY_EXE') or 'schema-registry'
class SchemaRegistry:
def __init__(self,
edge_registry_addr,
kafka_brokers,
postgres_config: PostgresConfig,
kafka_group_id='schema_registry',
input_port='50101',
initial_schema=None):
self.edge_registry_addr = edge_registry_addr
self.kafka_brokers = kafka_brokers
self.kafka_group_id = kafka_group_id
self.input_port = input_port
self.postgres_config = postgres_config
self.initial_schema = initial_schema
self.svc = None
def start(self):
env = {
"SCHEMA_REGISTRY_COMMUNICATION_METHOD": 'kafka',
"SCHEMA_REGISTRY_KAFKA__BROKERS": self.kafka_brokers,
"SCHEMA_REGISTRY_KAFKA__GROUP_ID": self.kafka_group_id,
"SCHEMA_REGISTRY_INPUT_PORT": self.input_port,
"SCHEMA_REGISTRY_MONITORING__OTEL_SERVICE_NAME": 'schema-registry',
"SCHEMA_REGISTRY_MONITORING__STATUS_PORT": '0',
"SCHEMA_REGISTRY_SERVICES__EDGE_REGISTRY_URL": self.edge_registry_addr,
**self.postgres_config.to_dict("SCHEMA_REGISTRY")
}
if self.initial_schema is not None:
env.update(SCHEMA_REGISTRY_IMPORT_FILE=self.initial_schema)
self.svc = subprocess.Popen([EXE], env=env)
time.sleep(3)
return self
def stop(self):
self.svc.kill()
def create_schema(self, name, destination, query, body, schema_type):
with grpc.insecure_channel(f"localhost:{self.input_port}") as channel:
stub = pb2_grpc.SchemaRegistryStub(channel)
resp = stub.AddSchema(
pb2.NewSchema(
definition=bytes(body, 'utf-8'),
name=name,
insert_destination=destination,
query_address=query,
schema_type=pb2.SchemaType(schema_type=schema_type)))
return resp.id
| none | 1 | 2.099643 | 2 |
|
testsuite/tests/apicast/policy/routing/test_routing_policy_catch_all.py | dlaso99/3scale-tests | 5 | 265 | """
When a routing policy is set with an empty condition, it should be loaded correctly and should route all
the requests to a correct backend.
"""
from urllib.parse import urlparse
import pytest
from packaging.version import Version # noqa # pylint: disable=unused-import
from testsuite import TESTED_VERSION, rawobj # noqa # pylint: disable=unused-import
from testsuite.echoed_request import EchoedRequest
pytestmark = [
pytest.mark.skipif("TESTED_VERSION < Version('2.11')"),
pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6415")]
@pytest.fixture(scope="module")
def service_proxy_settings(private_base_url):
"""
    Asserts that the echo API is used as the default backend
"""
return rawobj.Proxy(private_base_url("echo_api"))
@pytest.fixture(scope="module")
def service(service, private_base_url):
"""
Set the routing policy to route all requests to httpbin.
(Using the logic that an empty condition should act as a catch all rule)
"""
proxy = service.proxy.list()
proxy.policies.insert(0, rawobj.PolicyConfig(
"routing", {
"rules": [
{
"url": private_base_url("httpbin"),
"condition": {},
}]}))
return service
def test_routing_policy_without_header(api_client, private_base_url):
"""
    Sends a request and asserts that the routing policy is active and the
    request is routed to the correct backend (httpbin)
"""
parsed_url = urlparse(private_base_url("httpbin"))
response = api_client().get("/get")
assert response.status_code == 200
echoed_request = EchoedRequest.create(response)
assert echoed_request.headers["Host"] == parsed_url.hostname
| """
When a routing policy is set with an empty condition, it should be loaded correctly and should route all
the requests to a correct backend.
"""
from urllib.parse import urlparse
import pytest
from packaging.version import Version # noqa # pylint: disable=unused-import
from testsuite import TESTED_VERSION, rawobj # noqa # pylint: disable=unused-import
from testsuite.echoed_request import EchoedRequest
pytestmark = [
pytest.mark.skipif("TESTED_VERSION < Version('2.11')"),
pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-6415")]
@pytest.fixture(scope="module")
def service_proxy_settings(private_base_url):
"""
    Asserts that the echo API is used as the default backend
"""
return rawobj.Proxy(private_base_url("echo_api"))
@pytest.fixture(scope="module")
def service(service, private_base_url):
"""
Set the routing policy to route all requests to httpbin.
(Using the logic that an empty condition should act as a catch all rule)
"""
proxy = service.proxy.list()
proxy.policies.insert(0, rawobj.PolicyConfig(
"routing", {
"rules": [
{
"url": private_base_url("httpbin"),
"condition": {},
}]}))
return service
def test_routing_policy_without_header(api_client, private_base_url):
"""
    Sends a request and asserts that the routing policy is active and the
    request is routed to the correct backend (httpbin)
"""
parsed_url = urlparse(private_base_url("httpbin"))
response = api_client().get("/get")
assert response.status_code == 200
echoed_request = EchoedRequest.create(response)
assert echoed_request.headers["Host"] == parsed_url.hostname
| en | 0.918448 | When a routing policy is set with an empty condition, it should be loaded correctly and should route all the requests to a correct backend. # noqa # pylint: disable=unused-import # noqa # pylint: disable=unused-import Asserts, that echo api is used as the default backend Set the routing policy to route all requests to httpbin. (Using the logic that an empty condition should act as a catch all rule) Sends a request and asserts, that the routing policy is active and the requests is routed to the correct backend (httpbin) | 2.495696 | 2 |
ceilometer/data_processing/notifications.py | vmturbo/ceilometer | 0 | 266 |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
from ceilometer import sample
OPTS = [
cfg.StrOpt('sahara_control_exchange',
default='sahara',
help="Exchange name for Data Processing notifications."),
]
cfg.CONF.register_opts(OPTS)
SERVICE = 'sahara'
class DataProcessing(plugin.NotificationBase):
resource_name = '%s.cluster' % SERVICE
@property
def event_types(self):
return [
'%s.create' % self.resource_name,
'%s.update' % self.resource_name,
'%s.delete' % self.resource_name,
]
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target
        It defines the exchange and topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.sahara_control_exchange)
for topic in conf.notification_topics]
def process_notification(self, message):
name = message['event_type'].replace(self.resource_name, 'cluster')
project_id = message['payload']['project_id']
user_id = message['_context_user_id']
yield sample.Sample.from_notification(
name=name,
type=sample.TYPE_DELTA,
unit='cluster',
volume=1,
resource_id=message['payload']['cluster_id'],
user_id=user_id,
project_id=project_id,
message=message)
| # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo.config import cfg
import oslo.messaging
from ceilometer import plugin
from ceilometer import sample
OPTS = [
cfg.StrOpt('sahara_control_exchange',
default='sahara',
help="Exchange name for Data Processing notifications."),
]
cfg.CONF.register_opts(OPTS)
SERVICE = 'sahara'
class DataProcessing(plugin.NotificationBase):
resource_name = '%s.cluster' % SERVICE
@property
def event_types(self):
return [
'%s.create' % self.resource_name,
'%s.update' % self.resource_name,
'%s.delete' % self.resource_name,
]
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo.messaging.Target
        It defines the exchange and topics to be connected for this plugin.
"""
return [oslo.messaging.Target(topic=topic,
exchange=conf.sahara_control_exchange)
for topic in conf.notification_topics]
def process_notification(self, message):
name = message['event_type'].replace(self.resource_name, 'cluster')
project_id = message['payload']['project_id']
user_id = message['_context_user_id']
yield sample.Sample.from_notification(
name=name,
type=sample.TYPE_DELTA,
unit='cluster',
volume=1,
resource_id=message['payload']['cluster_id'],
user_id=user_id,
project_id=project_id,
message=message) | en | 0.856879 | # Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Return a sequence of oslo.messaging.Target It is defining the exchange and topics to be connected for this plugin. | 1.827797 | 2 |
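For orientation, a hypothetical notification message that process_notification above would accept, showing only the keys the method actually reads (all values are made up):

message = {
    'event_type': 'sahara.cluster.create',
    '_context_user_id': 'some-user-id',
    'payload': {
        'project_id': 'some-project-id',
        'cluster_id': 'some-cluster-id',
    },
}
# DataProcessing.process_notification(message) yields a single delta sample named
# 'cluster.create' with unit='cluster', volume=1, resource_id='some-cluster-id',
# user_id='some-user-id' and project_id='some-project-id'.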
tests/src/Diksha_Reports/usage_by_textbook/download_all_collection_records.py | JalajaTR/cQube | 0 | 267 | <gh_stars>0
import os
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from get_dir import pwd
from reuse_func import GetData
class All_records_download():
def __init__(self,driver):
self.driver = driver
self.filename =''
def test_download_csv(self):
self.data = GetData()
self.p = pwd()
self.driver.find_element_by_xpath(Data.hyper_link).click()
self.data.page_loading(self.driver)
colltype = Select(self.driver.find_element_by_name('collection_type'))
colltype.select_by_visible_text(' Overall ')
self.data.page_loading(self.driver)
self.driver.find_element_by_id(Data.Download).click()
time.sleep(4)
self.filename = self.p.get_download_dir() + '/collectionType_all_data.csv'
time.sleep(2)
file = os.path.isfile(self.filename)
os.remove(self.filename)
return file
| import os
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from get_dir import pwd
from reuse_func import GetData
class All_records_download():
def __init__(self,driver):
self.driver = driver
self.filename =''
def test_download_csv(self):
self.data = GetData()
self.p = pwd()
self.driver.find_element_by_xpath(Data.hyper_link).click()
self.data.page_loading(self.driver)
colltype = Select(self.driver.find_element_by_name('collection_type'))
colltype.select_by_visible_text(' Overall ')
self.data.page_loading(self.driver)
self.driver.find_element_by_id(Data.Download).click()
time.sleep(4)
self.filename = self.p.get_download_dir() + '/collectionType_all_data.csv'
time.sleep(2)
file = os.path.isfile(self.filename)
os.remove(self.filename)
return file | none | 1 | 2.690723 | 3 |
|
yt_dlp/cookies.py | Naysabots/yt-dlp | 0 | 268 | import contextlib
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
from .aes import (
aes_cbc_decrypt_bytes,
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
try:
import sqlite3
SQLITE_AVAILABLE = True
except ImportError:
# although sqlite3 is part of the standard library, it is possible to compile python without
# sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
SQLITE_AVAILABLE = False
try:
import secretstorage
SECRETSTORAGE_AVAILABLE = True
except ImportError:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = (
'as the `secretstorage` module is not installed. '
'Please install by running `python3 -m pip install secretstorage`.')
except Exception as _err:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}'
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
class YDLLogger:
def __init__(self, ydl=None):
self._ydl = ydl
def debug(self, message):
if self._ydl:
self._ydl.write_debug(message)
def info(self, message):
if self._ydl:
self._ydl.to_screen(f'[Cookies] {message}')
def warning(self, message, only_once=False):
if self._ydl:
self._ydl.report_warning(message, only_once)
def error(self, message):
if self._ydl:
self._ydl.report_error(message)
def progress_bar(self):
"""Return a context manager with a print method. (Optional)"""
# Do not print to files/pipes, loggers, or when --no-progress is used
if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'):
return
file = self._ydl._out_files['error']
try:
if not file.isatty():
return
except BaseException:
return
printer = MultilinePrinter(file, preserve_output=False)
printer.print = lambda message: printer.print_at_line(f'[Cookies] {message}', 0)
return printer
def _create_progress_bar(logger):
if hasattr(logger, 'progress_bar'):
printer = logger.progress_bar()
if printer:
return printer
printer = QuietMultilinePrinter()
printer.print = lambda _: None
return printer
def load_cookies(cookie_file, browser_specification, ydl):
cookie_jars = []
if browser_specification is not None:
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
if cookie_file is not None:
cookie_file = expand_path(cookie_file)
jar = YoutubeDLCookieJar(cookie_file)
if os.access(cookie_file, os.R_OK):
jar.load(ignore_discard=True, ignore_expires=True)
cookie_jars.append(jar)
return _merge_cookie_jars(cookie_jars)
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
if browser_name == 'firefox':
return _extract_firefox_cookies(profile, logger)
elif browser_name == 'safari':
return _extract_safari_cookies(profile, logger)
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError(f'unknown browser: {browser_name}')
def _extract_firefox_cookies(profile, logger):
logger.info('Extracting cookies from firefox')
if not SQLITE_AVAILABLE:
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
if profile is None:
search_root = _firefox_browser_dir()
elif _is_path(profile):
search_root = profile
else:
search_root = os.path.join(_firefox_browser_dir(), profile)
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
jar = YoutubeDLCookieJar()
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _firefox_browser_dir():
if sys.platform in ('linux', 'linux2'):
return os.path.expanduser('~/.mozilla/firefox')
elif sys.platform == 'win32':
return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
# https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
if sys.platform in ('linux', 'linux2'):
config = _config_home()
browser_dir = {
'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(config, 'google-chrome'),
'chromium': os.path.join(config, 'chromium'),
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
}[browser_name]
elif sys.platform == 'win32':
appdata_local = os.path.expandvars('%LOCALAPPDATA%')
appdata_roaming = os.path.expandvars('%APPDATA%')
browser_dir = {
'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'),
'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'),
'chromium': os.path.join(appdata_local, R'Chromium\User Data'),
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
appdata = os.path.expanduser('~/Library/Application Support')
browser_dir = {
'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(appdata, 'Google/Chrome'),
'chromium': os.path.join(appdata, 'Chromium'),
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
}[browser_name]
else:
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
keyring_name = {
'brave': 'Brave',
'chrome': 'Chrome',
'chromium': 'Chromium',
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
}[browser_name]
browsers_without_profiles = {'opera'}
return {
'browser_dir': browser_dir,
'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles
}
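# Illustrative sketch (not part of the original module): for example, on Linux
# `_get_chromium_based_browser_settings('chrome')` returns a dict shaped like
#
#     {'browser_dir': '<XDG config home>/google-chrome',
#      'keyring_name': 'Chrome',
#      'supports_profiles': True}
#
# while for 'opera' `supports_profiles` is False, so `_extract_chrome_cookies` logs an error
# if a named (non-path) profile is passed for it.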
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info(f'Extracting cookies from {browser_name}')
if not SQLITE_AVAILABLE:
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
config = _get_chromium_based_browser_settings(browser_name)
if profile is None:
search_root = config['browser_dir']
elif _is_path(profile):
search_root = profile
config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
else:
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.connection.text_factory = bytes
column_names = _get_column_names(cursor, 'cookies')
secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies')
jar = YoutubeDLCookieJar()
failed_cookies = 0
unencrypted_cookies = 0
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, line in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line)
if not cookie:
failed_cookies += 1
continue
elif not is_encrypted:
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
cursor.connection.close()
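# Illustrative note (not part of the original module): the debug line above prints a version
# breakdown whose keys come from the decryptor's counters plus the 'unencrypted' counter,
# e.g. (made-up numbers) something like:
#
#     cookie version breakdown: {'v10': 123, 'v11': 4, 'other': 0, 'unencrypted': 7}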
def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure):
host_key = host_key.decode('utf-8')
name = name.decode('utf-8')
value = value.decode('utf-8')
path = path.decode('utf-8')
is_encrypted = not value and encrypted_value
if is_encrypted:
value = decryptor.decrypt(encrypted_value)
if value is None:
return is_encrypted, None
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
comment=None, comment_url=None, rest={})
class ChromeCookieDecryptor:
"""
Overview:
Linux:
- cookies are either v10 or v11
- v10: AES-CBC encrypted with a fixed key
- v11: AES-CBC encrypted with an OS protected key (keyring)
        - v11 keys can be stored in various places depending on the active desktop environment [2]
Mac:
- cookies are either v10 or not v10
- v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
- not v10: 'old data' stored as plaintext
Windows:
- cookies are either v10 or not v10
- v10: AES-GCM encrypted with a key which is encrypted with DPAPI
- not v10: encrypted with DPAPI
Sources:
- [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
- [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
- KeyStorageLinux::CreateService
"""
def decrypt(self, encrypted_value):
raise NotImplementedError('Must be implemented by sub classes')
@property
def cookie_counts(self):
raise NotImplementedError('Must be implemented by sub classes')
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
if sys.platform in ('linux', 'linux2'):
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
elif sys.platform == 'darwin':
return MacChromeCookieDecryptor(browser_keyring_name, logger)
elif sys.platform == 'win32':
return WindowsChromeCookieDecryptor(browser_root, logger)
else:
raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}')
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger, *, keyring=None):
self._logger = logger
self._v10_key = self.derive_key(b'peanuts')
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
self._v11_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
        return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
elif version == b'v11':
self._cookie_counts['v11'] += 1
if self._v11_key is None:
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
else:
self._cookie_counts['other'] += 1
return None
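# Illustrative layout sketch (not part of the original module) of the Linux blobs handled by
# LinuxChromeCookieDecryptor.decrypt above:
#
#     b'v10' + AES-CBC(plaintext, key=pbkdf2_sha1(b'peanuts', b'saltysalt', 1, 16))
#     b'v11' + AES-CBC(plaintext, key=pbkdf2_sha1(<keyring password>, b'saltysalt', 1, 16))
#
# i.e. v10 is decryptable without any keyring access, while v11 needs the password obtained
# via _get_linux_keyring_password below.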
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger):
self._logger = logger
password = _get_mac_keyring_password(browser_keyring_name, logger)
self._v10_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
        return pbkdf2_sha1(password, salt=b'saltysalt', iterations=1003, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
else:
self._cookie_counts['other'] += 1
            # other prefixes are considered 'old data', which is stored as plaintext
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return encrypted_value
class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_root, logger):
self._logger = logger
self._v10_key = _get_windows_v10_key(browser_root, logger)
self._cookie_counts = {'v10': 0, 'other': 0}
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
# kNonceLength
nonce_length = 96 // 8
# boringssl
# EVP_AEAD_AES_GCM_TAG_LEN
authentication_tag_length = 16
raw_ciphertext = ciphertext
nonce = raw_ciphertext[:nonce_length]
ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
authentication_tag = raw_ciphertext[-authentication_tag_length:]
return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
else:
self._cookie_counts['other'] += 1
# any other prefix means the data is DPAPI encrypted
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8')
def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
logger.debug('Trying secondary cookie location')
cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
raise FileNotFoundError('could not find safari cookies database')
with open(cookies_path, 'rb') as f:
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
class ParserError(Exception):
pass
class DataParser:
def __init__(self, data, logger):
self._data = data
self.cursor = 0
self._logger = logger
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
data = self._data[self.cursor:end]
self.cursor = end
return data
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
return struct.unpack(data_format, self.read_bytes(4))[0]
def read_double(self, big_endian=False):
data_format = '>d' if big_endian else '<d'
return struct.unpack(data_format, self.read_bytes(8))[0]
def read_cstring(self):
buffer = []
while True:
c = self.read_bytes(1)
if c == b'\x00':
return b''.join(buffer).decode('utf-8')
else:
buffer.append(c)
def skip(self, num_bytes, description='unknown'):
if num_bytes > 0:
self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}')
elif num_bytes < 0:
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
def skip_to_end(self, description='unknown'):
self.skip_to(len(self._data), description)
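# Illustrative sketch (not part of the original module): DataParser is a small cursor-based
# binary reader (little-endian by default). On a hand-built buffer:
def _example_dataparser():
    p = DataParser(b'\x2a\x00\x00\x00' + b'hi\x00', YDLLogger())
    assert p.read_uint() == 42       # 4-byte little-endian unsigned int
    assert p.read_cstring() == 'hi'  # NUL-terminated UTF-8 string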
def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
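# Illustrative check (not part of the original module): Safari timestamps count seconds from
# 2001-01-01 UTC, so _mac_absolute_time_to_posix(0) == 978307200 (the POSIX time of that epoch).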
def _parse_safari_cookies_header(data, logger):
p = DataParser(data, logger)
p.expect_bytes(b'cook', 'database signature')
number_of_pages = p.read_uint(big_endian=True)
page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
return page_sizes, p.cursor
def _parse_safari_cookies_page(data, jar, logger):
p = DataParser(data, logger)
p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
with _create_progress_bar(logger) as progress_bar:
for i, record_offset in enumerate(record_offsets):
progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}')
p.skip_to(record_offset, 'space between records')
record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
p.read_bytes(record_length)
p.skip_to_end('space in between pages')
def _parse_safari_cookies_record(data, jar, logger):
p = DataParser(data, logger)
record_size = p.read_uint()
p.skip(4, 'unknown record field 1')
flags = p.read_uint()
is_secure = bool(flags & 0x0001)
p.skip(4, 'unknown record field 2')
domain_offset = p.read_uint()
name_offset = p.read_uint()
path_offset = p.read_uint()
value_offset = p.read_uint()
p.skip(8, 'unknown record field 3')
expiration_date = _mac_absolute_time_to_posix(p.read_double())
_creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841
try:
p.skip_to(domain_offset)
domain = p.read_cstring()
p.skip_to(name_offset)
name = p.read_cstring()
p.skip_to(path_offset)
path = p.read_cstring()
p.skip_to(value_offset)
value = p.read_cstring()
except UnicodeDecodeError:
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
return record_size
p.skip_to(record_size, 'space at the end of the record')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
return record_size
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
"""
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
        - this data appears to be out of date, but the important parts of the database structure are the same
- there are a few bytes here and there which are skipped during parsing
"""
if jar is None:
jar = YoutubeDLCookieJar()
page_sizes, body_start = _parse_safari_cookies_header(data, logger)
p = DataParser(data[body_start:], logger)
for page_size in page_sizes:
_parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
p.skip_to_end('footer')
return jar
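# Illustrative usage sketch (not part of the original module): parse_safari_cookies operates on
# the raw bytes of a Cookies.binarycookies file, e.g.:
#
#     with open(os.path.expanduser('~/Library/Cookies/Cookies.binarycookies'), 'rb') as f:
#         jar = parse_safari_cookies(f.read())
#     print(len(jar), 'cookies')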
class _LinuxDesktopEnvironment(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
DesktopEnvironment
"""
OTHER = auto()
CINNAMON = auto()
GNOME = auto()
KDE = auto()
PANTHEON = auto()
UNITY = auto()
XFCE = auto()
class _LinuxKeyring(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
SelectedLinuxBackend
"""
KWALLET = auto()
GNOMEKEYRING = auto()
BASICTEXT = auto()
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()
def _get_linux_desktop_environment(env):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
"""
xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
desktop_session = env.get('DESKTOP_SESSION', None)
if xdg_current_desktop is not None:
xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
if xdg_current_desktop == 'Unity':
if desktop_session is not None and 'gnome-fallback' in desktop_session:
return _LinuxDesktopEnvironment.GNOME
else:
return _LinuxDesktopEnvironment.UNITY
elif xdg_current_desktop == 'GNOME':
return _LinuxDesktopEnvironment.GNOME
elif xdg_current_desktop == 'X-Cinnamon':
return _LinuxDesktopEnvironment.CINNAMON
elif xdg_current_desktop == 'KDE':
return _LinuxDesktopEnvironment.KDE
elif xdg_current_desktop == 'Pantheon':
return _LinuxDesktopEnvironment.PANTHEON
elif xdg_current_desktop == 'XFCE':
return _LinuxDesktopEnvironment.XFCE
elif desktop_session is not None:
if desktop_session in ('mate', 'gnome'):
return _LinuxDesktopEnvironment.GNOME
elif 'kde' in desktop_session:
return _LinuxDesktopEnvironment.KDE
elif 'xfce' in desktop_session:
return _LinuxDesktopEnvironment.XFCE
else:
if 'GNOME_DESKTOP_SESSION_ID' in env:
return _LinuxDesktopEnvironment.GNOME
elif 'KDE_FULL_SESSION' in env:
return _LinuxDesktopEnvironment.KDE
return _LinuxDesktopEnvironment.OTHER
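# Illustrative sketch (not part of the original module): the detection above takes any mapping,
# so it can be exercised without touching os.environ:
def _example_desktop_environment_detection():
    assert _get_linux_desktop_environment({'XDG_CURRENT_DESKTOP': 'KDE'}) is _LinuxDesktopEnvironment.KDE
    assert _get_linux_desktop_environment({'DESKTOP_SESSION': 'mate'}) is _LinuxDesktopEnvironment.GNOME
    assert _get_linux_desktop_environment({}) is _LinuxDesktopEnvironment.OTHER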
def _choose_linux_keyring(logger):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
linux_keyring = _LinuxKeyring.BASICTEXT
else:
linux_keyring = _LinuxKeyring.GNOMEKEYRING
return linux_keyring
def _get_kwallet_network_wallet(logger):
""" The name of the wallet used to store network passwords.
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
"""
default_wallet = 'kdewallet'
try:
proc = Popen([
'dbus-send', '--session', '--print-reply=literal',
'--dest=org.kde.kwalletd5',
'/modules/kwalletd5',
'org.kde.KWallet.networkWallet'
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.warning('failed to read NetworkWallet')
return default_wallet
else:
network_wallet = stdout.decode('utf-8').strip()
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
def _get_kwallet_password(browser_keyring_name, logger):
logger.debug('using kwallet-query to obtain password from kwallet')
if shutil.which('kwallet-query') is None:
logger.error('kwallet-query command not found. KWallet and kwallet-query '
                     'must be installed to read from KWallet. kwallet-query should be '
'included in the kwallet package for your distribution')
return b''
network_wallet = _get_kwallet_network_wallet(logger)
try:
proc = Popen([
'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult '
'the kwallet-query man page for details')
return b''
else:
if stdout.lower().startswith(b'failed to read'):
logger.debug('failed to read password from kwallet. Using empty string instead')
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
# just tries to read the value (which kwallet returns "") whereas kwallet-query
# checks hasEntry. To verify this:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
# while starting chrome.
# this may be a bug as the intended behaviour is to generate a random password and store
# it, but that doesn't matter here.
return b''
else:
logger.debug('password found')
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
return b''
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not SECRETSTORAGE_AVAILABLE:
logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
    # The GNOME keyring does not seem to organise keys in the same way as KWallet. Using
    # `dbus-monitor` during startup, it can be observed that Chromium lists all keys and
    # presumably searches for its key in the list, so we must do the same here.
# https://github.com/jaraco/keyring/issues/556
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
    # Note: Chrome/Chromium can be run with the following command to see which keyring backend
    # it has chosen to use:
    #   chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
    # Chromium also supports a --password-store=<basic|gnome|kwallet> flag, so automatic
    # detection alone will not be sufficient in all cases.
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
logger.debug(f'Chosen keyring: {keyring.name}')
if keyring == _LinuxKeyring.KWALLET:
return _get_kwallet_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.GNOMEKEYRING:
return _get_gnome_keyring_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.BASICTEXT:
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
return None
assert False, f'Unknown keyring {keyring}'
def _get_mac_keyring_password(browser_keyring_name, logger):
logger.debug('using find-generic-password to obtain password from OSX keychain')
try:
proc = Popen(
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
return None
def _get_windows_v10_key(browser_root, logger):
path = _find_most_recently_used_file(browser_root, 'Local State', logger)
if path is None:
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')
return None
return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
def pbkdf2_sha1(password, salt, iterations, key_length):
return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
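# Illustrative note (not part of the original module): the fixed Chromium v10 key on Linux is
# simply this helper applied to the browser's hard-coded inputs,
#
#     pbkdf2_sha1(b'peanuts', salt=b'saltysalt', iterations=1, key_length=16)
#
# yielding a 16-byte AES key; the macOS variant only swaps in the keychain password and uses
# 1003 iterations.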
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
try:
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
except ValueError:
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
return None
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_windows_dpapi(ciphertext, logger):
"""
References:
- https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
"""
from ctypes.wintypes import DWORD
class DATA_BLOB(ctypes.Structure):
_fields_ = [('cbData', DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))]
buffer = ctypes.create_string_buffer(ciphertext)
blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
blob_out = DATA_BLOB()
ret = ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), # pDataIn
None, # ppszDataDescr: human readable description of pDataIn
None, # pOptionalEntropy: salt?
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
return None
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
ctypes.windll.kernel32.LocalFree(blob_out.pbData)
return result
def _config_home():
return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
def _open_database_copy(database_path, tmpdir):
# cannot open sqlite databases if they are already in use (e.g. by the browser)
database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
shutil.copy(database_path, database_copy_path)
conn = sqlite3.connect(database_copy_path)
return conn.cursor()
def _get_column_names(cursor, table_name):
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode('utf-8') for row in table_info]
def _find_most_recently_used_file(root, filename, logger):
# if there are multiple browser profiles, take the most recently used one
i, paths = 0, []
with _create_progress_bar(logger) as progress_bar:
for curr_root, dirs, files in os.walk(root):
for file in files:
i += 1
progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
if file == filename:
paths.append(os.path.join(curr_root, file))
return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
def _merge_cookie_jars(jars):
output_jar = YoutubeDLCookieJar()
for jar in jars:
for cookie in jar:
output_jar.set_cookie(cookie)
if jar.filename is not None:
output_jar.filename = jar.filename
return output_jar
def _is_path(value):
return os.path.sep in value
def _parse_browser_specification(browser_name, profile=None, keyring=None):
if browser_name not in SUPPORTED_BROWSERS:
raise ValueError(f'unsupported browser: "{browser_name}"')
if keyring not in (None, *SUPPORTED_KEYRINGS):
raise ValueError(f'unsupported keyring: "{keyring}"')
if profile is not None and _is_path(profile):
profile = os.path.expanduser(profile)
return browser_name, profile, keyring
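# Illustrative sketch (not part of the original module): _parse_browser_specification validates
# and normalises the (browser, profile, keyring) triple passed in from the command line, e.g.:
#
#     _parse_browser_specification('chrome')                        # -> ('chrome', None, None)
#     _parse_browser_specification('chrome', 'Profile 1')           # -> ('chrome', 'Profile 1', None)
#     _parse_browser_specification('chromium', None, 'KWALLET')     # -> ('chromium', None, 'KWALLET')
#     _parse_browser_specification('chrome', '~/my/profile/dir')    # profile paths get expanduser'd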
| import contextlib
import ctypes
import json
import os
import shutil
import struct
import subprocess
import sys
import tempfile
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from hashlib import pbkdf2_hmac
from .aes import (
aes_cbc_decrypt_bytes,
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .minicurses import MultilinePrinter, QuietMultilinePrinter
from .utils import Popen, YoutubeDLCookieJar, error_to_str, expand_path
try:
import sqlite3
SQLITE_AVAILABLE = True
except ImportError:
# although sqlite3 is part of the standard library, it is possible to compile python without
# sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544
SQLITE_AVAILABLE = False
try:
import secretstorage
SECRETSTORAGE_AVAILABLE = True
except ImportError:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = (
'as the `secretstorage` module is not installed. '
'Please install by running `python3 -m pip install secretstorage`.')
except Exception as _err:
SECRETSTORAGE_AVAILABLE = False
SECRETSTORAGE_UNAVAILABLE_REASON = f'as the `secretstorage` module could not be initialized. {_err}'
CHROMIUM_BASED_BROWSERS = {'brave', 'chrome', 'chromium', 'edge', 'opera', 'vivaldi'}
SUPPORTED_BROWSERS = CHROMIUM_BASED_BROWSERS | {'firefox', 'safari'}
class YDLLogger:
def __init__(self, ydl=None):
self._ydl = ydl
def debug(self, message):
if self._ydl:
self._ydl.write_debug(message)
def info(self, message):
if self._ydl:
self._ydl.to_screen(f'[Cookies] {message}')
def warning(self, message, only_once=False):
if self._ydl:
self._ydl.report_warning(message, only_once)
def error(self, message):
if self._ydl:
self._ydl.report_error(message)
def progress_bar(self):
"""Return a context manager with a print method. (Optional)"""
# Do not print to files/pipes, loggers, or when --no-progress is used
if not self._ydl or self._ydl.params.get('noprogress') or self._ydl.params.get('logger'):
return
file = self._ydl._out_files['error']
try:
if not file.isatty():
return
except BaseException:
return
printer = MultilinePrinter(file, preserve_output=False)
printer.print = lambda message: printer.print_at_line(f'[Cookies] {message}', 0)
return printer
def _create_progress_bar(logger):
if hasattr(logger, 'progress_bar'):
printer = logger.progress_bar()
if printer:
return printer
printer = QuietMultilinePrinter()
printer.print = lambda _: None
return printer
def load_cookies(cookie_file, browser_specification, ydl):
cookie_jars = []
if browser_specification is not None:
browser_name, profile, keyring = _parse_browser_specification(*browser_specification)
cookie_jars.append(extract_cookies_from_browser(browser_name, profile, YDLLogger(ydl), keyring=keyring))
if cookie_file is not None:
cookie_file = expand_path(cookie_file)
jar = YoutubeDLCookieJar(cookie_file)
if os.access(cookie_file, os.R_OK):
jar.load(ignore_discard=True, ignore_expires=True)
cookie_jars.append(jar)
return _merge_cookie_jars(cookie_jars)
def extract_cookies_from_browser(browser_name, profile=None, logger=YDLLogger(), *, keyring=None):
if browser_name == 'firefox':
return _extract_firefox_cookies(profile, logger)
elif browser_name == 'safari':
return _extract_safari_cookies(profile, logger)
elif browser_name in CHROMIUM_BASED_BROWSERS:
return _extract_chrome_cookies(browser_name, profile, keyring, logger)
else:
raise ValueError(f'unknown browser: {browser_name}')
def _extract_firefox_cookies(profile, logger):
logger.info('Extracting cookies from firefox')
if not SQLITE_AVAILABLE:
logger.warning('Cannot extract cookies from firefox without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
if profile is None:
search_root = _firefox_browser_dir()
elif _is_path(profile):
search_root = profile
else:
search_root = os.path.join(_firefox_browser_dir(), profile)
cookie_database_path = _find_most_recently_used_file(search_root, 'cookies.sqlite', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find firefox cookies database in {search_root}')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.execute('SELECT host, name, value, path, expiry, isSecure FROM moz_cookies')
jar = YoutubeDLCookieJar()
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
logger.info(f'Extracted {len(jar)} cookies from firefox')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _firefox_browser_dir():
if sys.platform in ('linux', 'linux2'):
return os.path.expanduser('~/.mozilla/firefox')
elif sys.platform == 'win32':
return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
elif sys.platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/Firefox')
else:
raise ValueError(f'unsupported platform: {sys.platform}')
def _get_chromium_based_browser_settings(browser_name):
# https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md
if sys.platform in ('linux', 'linux2'):
config = _config_home()
browser_dir = {
'brave': os.path.join(config, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(config, 'google-chrome'),
'chromium': os.path.join(config, 'chromium'),
'edge': os.path.join(config, 'microsoft-edge'),
'opera': os.path.join(config, 'opera'),
'vivaldi': os.path.join(config, 'vivaldi'),
}[browser_name]
elif sys.platform == 'win32':
appdata_local = os.path.expandvars('%LOCALAPPDATA%')
appdata_roaming = os.path.expandvars('%APPDATA%')
browser_dir = {
'brave': os.path.join(appdata_local, R'BraveSoftware\Brave-Browser\User Data'),
'chrome': os.path.join(appdata_local, R'Google\Chrome\User Data'),
'chromium': os.path.join(appdata_local, R'Chromium\User Data'),
'edge': os.path.join(appdata_local, R'Microsoft\Edge\User Data'),
'opera': os.path.join(appdata_roaming, R'Opera Software\Opera Stable'),
'vivaldi': os.path.join(appdata_local, R'Vivaldi\User Data'),
}[browser_name]
elif sys.platform == 'darwin':
appdata = os.path.expanduser('~/Library/Application Support')
browser_dir = {
'brave': os.path.join(appdata, 'BraveSoftware/Brave-Browser'),
'chrome': os.path.join(appdata, 'Google/Chrome'),
'chromium': os.path.join(appdata, 'Chromium'),
'edge': os.path.join(appdata, 'Microsoft Edge'),
'opera': os.path.join(appdata, 'com.operasoftware.Opera'),
'vivaldi': os.path.join(appdata, 'Vivaldi'),
}[browser_name]
else:
raise ValueError(f'unsupported platform: {sys.platform}')
# Linux keyring names can be determined by snooping on dbus while opening the browser in KDE:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
keyring_name = {
'brave': 'Brave',
'chrome': 'Chrome',
'chromium': 'Chromium',
'edge': 'Microsoft Edge' if sys.platform == 'darwin' else 'Chromium',
'opera': 'Opera' if sys.platform == 'darwin' else 'Chromium',
'vivaldi': 'Vivaldi' if sys.platform == 'darwin' else 'Chrome',
}[browser_name]
browsers_without_profiles = {'opera'}
return {
'browser_dir': browser_dir,
'keyring_name': keyring_name,
'supports_profiles': browser_name not in browsers_without_profiles
}
def _extract_chrome_cookies(browser_name, profile, keyring, logger):
logger.info(f'Extracting cookies from {browser_name}')
if not SQLITE_AVAILABLE:
logger.warning(f'Cannot extract cookies from {browser_name} without sqlite3 support. '
'Please use a python interpreter compiled with sqlite3 support')
return YoutubeDLCookieJar()
config = _get_chromium_based_browser_settings(browser_name)
if profile is None:
search_root = config['browser_dir']
elif _is_path(profile):
search_root = profile
config['browser_dir'] = os.path.dirname(profile) if config['supports_profiles'] else profile
else:
if config['supports_profiles']:
search_root = os.path.join(config['browser_dir'], profile)
else:
logger.error(f'{browser_name} does not support profiles')
search_root = config['browser_dir']
cookie_database_path = _find_most_recently_used_file(search_root, 'Cookies', logger)
if cookie_database_path is None:
raise FileNotFoundError(f'could not find {browser_name} cookies database in "{search_root}"')
logger.debug(f'Extracting cookies from: "{cookie_database_path}"')
decryptor = get_cookie_decryptor(config['browser_dir'], config['keyring_name'], logger, keyring=keyring)
with tempfile.TemporaryDirectory(prefix='yt_dlp') as tmpdir:
cursor = None
try:
cursor = _open_database_copy(cookie_database_path, tmpdir)
cursor.connection.text_factory = bytes
column_names = _get_column_names(cursor, 'cookies')
secure_column = 'is_secure' if 'is_secure' in column_names else 'secure'
cursor.execute(f'SELECT host_key, name, value, encrypted_value, path, expires_utc, {secure_column} FROM cookies')
jar = YoutubeDLCookieJar()
failed_cookies = 0
unencrypted_cookies = 0
with _create_progress_bar(logger) as progress_bar:
table = cursor.fetchall()
total_cookie_count = len(table)
for i, line in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
is_encrypted, cookie = _process_chrome_cookie(decryptor, *line)
if not cookie:
failed_cookies += 1
continue
elif not is_encrypted:
unencrypted_cookies += 1
jar.set_cookie(cookie)
if failed_cookies > 0:
failed_message = f' ({failed_cookies} could not be decrypted)'
else:
failed_message = ''
logger.info(f'Extracted {len(jar)} cookies from {browser_name}{failed_message}')
counts = decryptor.cookie_counts.copy()
counts['unencrypted'] = unencrypted_cookies
logger.debug(f'cookie version breakdown: {counts}')
return jar
finally:
if cursor is not None:
cursor.connection.close()
def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, path, expires_utc, is_secure):
host_key = host_key.decode('utf-8')
name = name.decode('utf-8')
value = value.decode('utf-8')
path = path.decode('utf-8')
is_encrypted = not value and encrypted_value
if is_encrypted:
value = decryptor.decrypt(encrypted_value)
if value is None:
return is_encrypted, None
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
comment=None, comment_url=None, rest={})
class ChromeCookieDecryptor:
"""
Overview:
Linux:
- cookies are either v10 or v11
- v10: AES-CBC encrypted with a fixed key
- v11: AES-CBC encrypted with an OS protected key (keyring)
- v11 keys can be stored in various places depending on the activate desktop environment [2]
Mac:
- cookies are either v10 or not v10
- v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux
- not v10: 'old data' stored as plaintext
Windows:
- cookies are either v10 or not v10
- v10: AES-GCM encrypted with a key which is encrypted with DPAPI
- not v10: encrypted with DPAPI
Sources:
- [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/
- [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc
- KeyStorageLinux::CreateService
"""
def decrypt(self, encrypted_value):
raise NotImplementedError('Must be implemented by sub classes')
@property
def cookie_counts(self):
raise NotImplementedError('Must be implemented by sub classes')
def get_cookie_decryptor(browser_root, browser_keyring_name, logger, *, keyring=None):
if sys.platform in ('linux', 'linux2'):
return LinuxChromeCookieDecryptor(browser_keyring_name, logger, keyring=keyring)
elif sys.platform == 'darwin':
return MacChromeCookieDecryptor(browser_keyring_name, logger)
elif sys.platform == 'win32':
return WindowsChromeCookieDecryptor(browser_root, logger)
else:
raise NotImplementedError(f'Chrome cookie decryption is not supported on this platform: {sys.platform}')
class LinuxChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger, *, keyring=None):
self._logger = logger
self._v10_key = self.derive_key(b'peanuts')
password = _get_linux_keyring_password(browser_keyring_name, keyring, logger)
self._v11_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'v11': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc
return pbkdf2_sha1(password, salt=b'<PASSWORD>', iterations=1, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
elif version == b'v11':
self._cookie_counts['v11'] += 1
if self._v11_key is None:
self._logger.warning('cannot decrypt v11 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v11_key, self._logger)
else:
self._cookie_counts['other'] += 1
return None
class MacChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_keyring_name, logger):
self._logger = logger
password = _get_mac_keyring_password(browser_keyring_name, logger)
self._v10_key = None if password is None else self.derive_key(password)
self._cookie_counts = {'v10': 0, 'other': 0}
@staticmethod
def derive_key(password):
# values from
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return pbkdf2_sha1(password, salt=b'<PASSWORD>', iterations=1003, key_length=16)
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
return _decrypt_aes_cbc(ciphertext, self._v10_key, self._logger)
else:
self._cookie_counts['other'] += 1
# other prefixes are considered 'old data' which were stored as plaintext
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm
return encrypted_value
class WindowsChromeCookieDecryptor(ChromeCookieDecryptor):
def __init__(self, browser_root, logger):
self._logger = logger
self._v10_key = _get_windows_v10_key(browser_root, logger)
self._cookie_counts = {'v10': 0, 'other': 0}
@property
def cookie_counts(self):
return self._cookie_counts
def decrypt(self, encrypted_value):
version = encrypted_value[:3]
ciphertext = encrypted_value[3:]
if version == b'v10':
self._cookie_counts['v10'] += 1
if self._v10_key is None:
self._logger.warning('cannot decrypt v10 cookies: no key found', only_once=True)
return None
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
# kNonceLength
nonce_length = 96 // 8
# boringssl
# EVP_AEAD_AES_GCM_TAG_LEN
authentication_tag_length = 16
raw_ciphertext = ciphertext
nonce = raw_ciphertext[:nonce_length]
ciphertext = raw_ciphertext[nonce_length:-authentication_tag_length]
authentication_tag = raw_ciphertext[-authentication_tag_length:]
return _decrypt_aes_gcm(ciphertext, self._v10_key, nonce, authentication_tag, self._logger)
else:
self._cookie_counts['other'] += 1
# any other prefix means the data is DPAPI encrypted
# https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc
return _decrypt_windows_dpapi(encrypted_value, self._logger).decode('utf-8')
def _extract_safari_cookies(profile, logger):
if profile is not None:
logger.error('safari does not support profiles')
if sys.platform != 'darwin':
raise ValueError(f'unsupported platform: {sys.platform}')
cookies_path = os.path.expanduser('~/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
logger.debug('Trying secondary cookie location')
cookies_path = os.path.expanduser('~/Library/Containers/com.apple.Safari/Data/Library/Cookies/Cookies.binarycookies')
if not os.path.isfile(cookies_path):
raise FileNotFoundError('could not find safari cookies database')
with open(cookies_path, 'rb') as f:
cookies_data = f.read()
jar = parse_safari_cookies(cookies_data, logger=logger)
logger.info(f'Extracted {len(jar)} cookies from safari')
return jar
class ParserError(Exception):
pass
class DataParser:
def __init__(self, data, logger):
self._data = data
self.cursor = 0
self._logger = logger
def read_bytes(self, num_bytes):
if num_bytes < 0:
raise ParserError(f'invalid read of {num_bytes} bytes')
end = self.cursor + num_bytes
if end > len(self._data):
raise ParserError('reached end of input')
data = self._data[self.cursor:end]
self.cursor = end
return data
def expect_bytes(self, expected_value, message):
value = self.read_bytes(len(expected_value))
if value != expected_value:
raise ParserError(f'unexpected value: {value} != {expected_value} ({message})')
def read_uint(self, big_endian=False):
data_format = '>I' if big_endian else '<I'
return struct.unpack(data_format, self.read_bytes(4))[0]
def read_double(self, big_endian=False):
data_format = '>d' if big_endian else '<d'
return struct.unpack(data_format, self.read_bytes(8))[0]
def read_cstring(self):
buffer = []
while True:
c = self.read_bytes(1)
if c == b'\x00':
return b''.join(buffer).decode('utf-8')
else:
buffer.append(c)
def skip(self, num_bytes, description='unknown'):
if num_bytes > 0:
self._logger.debug(f'skipping {num_bytes} bytes ({description}): {self.read_bytes(num_bytes)!r}')
elif num_bytes < 0:
raise ParserError(f'invalid skip of {num_bytes} bytes')
def skip_to(self, offset, description='unknown'):
self.skip(offset - self.cursor, description)
def skip_to_end(self, description='unknown'):
self.skip_to(len(self._data), description)
def _mac_absolute_time_to_posix(timestamp):
return int((datetime(2001, 1, 1, 0, 0, tzinfo=timezone.utc) + timedelta(seconds=timestamp)).timestamp())
def _parse_safari_cookies_header(data, logger):
p = DataParser(data, logger)
p.expect_bytes(b'cook', 'database signature')
number_of_pages = p.read_uint(big_endian=True)
page_sizes = [p.read_uint(big_endian=True) for _ in range(number_of_pages)]
return page_sizes, p.cursor
def _parse_safari_cookies_page(data, jar, logger):
p = DataParser(data, logger)
p.expect_bytes(b'\x00\x00\x01\x00', 'page signature')
number_of_cookies = p.read_uint()
record_offsets = [p.read_uint() for _ in range(number_of_cookies)]
if number_of_cookies == 0:
logger.debug(f'a cookies page of size {len(data)} has no cookies')
return
p.skip_to(record_offsets[0], 'unknown page header field')
with _create_progress_bar(logger) as progress_bar:
for i, record_offset in enumerate(record_offsets):
progress_bar.print(f'Loading cookie {i: 6d}/{number_of_cookies: 6d}')
p.skip_to(record_offset, 'space between records')
record_length = _parse_safari_cookies_record(data[record_offset:], jar, logger)
p.read_bytes(record_length)
p.skip_to_end('space in between pages')
def _parse_safari_cookies_record(data, jar, logger):
p = DataParser(data, logger)
record_size = p.read_uint()
p.skip(4, 'unknown record field 1')
flags = p.read_uint()
is_secure = bool(flags & 0x0001)
p.skip(4, 'unknown record field 2')
domain_offset = p.read_uint()
name_offset = p.read_uint()
path_offset = p.read_uint()
value_offset = p.read_uint()
p.skip(8, 'unknown record field 3')
expiration_date = _mac_absolute_time_to_posix(p.read_double())
_creation_date = _mac_absolute_time_to_posix(p.read_double()) # noqa: F841
try:
p.skip_to(domain_offset)
domain = p.read_cstring()
p.skip_to(name_offset)
name = p.read_cstring()
p.skip_to(path_offset)
path = p.read_cstring()
p.skip_to(value_offset)
value = p.read_cstring()
except UnicodeDecodeError:
logger.warning('failed to parse Safari cookie because UTF-8 decoding failed', only_once=True)
return record_size
p.skip_to(record_size, 'space at the end of the record')
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
comment=None, comment_url=None, rest={})
jar.set_cookie(cookie)
return record_size
def parse_safari_cookies(data, jar=None, logger=YDLLogger()):
"""
References:
- https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc
- this data appears to be out of date but the important parts of the database structure is the same
- there are a few bytes here and there which are skipped during parsing
"""
if jar is None:
jar = YoutubeDLCookieJar()
page_sizes, body_start = _parse_safari_cookies_header(data, logger)
p = DataParser(data[body_start:], logger)
for page_size in page_sizes:
_parse_safari_cookies_page(p.read_bytes(page_size), jar, logger)
p.skip_to_end('footer')
return jar
class _LinuxDesktopEnvironment(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h
DesktopEnvironment
"""
OTHER = auto()
CINNAMON = auto()
GNOME = auto()
KDE = auto()
PANTHEON = auto()
UNITY = auto()
XFCE = auto()
class _LinuxKeyring(Enum):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h
SelectedLinuxBackend
"""
KWALLET = auto()
GNOMEKEYRING = auto()
BASICTEXT = auto()
SUPPORTED_KEYRINGS = _LinuxKeyring.__members__.keys()
def _get_linux_desktop_environment(env):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc
GetDesktopEnvironment
"""
xdg_current_desktop = env.get('XDG_CURRENT_DESKTOP', None)
desktop_session = env.get('DESKTOP_SESSION', None)
if xdg_current_desktop is not None:
xdg_current_desktop = xdg_current_desktop.split(':')[0].strip()
if xdg_current_desktop == 'Unity':
if desktop_session is not None and 'gnome-fallback' in desktop_session:
return _LinuxDesktopEnvironment.GNOME
else:
return _LinuxDesktopEnvironment.UNITY
elif xdg_current_desktop == 'GNOME':
return _LinuxDesktopEnvironment.GNOME
elif xdg_current_desktop == 'X-Cinnamon':
return _LinuxDesktopEnvironment.CINNAMON
elif xdg_current_desktop == 'KDE':
return _LinuxDesktopEnvironment.KDE
elif xdg_current_desktop == 'Pantheon':
return _LinuxDesktopEnvironment.PANTHEON
elif xdg_current_desktop == 'XFCE':
return _LinuxDesktopEnvironment.XFCE
elif desktop_session is not None:
if desktop_session in ('mate', 'gnome'):
return _LinuxDesktopEnvironment.GNOME
elif 'kde' in desktop_session:
return _LinuxDesktopEnvironment.KDE
elif 'xfce' in desktop_session:
return _LinuxDesktopEnvironment.XFCE
else:
if 'GNOME_DESKTOP_SESSION_ID' in env:
return _LinuxDesktopEnvironment.GNOME
elif 'KDE_FULL_SESSION' in env:
return _LinuxDesktopEnvironment.KDE
return _LinuxDesktopEnvironment.OTHER
def _choose_linux_keyring(logger):
"""
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc
SelectBackend
"""
desktop_environment = _get_linux_desktop_environment(os.environ)
logger.debug(f'detected desktop environment: {desktop_environment.name}')
if desktop_environment == _LinuxDesktopEnvironment.KDE:
linux_keyring = _LinuxKeyring.KWALLET
elif desktop_environment == _LinuxDesktopEnvironment.OTHER:
linux_keyring = _LinuxKeyring.BASICTEXT
else:
linux_keyring = _LinuxKeyring.GNOMEKEYRING
return linux_keyring
def _get_kwallet_network_wallet(logger):
""" The name of the wallet used to store network passwords.
https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc
KWalletDBus::NetworkWallet
which does a dbus call to the following function:
https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html
Wallet::NetworkWallet
"""
default_wallet = 'kdewallet'
try:
proc = Popen([
'dbus-send', '--session', '--print-reply=literal',
'--dest=org.kde.kwalletd5',
'/modules/kwalletd5',
'org.kde.KWallet.networkWallet'
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.warning('failed to read NetworkWallet')
return default_wallet
else:
network_wallet = stdout.decode('utf-8').strip()
logger.debug(f'NetworkWallet = "{network_wallet}"')
return network_wallet
except Exception as e:
logger.warning(f'exception while obtaining NetworkWallet: {e}')
return default_wallet
def _get_kwallet_password(browser_keyring_name, logger):
logger.debug('using kwallet-query to obtain password from kwallet')
if shutil.which('kwallet-query') is None:
logger.error('kwallet-query command not found. KWallet and kwallet-query '
'must be installed to read from KWallet. kwallet-query should be'
'included in the kwallet package for your distribution')
return b''
network_wallet = _get_kwallet_network_wallet(logger)
try:
proc = Popen([
'kwallet-query',
'--read-password', f'{browser_keyring_name} Safe Storage',
'--folder', f'{browser_keyring_name} Keys',
network_wallet
], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if proc.returncode != 0:
logger.error(f'kwallet-query failed with return code {proc.returncode}. Please consult '
'the kwallet-query man page for details')
return b''
else:
if stdout.lower().startswith(b'failed to read'):
logger.debug('failed to read password from kwallet. Using empty string instead')
# this sometimes occurs in KDE because chrome does not check hasEntry and instead
# just tries to read the value (which kwallet returns "") whereas kwallet-query
# checks hasEntry. To verify this:
# dbus-monitor "interface='org.kde.KWallet'" "type=method_return"
# while starting chrome.
# this may be a bug as the intended behaviour is to generate a random password and store
# it, but that doesn't matter here.
return b''
else:
logger.debug('password found')
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running kwallet-query: {error_to_str(e)}')
return b''
def _get_gnome_keyring_password(browser_keyring_name, logger):
if not SECRETSTORAGE_AVAILABLE:
logger.error(f'secretstorage not available {SECRETSTORAGE_UNAVAILABLE_REASON}')
return b''
# the Gnome keyring does not seem to organise keys in the same way as KWallet,
# using `dbus-monitor` during startup, it can be observed that chromium lists all keys
# and presumably searches for its key in the list. It appears that we must do the same.
# https://github.com/jaraco/keyring/issues/556
with contextlib.closing(secretstorage.dbus_init()) as con:
col = secretstorage.get_default_collection(con)
for item in col.get_all_items():
if item.get_label() == f'{browser_keyring_name} Safe Storage':
return item.get_secret()
else:
logger.error('failed to read from keyring')
return b''
def _get_linux_keyring_password(browser_keyring_name, keyring, logger):
# note: chrome/chromium can be run with the following flags to determine which keyring backend
# it has chosen to use
# chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_
# Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection
# will not be sufficient in all cases.
keyring = _LinuxKeyring[keyring] if keyring else _choose_linux_keyring(logger)
logger.debug(f'Chosen keyring: {keyring.name}')
if keyring == _LinuxKeyring.KWALLET:
return _get_kwallet_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.GNOMEKEYRING:
return _get_gnome_keyring_password(browser_keyring_name, logger)
elif keyring == _LinuxKeyring.BASICTEXT:
# when basic text is chosen, all cookies are stored as v10 (so no keyring password is required)
return None
assert False, f'Unknown keyring {keyring}'
def _get_mac_keyring_password(browser_keyring_name, logger):
logger.debug('using find-generic-password to obtain password from OSX keychain')
try:
proc = Popen(
['security', 'find-generic-password',
'-w', # write password to stdout
'-a', browser_keyring_name, # match 'account'
'-s', f'{browser_keyring_name} Safe Storage'], # match 'service'
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
stdout, stderr = proc.communicate_or_kill()
if stdout[-1:] == b'\n':
stdout = stdout[:-1]
return stdout
except Exception as e:
logger.warning(f'exception running find-generic-password: {error_to_str(e)}')
return None
def _get_windows_v10_key(browser_root, logger):
path = _find_most_recently_used_file(browser_root, 'Local State', logger)
if path is None:
logger.error('could not find local state file')
return None
logger.debug(f'Found local state file at "{path}"')
with open(path, encoding='utf8') as f:
data = json.load(f)
try:
base64_key = data['os_crypt']['encrypted_key']
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')
return None
return _decrypt_windows_dpapi(encrypted_key[len(prefix):], logger)
def pbkdf2_sha1(password, salt, iterations, key_length):
return pbkdf2_hmac('sha1', password, salt, iterations, key_length)
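def _example_pbkdf2_parameters():
    # Example only, not called anywhere: restates the parameter shapes typically used
    # with pbkdf2_sha1 for Chromium cookie keys. The salt, iteration counts and key
    # length are assumptions based on the os_crypt sources referenced above (Linux
    # uses 1 iteration with the fixed password b'peanuts' for v10 keys; macOS uses
    # 1003 iterations with the password read from the keychain).
    linux_v10_key = pbkdf2_sha1(b'peanuts', salt=b'saltysalt', iterations=1, key_length=16)
    mac_password = b'placeholder-keychain-password'  # placeholder, not a real secret
    mac_v10_key = pbkdf2_sha1(mac_password, salt=b'saltysalt', iterations=1003, key_length=16)
    return linux_v10_key, mac_v10_key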
def _decrypt_aes_cbc(ciphertext, key, logger, initialization_vector=b' ' * 16):
plaintext = unpad_pkcs7(aes_cbc_decrypt_bytes(ciphertext, key, initialization_vector))
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-CBC) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
def _decrypt_aes_gcm(ciphertext, key, nonce, authentication_tag, logger):
try:
plaintext = aes_gcm_decrypt_and_verify_bytes(ciphertext, key, authentication_tag, nonce)
except ValueError:
logger.warning('failed to decrypt cookie (AES-GCM) because the MAC check failed. Possibly the key is wrong?', only_once=True)
return None
try:
return plaintext.decode('utf-8')
except UnicodeDecodeError:
logger.warning('failed to decrypt cookie (AES-GCM) because UTF-8 decoding failed. Possibly the key is wrong?', only_once=True)
return None
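def _example_decrypt_windows_v10_value(encrypted_value, v10_key, logger):
    # Illustrative sketch only, not the module's actual decryption path: a Windows v10
    # cookie value is assumed to be laid out as b'v10' + 12-byte nonce + ciphertext +
    # 16-byte authentication tag, per the Chromium source cited above.
    raw = encrypted_value[3:]               # strip the b'v10' prefix
    nonce, remainder = raw[:12], raw[12:]
    ciphertext, tag = remainder[:-16], remainder[-16:]
    return _decrypt_aes_gcm(ciphertext, v10_key, nonce, tag, logger)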
def _decrypt_windows_dpapi(ciphertext, logger):
"""
References:
- https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
"""
from ctypes.wintypes import DWORD
class DATA_BLOB(ctypes.Structure):
_fields_ = [('cbData', DWORD),
('pbData', ctypes.POINTER(ctypes.c_char))]
buffer = ctypes.create_string_buffer(ciphertext)
blob_in = DATA_BLOB(ctypes.sizeof(buffer), buffer)
blob_out = DATA_BLOB()
ret = ctypes.windll.crypt32.CryptUnprotectData(
ctypes.byref(blob_in), # pDataIn
None, # ppszDataDescr: human readable description of pDataIn
None, # pOptionalEntropy: salt?
None, # pvReserved: must be NULL
None, # pPromptStruct: information about prompts to display
0, # dwFlags
ctypes.byref(blob_out) # pDataOut
)
if not ret:
logger.warning('failed to decrypt with DPAPI', only_once=True)
return None
result = ctypes.string_at(blob_out.pbData, blob_out.cbData)
ctypes.windll.kernel32.LocalFree(blob_out.pbData)
return result
def _config_home():
return os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
def _open_database_copy(database_path, tmpdir):
# cannot open sqlite databases if they are already in use (e.g. by the browser)
database_copy_path = os.path.join(tmpdir, 'temporary.sqlite')
shutil.copy(database_path, database_copy_path)
conn = sqlite3.connect(database_copy_path)
return conn.cursor()
def _get_column_names(cursor, table_name):
table_info = cursor.execute(f'PRAGMA table_info({table_name})').fetchall()
return [row[1].decode('utf-8') for row in table_info]
def _find_most_recently_used_file(root, filename, logger):
# if there are multiple browser profiles, take the most recently used one
i, paths = 0, []
with _create_progress_bar(logger) as progress_bar:
for curr_root, dirs, files in os.walk(root):
for file in files:
i += 1
progress_bar.print(f'Searching for "{filename}": {i: 6d} files searched')
if file == filename:
paths.append(os.path.join(curr_root, file))
return None if not paths else max(paths, key=lambda path: os.lstat(path).st_mtime)
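# Example only: _find_most_recently_used_file could be used to locate a Chromium
# cookie database under a hypothetical profile root; the path literal below is
# illustrative and not one this module derives itself.
# cookie_db_path = _find_most_recently_used_file(
#     os.path.expanduser('~/.config/chromium'), 'Cookies', logger)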
def _merge_cookie_jars(jars):
output_jar = YoutubeDLCookieJar()
for jar in jars:
for cookie in jar:
output_jar.set_cookie(cookie)
if jar.filename is not None:
output_jar.filename = jar.filename
return output_jar
def _is_path(value):
return os.path.sep in value
def _parse_browser_specification(browser_name, profile=None, keyring=None):
if browser_name not in SUPPORTED_BROWSERS:
raise ValueError(f'unsupported browser: "{browser_name}"')
if keyring not in (None, *SUPPORTED_KEYRINGS):
raise ValueError(f'unsupported keyring: "{keyring}"')
if profile is not None and _is_path(profile):
profile = os.path.expanduser(profile)
return browser_name, profile, keyring
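def _example_parse_browser_specifications():
    # Example only: a few made-up inputs for the parser above. The browser name must
    # be one of SUPPORTED_BROWSERS and the keyring (if given) one of SUPPORTED_KEYRINGS,
    # both defined earlier in this module; profiles containing a path separator are
    # expanded with os.path.expanduser.
    return [
        _parse_browser_specification('chrome'),
        _parse_browser_specification('chrome', profile='Profile 1'),
        _parse_browser_specification('chromium', profile='~/.config/chromium/Default'),
    ]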
| en | 0.735715 | # although sqlite3 is part of the standard library, it is possible to compile python without # sqlite support. See: https://github.com/yt-dlp/yt-dlp/issues/544 Return a context manager with a print method. (Optional) # Do not print to files/pipes, loggers, or when --no-progress is used # https://chromium.googlesource.com/chromium/src/+/HEAD/docs/user_data_dir.md # Linux keyring names can be determined by snooping on dbus while opening the browser in KDE: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" Overview: Linux: - cookies are either v10 or v11 - v10: AES-CBC encrypted with a fixed key - v11: AES-CBC encrypted with an OS protected key (keyring) - v11 keys can be stored in various places depending on the activate desktop environment [2] Mac: - cookies are either v10 or not v10 - v10: AES-CBC encrypted with an OS protected key (keyring) and more key derivation iterations than linux - not v10: 'old data' stored as plaintext Windows: - cookies are either v10 or not v10 - v10: AES-GCM encrypted with a key which is encrypted with DPAPI - not v10: encrypted with DPAPI Sources: - [1] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/ - [2] https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_linux.cc - KeyStorageLinux::CreateService # values from # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_linux.cc # values from # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm # other prefixes are considered 'old data' which were stored as plaintext # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_mac.mm # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc # kNonceLength # boringssl # EVP_AEAD_AES_GCM_TAG_LEN # any other prefix means the data is DPAPI encrypted # https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/os_crypt_win.cc # noqa: F841 References: - https://github.com/libyal/dtformats/blob/main/documentation/Safari%20Cookies.asciidoc - this data appears to be out of date but the important parts of the database structure is the same - there are a few bytes here and there which are skipped during parsing https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.h DesktopEnvironment https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.h SelectedLinuxBackend https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base/nix/xdg_util.cc GetDesktopEnvironment https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/key_storage_util_linux.cc SelectBackend The name of the wallet used to store network passwords. https://chromium.googlesource.com/chromium/src/+/refs/heads/main/components/os_crypt/kwallet_dbus.cc KWalletDBus::NetworkWallet which does a dbus call to the following function: https://api.kde.org/frameworks/kwallet/html/classKWallet_1_1Wallet.html Wallet::NetworkWallet # this sometimes occurs in KDE because chrome does not check hasEntry and instead # just tries to read the value (which kwallet returns "") whereas kwallet-query # checks hasEntry. To verify this: # dbus-monitor "interface='org.kde.KWallet'" "type=method_return" # while starting chrome. 
# this may be a bug as the intended behaviour is to generate a random password and store # it, but that doesn't matter here. # the Gnome keyring does not seem to organise keys in the same way as KWallet, # using `dbus-monitor` during startup, it can be observed that chromium lists all keys # and presumably searches for its key in the list. It appears that we must do the same. # https://github.com/jaraco/keyring/issues/556 # note: chrome/chromium can be run with the following flags to determine which keyring backend # it has chosen to use # chromium --enable-logging=stderr --v=1 2>&1 | grep key_storage_ # Chromium supports a flag: --password-store=<basic|gnome|kwallet> so the automatic detection # will not be sufficient in all cases. # when basic text is chosen, all cookies are stored as v10 (so no keyring password is required) # write password to stdout # match 'account' # match 'service' References: - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata # pDataIn # ppszDataDescr: human readable description of pDataIn # pOptionalEntropy: salt? # pvReserved: must be NULL # pPromptStruct: information about prompts to display # dwFlags # pDataOut # cannot open sqlite databases if they are already in use (e.g. by the browser) # if there are multiple browser profiles, take the most recently used one | 2.137206 | 2 |
hgtools/tests/conftest.py | jaraco/hgtools | 1 | 269 | <reponame>jaraco/hgtools
import os
import pytest
from hgtools import managers
def _ensure_present(mgr):
try:
mgr.version()
except Exception:
pytest.skip()
@pytest.fixture
def tmpdir_as_cwd(tmpdir):
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture
def hg_repo(tmpdir_as_cwd):
mgr = managers.MercurialManager()
_ensure_present(mgr)
mgr._invoke('init', '.')
os.makedirs('bar')
touch('bar/baz')
mgr._invoke('addremove')
mgr._invoke('ci', '-m', 'committed')
with open('bar/baz', 'w') as baz:
baz.write('content')
mgr._invoke('ci', '-m', 'added content')
return tmpdir_as_cwd
@pytest.fixture
def git_repo(tmpdir_as_cwd):
mgr = managers.GitManager()
_ensure_present(mgr)
mgr._invoke('init')
mgr._invoke('config', 'user.email', '<EMAIL>')
mgr._invoke('config', 'user.name', 'HGTools')
os.makedirs('bar')
touch('bar/baz')
mgr._invoke('add', '.')
mgr._invoke('commit', '-m', 'committed')
with open('bar/baz', 'w') as baz:
baz.write('content')
mgr._invoke('commit', '-am', 'added content')
return tmpdir_as_cwd
def touch(filename):
with open(filename, 'a'):
pass
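# Example only, not part of the original conftest: a test that could consume the
# hg_repo fixture above. It assumes mgr._invoke returns the command's output, which
# is an assumption about hgtools' manager API rather than something shown here.
def _example_test_hg_repo_has_history(hg_repo):
    mgr = managers.MercurialManager()
    log = mgr._invoke('log')
    assert 'added content' in log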
| import os
import pytest
from hgtools import managers
def _ensure_present(mgr):
try:
mgr.version()
except Exception:
pytest.skip()
@pytest.fixture
def tmpdir_as_cwd(tmpdir):
with tmpdir.as_cwd():
yield tmpdir
@pytest.fixture
def hg_repo(tmpdir_as_cwd):
mgr = managers.MercurialManager()
_ensure_present(mgr)
mgr._invoke('init', '.')
os.makedirs('bar')
touch('bar/baz')
mgr._invoke('addremove')
mgr._invoke('ci', '-m', 'committed')
with open('bar/baz', 'w') as baz:
baz.write('content')
mgr._invoke('ci', '-m', 'added content')
return tmpdir_as_cwd
@pytest.fixture
def git_repo(tmpdir_as_cwd):
mgr = managers.GitManager()
_ensure_present(mgr)
mgr._invoke('init')
mgr._invoke('config', 'user.email', '<EMAIL>')
mgr._invoke('config', 'user.name', 'HGTools')
os.makedirs('bar')
touch('bar/baz')
mgr._invoke('add', '.')
mgr._invoke('commit', '-m', 'committed')
with open('bar/baz', 'w') as baz:
baz.write('content')
mgr._invoke('commit', '-am', 'added content')
return tmpdir_as_cwd
def touch(filename):
with open(filename, 'a'):
pass | none | 1 | 2.005934 | 2 |
|
gfirefly/dbentrust/dbutils.py | handsome3163/H2Dgame-Firefly | 675 | 270 | <reponame>handsome3163/H2Dgame-Firefly<gh_stars>100-1000
#coding:utf8
'''
Created on 2013-8-21
@author: lan (www.9miao.com)
'''
import itertools
import datetime
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
def _interpolate(sformat):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
tokenprog = tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = sformat.find("$", pos)
if dollar < 0:
break
nextchar = sformat[dollar + 1]
if nextchar == "{":
chunks.append((0, sformat[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, sformat[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, sformat[pos:dollar]))
match, pos = matchorfail(sformat, dollar + 1)
while pos < len(sformat):
if sformat[pos] == "." and \
pos + 1 < len(sformat) and sformat[pos + 1] in namechars:
match, pos = matchorfail(sformat, pos + 1)
elif sformat[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, sformat[dollar + 1:pos]))
else:
chunks.append((0, sformat[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(sformat):
chunks.append((0, sformat[pos:]))
return chunks
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
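def _example_execute_reparam(cursor, name):
    # Example only: shows how a reparam()-built SQLQuery is typically split into SQL
    # text and parameters for a DB-API cursor. The cursor and the "users" table are
    # hypothetical; the default paramstyle renders '%s' placeholders, which suits
    # format/pyformat drivers.
    q = reparam("SELECT * FROM users WHERE name = $name", dict(name=name))
    cursor.execute(q.query(), q.values())
    return cursor.fetchall()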
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
class SQLProducer:
"""Database"""
def __init__(self):
"""Creates a database.
"""
pass
def query(self, sql_query,processed=False, svars=None):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if svars is None:
svars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, svars)
return sql_query
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, svars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, svars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def _where(self, where, svars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, svars)
return where
def select(self, tables, svars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if svars is None: svars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
return sql_query
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
# make sure all rows have same keys.
for v in values:
if v.keys() != keys:
raise ValueError, 'Bad data'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, svars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if svars is None: svars = {}
where = self._where(where, svars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, svars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if svars is None:
svars = {}
where = self._where(where, svars)
q = 'DELETE FROM ' + table
if using:
q += ' USING ' + sqllist(using)
if where:
q += ' WHERE ' + where
return q
sqlproducer = SQLProducer()
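# Example only: exercising the producer in `_test` mode, which returns the generated
# SQLQuery instead of executing it, so no database connection is involved. The
# 'person' table and its column names are made up for illustration.
_example_select = sqlproducer.select('person', where='age > $age', svars={'age': 18}, _test=True)
_example_insert = sqlproducer.insert('person', name='alice', age=30)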
| #coding:utf8
'''
Created on 2013-8-21
@author: lan (www.9miao.com)
'''
import itertools
import datetime
def safeunicode(obj, encoding='utf-8'):
r"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding)
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
return unicode(obj)
else:
return str(obj).decode(encoding)
def safestr(obj, encoding='utf-8'):
r"""
Converts any given object to utf-8 encoded string.
>>> safestr('hello')
'hello'
>>> safestr(u'\u1234')
'\xe1\x88\xb4'
>>> safestr(2)
'2'
"""
if isinstance(obj, unicode):
return obj.encode(encoding)
elif isinstance(obj, str):
return obj
elif hasattr(obj, 'next'): # iterator
return itertools.imap(safestr, obj)
else:
return str(obj)
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
def _interpolate(sformat):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
tokenprog = tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = sformat.find("$", pos)
if dollar < 0:
break
nextchar = sformat[dollar + 1]
if nextchar == "{":
chunks.append((0, sformat[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, sformat[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, sformat[pos:dollar]))
match, pos = matchorfail(sformat, dollar + 1)
while pos < len(sformat):
if sformat[pos] == "." and \
pos + 1 < len(sformat) and sformat[pos + 1] in namechars:
match, pos = matchorfail(sformat, pos + 1)
elif sformat[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(sformat, pos)
tstart, tend = match.regs[3]
token = sformat[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, sformat[dollar + 1:pos]))
else:
chunks.append((0, sformat[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(sformat):
chunks.append((0, sformat[pos:]))
return chunks
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
class SQLProducer:
"""Database"""
def __init__(self):
"""Creates a database.
"""
pass
def query(self, sql_query,processed=False, svars=None):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if svars is None:
svars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, svars)
return sql_query
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, svars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, svars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def _where(self, where, svars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, svars)
return where
def select(self, tables, svars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
"""
if svars is None: svars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
return sql_query
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
# make sure all rows have same keys.
for v in values:
if v.keys() != keys:
raise ValueError, 'Bad data'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, svars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if svars is None: svars = {}
where = self._where(where, svars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, svars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if svars is None:
svars = {}
where = self._where(where, svars)
q = 'DELETE FROM ' + table
if using:
q += ' USING ' + sqllist(using)
if where:
q += ' WHERE ' + where
return q
sqlproducer = SQLProducer() | en | 0.546829 | #coding:utf8 Created on 2013-8-21 @author: lan (www.9miao.com) Converts any given object to unicode string. >>> safeunicode('hello') u'hello' >>> safeunicode(2) u'2' >>> safeunicode('\xe1\x88\xb4') u'\u1234' Converts any given object to utf-8 encoded string. >>> safestr('hello') 'hello' >>> safestr(u'\u1234') '\xe1\x88\xb4' >>> safestr(2) '2' # iterator converts `obj` to its proper SQL version >>> sqlify(None) 'NULL' >>> sqlify(True) "'t'" >>> sqlify(3) '3' # because `1 == True and hash(1) == hash(True)` # we have to do this the hard way... Converts the arguments for use in something like a WHERE clause. >>> sqllist(['a', 'b']) 'a, b' >>> sqllist('a') 'a' >>> sqllist(u'abc') u'abc' >>> _sqllist([1, 2, 3]) <sql: '(1, 2, 3)'> Ensures `a` is quoted properly for use in a SQL query. >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3) <sql: "WHERE x = 't' AND y = 3"> >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3]) <sql: "WHERE x = 't' AND y IN (2, 3)"> Takes a format string and returns a list of 2-tuples of the form (boolean, string) where boolean says whether string should be evaled or not. from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee) Converts a `dictionary` to an SQL WHERE clause `SQLQuery`. >>> sqlwhere({'cust_id': 2, 'order_id':3}) <sql: 'order_id = 3 AND cust_id = 2'> >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ') <sql: 'order_id = 3, cust_id = 2'> >>> sqlwhere({'a': 'a', 'b': 'b'}).query() 'a = %s AND b = %s' Takes a string and a dictionary and interpolates the string using values from the dictionary. Returns an `SQLQuery` for the result. >>> reparam("s = $s", dict(s=True)) <sql: "s = 't'"> >>> reparam("s IN $s", dict(s=[1, 2])) <sql: 's IN (1, 2)'> # eval mucks with it raised for unsupported db paramstyles (currently supported: qmark, numeric, format, pyformat) Parameter in SQLQuery. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")]) >>> q <sql: "SELECT * FROM test WHERE name='joe'"> >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.values() ['joe'] You can pass this sort of thing as a clause in any db function. Otherwise, you can pass a dictionary to the keyword argument `vars` and the function will call reparam for you. Internally, consists of `items`, which is a list of strings and SQLParams, which get concatenated to produce the actual query. # tested in sqlquote's docstring Creates a new SQLQuery. >>> SQLQuery("x") <sql: 'x'> >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)]) >>> q <sql: 'SELECT * FROM test WHERE x=1'> >>> q.query(), q.values() ('SELECT * FROM test WHERE x=%s', [1]) >>> SQLQuery(SQLParam(1)) <sql: '1'> # Take care of SQLLiterals Returns the query part of the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.query() 'SELECT * FROM test WHERE name=%s' >>> q.query(paramstyle='qmark') 'SELECT * FROM test WHERE name=?' # automatically escape % characters in the query # For backward compatability, ignore escaping when the query looks already escaped Returns the values of the parameters used in the sql query. >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')]) >>> q.values() ['joe'] Joins multiple queries. >>> SQLQuery.join(['a', 'b'], ', ') <sql: 'a, b'> Optinally, prefix and suffix arguments can be provided. 
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')') <sql: '(a, b)'> If target argument is provided, the items are appended to target instead of creating a new SQLQuery. Protects a string from `sqlquote`. >>> sqlquote('NOW()') <sql: "'NOW()'"> >>> sqlquote(SQLLiteral('NOW()')) <sql: 'NOW()'> Database Creates a database. Execute SQL query `sql_query` using dictionary `vars` to interpolate it. If `processed=True`, `vars` is a `reparam`-style list to use instead of interpolating. >>> db = DB(None, {}) >>> db.query("SELECT * FROM foo", _test=True) <sql: 'SELECT * FROM foo'> >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True) <sql: "SELECT * FROM foo WHERE x = 'f'"> # backwards-compatibility Selects `what` from `tables` with clauses `where`, `order`, `group`, `limit`, and `offset`. Uses vars to interpolate. Otherwise, each clause can be a SQLQuery. >>> db = DB(None, {}) >>> db.select('foo', _test=True) <sql: 'SELECT * FROM foo'> >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True) <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'> Inserts `values` into `tablename`. Returns current sequence ID. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True) >>> q <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())"> >>> q.query() 'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())' >>> q.values() [2, 'bob'] Inserts multiple rows into `tablename`. The `values` must be a list of dictioanries, one for each row to be inserted, each with the same set of keys. Returns the list of ids of the inserted rows. Set `seqname` to the ID if it's not the default, or to `False` if there isn't one. >>> db = DB(None, {}) >>> db.supports_multiple_insert = True >>> values = [{"name": "foo", "email": "<EMAIL>"}, {"name": "bar", "email": "<EMAIL>"}] >>> db.multiple_insert('person', values=values, _test=True) <sql: "INSERT INTO person (name, email) VALUES ('foo', '<EMAIL>'), ('bar', '<EMAIL>')"> #@@ make sure all keys are valid # make sure all rows have same keys. # for some databases, a separate query has to be made to find # the id of the inserted row. Update `tables` with clause `where` (interpolated using `vars`) and setting `values`. >>> db = DB(None, {}) >>> name = 'Joseph' >>> q = db.update('foo', where='name = $name', name='bob', age=2, ... created=SQLLiteral('NOW()'), vars=locals(), _test=True) >>> q <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'"> >>> q.query() 'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s' >>> q.values() [2, 'bob', 'Joseph'] Deletes from `table` with clauses `where` and `using`. >>> db = DB(None, {}) >>> name = 'Joe' >>> db.delete('foo', where='name = $name', vars=locals(), _test=True) <sql: "DELETE FROM foo WHERE name = 'Joe'"> | 3.600782 | 4 |
dddppp/settings.py | tysonclugg/dddppp | 0 | 271 | """
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
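# Example only (not part of the original settings): the placeholder SECRET_KEY above
# could be overridden from the environment; the variable name DJANGO_SECRET_KEY is an
# assumption chosen for illustration.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)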
| """
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
| en | 0.605819 | Django settings for dddppp project. Generated by 'django-admin startproject' using Django 1.8.2. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) # Enforce a valid POSIX environment # Get missing environment variables via call to pwd.getpwuid(...) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! # SECURITY WARNING: don't run with debug turned on in production! # Application definition #'django.middleware.security.SecurityMiddleware', # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ # django-secure # see: https://github.com/carljm/django-secure/ for more options #SECURE_SSL_REDIRECT = True | 2.036999 | 2 |
setup.py | dantas/wifi | 1 | 272 |
#!/usr/bin/env python
from setuptools import setup
import os
__doc__ = """
Command line tool and library wrappers around iwlist and
/etc/network/interfaces.
"""
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
install_requires = [
'setuptools',
'pbkdf2',
]
try:
import argparse
except ImportError:
install_requires.append('argparse')
version = '1.0.0'
setup(
name='wifi',
version=version,
author='<NAME>, <NAME>',
author_email='<EMAIL>',
description=__doc__,
long_description=read('README.rst'),
packages=['wifi'],
scripts=['bin/wifi'],
test_suite='tests',
platforms=["Debian"],
license='BSD',
install_requires=install_requires,
classifiers=[
"License :: OSI Approved :: BSD License",
"Topic :: System :: Networking",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
],
data_files=[
('/etc/bash_completion.d/', ['extras/wifi-completion.bash']),
]
)
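# A minimal sketch of the conditional-dependency pattern used above: the
# 'argparse' backport is only added to install_requires when the standard
# library module is missing (as on Python 2.6).
demo_install_requires = ['setuptools', 'pbkdf2']
try:
    import argparse  # noqa: F401  (stdlib on Python 2.7+ and 3.2+)
except ImportError:
    demo_install_requires.append('argparse')
print(demo_install_requires)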
auth-api/src/auth_api/resources/org_products.py | severinbeauvais/sbc-auth | 0 | 273 |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API endpoints for managing an Org resource."""
from flask import request
from flask_restplus import Namespace, Resource, cors
from auth_api import status as http_status
from auth_api.exceptions import BusinessException
from auth_api.jwt_wrapper import JWTWrapper
from auth_api.schemas import ProductSubscriptionSchema
from auth_api.schemas import utils as schema_utils
from auth_api.services import Product as ProductService
from auth_api.tracer import Tracer
from auth_api.utils.roles import Role
from auth_api.utils.util import cors_preflight
API = Namespace('products', description='Endpoints for products management')
TRACER = Tracer.get_instance()
_JWT = JWTWrapper.get_instance()
@cors_preflight('GET,POST,OPTIONS')
@API.route('', methods=['GET', 'POST', 'OPTIONS'])
class OrgProducts(Resource):
"""Resource for managing product subscriptions."""
@staticmethod
@TRACER.trace()
@cors.crossdomain(origin='*')
@_JWT.has_one_of_roles([Role.STAFF_CREATE_ACCOUNTS.value])
def post(org_id):
"""Post a new product subscription to the org using the request body."""
request_json = request.get_json()
valid_format, errors = schema_utils.validate(request_json, 'org_product_subscription')
if not valid_format:
return {'message': schema_utils.serialize(errors)}, http_status.HTTP_400_BAD_REQUEST
try:
subscriptions = ProductService.create_product_subscription(org_id, request_json)
if subscriptions is None:
response, status = {'message': 'Not authorized to perform this action'}, \
http_status.HTTP_401_UNAUTHORIZED
else:
response, status = {'subscriptions': ProductSubscriptionSchema().dump(subscriptions, many=True)}, \
http_status.HTTP_201_CREATED
except BusinessException as exception:
response, status = {'code': exception.code, 'message': exception.message}, exception.status_code
return response, status
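# Hedged client sketch for the endpoint above. The URL prefix and payload
# fields are assumptions for illustration only: the accepted body is whatever
# the 'org_product_subscription' schema defines, and the role check requires
# a valid staff JWT.
import json
import urllib.request

ORG_ID = 1234  # hypothetical org id
URL = 'http://localhost:5000/api/v1/orgs/{}/products'.format(ORG_ID)  # assumed route
BODY = json.dumps({'subscriptions': [{'productCode': 'PPR'}]}).encode()  # assumed schema
REQ = urllib.request.Request(
    URL,
    data=BODY,
    headers={'Content-Type': 'application/json', 'Authorization': 'Bearer <token>'},
    method='POST',
)
# urllib.request.urlopen(REQ)  # uncomment against a running auth-api instance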
TWLight/applications/management/commands/send_coordinator_reminders.py | nicole331/TWLight | 67 | 274 | import logging
from collections import Counter
from django.core.management.base import BaseCommand
from django.db.models import Q
from TWLight.applications.models import Application
from TWLight.resources.models import Partner
from TWLight.applications.signals import Reminder
from TWLight.users.models import Editor
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
# This is not DRY. Originally, this pulled the queryset from
# TWLight.applications.views.ListApplicationsView.get_queryset().
# But that now expects a request object. So, we did a copy/paste.
# We're actually getting apps with a status of PENDING or QUESTION
# or APPROVED, and their corresponding user preferences being True
# for partners with a status of AVAILABLE.
all_apps = (
Application.objects.filter(
Q(
partner__coordinator__editor__user__userprofile__pending_app_reminders=True
)
& Q(status=Application.PENDING)
| Q(
partner__coordinator__editor__user__userprofile__discussion_app_reminders=True
)
& Q(status=Application.QUESTION)
| Q(
partner__coordinator__editor__user__userprofile__approved_app_reminders=True
)
& Q(status=Application.APPROVED),
partner__status__in=[Partner.AVAILABLE],
editor__isnull=False,
)
.exclude(editor__user__groups__name="restricted")
.order_by("status", "partner", "date_created")
)
# A deduplicated dict of coordinators from the pending app queryset, along
# with a count of how many total pending apps they have
coordinators = Counter(
all_apps.values_list(
"partner__coordinator__editor",
"partner__coordinator__email",
"partner__coordinator__editor__user__userprofile__lang",
)
)
for coordinator, count in list(coordinators.items()):
try:
# We create a dictionary with the three status codes
# we'd want to send emails for, and their corresponding
# counts.
app_status_and_count = {
Application.PENDING: all_apps.filter(
status=Application.PENDING,
partner__coordinator__editor=coordinator[0],
).count(),
Application.QUESTION: all_apps.filter(
status=Application.QUESTION,
partner__coordinator__editor=coordinator[0],
).count(),
Application.APPROVED: all_apps.filter(
status=Application.APPROVED,
partner__coordinator__editor=coordinator[0],
).count(),
}
editor = Editor.objects.get(id=coordinator[0])
except Editor.DoesNotExist:
logger.info(
"Editor {} does not exist; skipping.".format(coordinator[0])
)
                continue  # skip this coordinator and move on to the next one
# Only bother with the signal if we have a coordinator email.
if coordinator[1]:
Reminder.coordinator_reminder.send(
sender=self.__class__,
app_status_and_count=app_status_and_count,
coordinator_wp_username=editor.wp_username,
coordinator_email=coordinator[1],
coordinator_lang=coordinator[2],
)
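# A small standalone sketch of the Counter-over-values_list trick used above:
# identical (editor id, email, lang) tuples collapse into one key whose count
# is the number of matching applications. The tuples here are stand-ins for
# queryset rows.
from collections import Counter

demo_rows = [
    (7, 'coord-a@example.org', 'en'),
    (7, 'coord-a@example.org', 'en'),
    (9, 'coord-b@example.org', 'fr'),
]
for demo_coordinator, demo_count in Counter(demo_rows).items():
    print(demo_coordinator[0], demo_coordinator[1], demo_coordinator[2], demo_count)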
python/3D-rrt/pvtrace/LightSources.py | rapattack88/mcclanahoochie | 1 | 275 | # pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum
def random_spherecial_vector():
# This method of calculating isotropic vectors is taken from GNU Scientific Library
LOOP = True
while LOOP:
x = -1. + 2. * np.random.uniform()
y = -1. + 2. * np.random.uniform()
s = x**2 + y**2
if s <= 1.0:
LOOP = False
z = -1. + 2. * s
a = 2 * np.sqrt(1 - s)
x = a * x
y = a * y
return np.array([x,y,z])
class SimpleSource(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False):
super(SimpleSource, self).__init__()
self.position = position
self.direction = direction
self.wavelength = wavelength
self.use_random_polarisation = use_random_polarisation
self.throw = 0
self.source_id = "SimpleSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
        # If use_random_polarisation is set, generate a random polarisation vector for the photon
        if self.use_random_polarisation:
            # Randomise the angle in the xy-plane, then transform from +z to the photon's direction
vec = random_spherecial_vector()
vec[2] = 0.
vec = norm(vec)
R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1])
photon.polarisation = transform_direction(vec, R)
else:
photon.polarisation = None
photon.id = self.throw
self.throw = self.throw + 1
return photon
class Laser(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None):
super(Laser, self).__init__()
self.position = np.array(position)
self.direction = np.array(direction)
self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation)
self.throw = 0
self.source_id = "LaserSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
photon.polarisation = self.polarisation
photon.id = self.throw
self.throw = self.throw + 1
return photon
class PlanarSource(object):
"""A box that emits photons from the top surface (normal), sampled from the spectrum."""
def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05):
super(PlanarSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.plane = FinitePlane(length=length, width=width)
self.length = length
self.width = width
# direction is the direction that photons are fired out of the plane in the GLOBAL FRAME.
        # i.e. this is passed directly to the photon to set its direction
self.direction = direction
self.throw = 0
self.source_id = "PlanarSource_" + str(id(self))
def translate(self, translation):
self.plane.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.plane.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
        # Create a point which is on the surface of the finite plane in its local frame
x = np.random.uniform(0., self.length)
y = np.random.uniform(0., self.width)
local_point = (x, y, 0.)
        # Transform the local point into the global frame (the direction is used as given)
photon.position = transform_point(local_point, self.plane.transform)
photon.direction = self.direction
photon.active = True
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSource(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.throw = 0
self.source_id = "LensSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
z = np.random.uniform(self.planeorigin[2],self.planeextent[2])
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2]
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSourceAngle(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    For this lens an additional z-boost is added (angle of incidence in the z-direction).
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSourceAngle, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.angle = angle
self.throw = 0
self.source_id = "LensSourceAngle_" + str(id(self))
def photon(self):
photon = Photon()
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
boost = y*np.tan(self.angle)
z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2] + boost
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class CylindricalSource(object):
"""
A source for photons emitted in a random direction and position inside a cylinder(radius, length)
"""
def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10):
super(CylindricalSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.shape = Cylinder(radius = radius, length = length)
self.radius = radius
self.length = length
self.throw = 0
self.source_id = "CylindricalSource_" + str(id(self))
def translate(self, translation):
self.shape.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.shape.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position of emission
phi = np.random.uniform(0., 2*np.pi)
r = np.random.uniform(0.,self.radius)
x = r*np.cos(phi)
y = r*np.sin(phi)
z = np.random.uniform(0.,self.length)
local_center = (x,y,z)
photon.position = transform_point(local_center, self.shape.transform)
# Direction of emission (no need to transform if meant to be isotropic)
phi = np.random.uniform(0.,2*np.pi)
theta = np.random.uniform(0.,np.pi)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
local_direction = (x,y,z)
photon.direction = local_direction
# Set wavelength of photon
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
# Further initialisation
photon.active = True
return photon
class PointSource(object):
"""
A point source that emits randomly in solid angle specified by phimin, ..., thetamax
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi):
super(PointSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.throw = 0
self.source_id = "PointSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
phi = np.random.uniform(self.phimin, self.phimax)
theta = np.random.uniform(self.thetamin, self.thetamax)
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
class RadialSource(object):
"""
A point source that emits at discrete angles theta(i) and phi(i)
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi, spacing=20):
super(RadialSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.spacing = spacing
self.throw = 0
self.source_id = "RadialSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
intphi = np.random.randint(1, self.spacing+1)
inttheta = np.random.randint(1, self.spacing+1)
phi = intphi*(self.phimax-self.phimin)/self.spacing
if self.thetamin == self.thetamax:
theta = self.thetamin
else:
theta = inttheta*(self.thetamax-self.thetamin)/self.spacing
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
direction = (x,y,z)
transform = tf.translation_matrix((0,0,0))
point = transform_point(self.center, transform)
photon.direction = direction
photon.position = point
if self.spectrum != None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
photon.active = True
return photon
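# Hedged usage sketch (requires the pvtrace package that provides the imports
# at the top of this module): fire a few photons from a PlanarSource and a
# PointSource and inspect where they start and where they are headed.
if __name__ == "__main__":
    demo_sources = [
        PlanarSource(wavelength=600, direction=(0, 0, 1), length=0.05, width=0.05),
        PointSource(wavelength=555, center=(0., 0., 0.)),
    ]
    for demo_source in demo_sources:
        for _ in range(2):
            demo_photon = demo_source.photon()
            print(demo_source.source_id, demo_photon.position,
                  demo_photon.direction, demo_photon.wavelength)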
circuitry/circuitry.py | nthparty/circuitry | 3 | 276 | """Embedded DSL for assembling logic circuits.
Embedded domain-specific combinator library for
assembling abstract definitions of logic circuits
and synthesizing circuits from those definitions.
"""
from __future__ import annotations
from typing import Sequence
import doctest
from parts import parts
from circuit import op, gate, circuit, signature
class bit():
"""
Class for representing an abstract bit. Such a bit
can be interpreted concretely as a value, but it is
also used to keep track of relationships between
operators and to represent the wires within a
circuit built up out of those operators.
>>> bit.hook_operation(lambda o, v, *args: None)
>>> bit.circuit(circuit())
>>> b = output(input(1).and_(input(1)))
>>> b.value == bit.circuit().evaluate([1,1])[0]
True
>>> def make_hook(bit_):
... def hook(o, v, *args):
... return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args]))
... return hook
>>> bit.hook_operation(make_hook(bit))
>>> bit.circuit(circuit())
>>> b = output(input(0).and_(input(0)))
>>> b.value == bit.circuit().evaluate([0,0])[0]
True
"""
_circuit = None
_hook_operation = None
@staticmethod
def circuit(circuit_=None):
if circuit_ is not None:
bit._circuit = circuit_
return None
else:
bit._circuit.prune_and_topological_sort_stable()
return bit._circuit
@staticmethod
def hook_operation(hook=None):
bit._hook_operation = hook
@staticmethod
def operation(o, *args):
# Ensure second argument is a `bit`.
args = list(args)
if len(args) == 2:
args[1] = constant(args[1]) if isinstance(args[1], int) else args[1]
# Compute the value of the result of the operation on the arguments.
v = o(*[a.value for a in args])
# Return output from hook if it exists and if
# it returns an output.
if bit._hook_operation is not None:
r = bit._hook_operation(o, v, *args)
if r is not None:
return r
return bit.constructor(*args)(v, bit.gate(o, [a.gate for a in args]))
@staticmethod
def constructor(b1, b2=None):
# The inference code below is not currently in use.
"""
if isinstance(b1, input_one) and isinstance(b2, input_one):
return input_one
elif isinstance(b1, input_two) and isinstance(b2, input_two):
return input_two
elif isinstance(b1, (input_one, input_two)) and b2 is None:
return type(b1)
else:
return bit
"""
return bit
@staticmethod
def gate(operation, igs):
return bit._circuit.gate(operation, igs)
def __init__(self, value, gate_=None):
self.value = value
self.gate = bit._circuit.gate() if gate_ is None else gate_
def __int__(self):
return self.value
def not_(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(input(x).not_())
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __invert__(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(~input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __rsub__(self, other):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(1 - input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
>>> bit.circuit(circuit())
>>> 2 - input(0)
Traceback (most recent call last):
...
ValueError: can only subtract a bit from the integer 1
"""
if other == 1:
return bit.operation(op.not_, self)
raise ValueError('can only subtract a bit from the integer 1')
def and_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).and_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __and__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) & input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __rand__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 0 & constant(1)
>>> b.value
0
"""
return self & (constant(other) if isinstance(other, int) else other)
def nimp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def nimp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def __gt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) > input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nimp(other)
def nif(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def nif_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def __lt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) < input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nif(other)
def xor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def xor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __xor__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) ^ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __rxor__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 ^ constant(0)
>>> b.value
1
"""
return self ^ (constant(other) if isinstance(other, int) else other)
def or_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).or_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __or__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) | input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __ror__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 | constant(0)
>>> b.value
1
"""
return self | (constant(other) if isinstance(other, int) else other)
def nor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def nor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def __mod__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) % input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def xnor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def xnor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def __eq__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) == input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def if_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).if_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def __ge__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) >= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def imp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def imp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def __le__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) <= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def nand(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def nand_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def __matmul__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) @ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
class constant(bit):
"""Bit that is designated as a constant input."""
class input(bit):
"""Bit that is designated as a variable input."""
def __init__(self: bit, value: int):
self.value = value
self.gate = bit._circuit.gate(op.id_, is_input=True)
class input_one(input):
"""Bit that is designated as a variable input from one source."""
class input_two(input):
"""Bit that is designated as a variable input from a second source."""
class output(bit):
"""
Bit that is designated an output.
>>> bit.circuit(circuit())
>>> b0 = output(input(1).not_())
>>> b1 = output(b0.not_())
>>> b2 = output(b0)
>>> [b0.value, b1.value, b2.value]
[0, 1, 0]
"""
def __init__(self: bit, b: bit):
# Check if bit is ready as final output or whether there are others dependent on it.
if len(b.gate.outputs) > 0:
b = ~(~b) # Preserve the bit by copying it to a new wire.
self.value = b.value
self.gate = bit._circuit.gate(op.id_, [b.gate], is_output=True)
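# A small usage sketch of the primitives defined above (mirroring the doctest
# style of this module): synthesize a tiny NOT(x AND y) circuit and check that
# the tracked value agrees with evaluating the synthesized circuit.
def _demo_bit_usage(x=0, y=1):
    bit.circuit(circuit())
    r = output(~(input(x) & input(y)))
    # The value carried by the bit should match the circuit evaluation.
    return int(r) == bit.circuit().evaluate([x, y])[0]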
class bits_type(int): # pylint: disable=R0903
"""
Class for representing an input or output type of a
function decorated for automated synthesis.
"""
class bits(list):
"""
Class for representing a vector of abstract bits.
"""
@staticmethod
def from_byte(byte_: int, constructor=bit) -> bits:
return bits([
constructor(bit_)
for bit_ in reversed([(byte_>>i)%2 for i in range(8)])
])
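    # Illustrative sketch: the eight bits of a byte are produced with the most
    # significant bit first. Assumes the usual bit.circuit(circuit()) setup
    # used by the doctests in this file.
    # >>> bit.circuit(circuit())
    # >>> [b.value for b in bits.from_byte(3)]
    # [0, 0, 0, 0, 0, 0, 1, 1]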
@staticmethod
def from_bytes(bytes_, constructor=bit) -> bits:
"""
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([255]))]
[1, 1, 1, 1, 1, 1, 1, 1]
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([11, 0]))]
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
"""
return bits([
bit_
for byte_ in bytes_
for bit_ in bits.from_byte(byte_, constructor)
])
@staticmethod
def zeros(n: int) -> bits:
"""
>>> bit.circuit(circuit())
>>> xs = bits.zeros(3)
>>> ys = outputs(xs.not_())
>>> [y.value for y in ys]
[1, 1, 1]
"""
return bits([constant(0)]*n)
def __new__(cls, argument = None) -> bits:
"""
        Return a bits object for the supplied argument, or a bits_type when the argument is an integer.
"""
return bits_type(argument)\
if isinstance(argument, int) else\
list.__new__(cls, argument)
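    # Illustrative sketch: an iterable argument produces an actual vector of
    # bits (contrast with the integer case noted above). Assumes the usual
    # bit.circuit(circuit()) setup used by the doctests in this file.
    # >>> bit.circuit(circuit())
    # >>> len(bits([constant(0), constant(1)]))
    # 2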
def __int__(self: bits) -> int:
"""
>>> bit.circuit(circuit())
>>> xs = constants([0, 0, 0])
>>> ys = outputs(xs.not_())
>>> int(ys)
7
"""
return sum(int(b)*(2**i) for (i, b) in zip(range(len(self)), reversed(self)))
def not_(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(xs.not_())
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def __invert__(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(~xs)
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def and_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.and_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def __and__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs & ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def nimp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nimp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def __gt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs > ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nif(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def nif_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def __lt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs < ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def xor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def xor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def __xor__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs ^ ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def or_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.or_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def __or__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs | ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def nor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def nor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def __mod__(self, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs % ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def xnor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def xnor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def __eq__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs == ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def if_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.if_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def __ge__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs >= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def imp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def imp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def __le__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs <= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def nand(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def nand_(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def __rshift__(self: bits, other) -> bits:
"""
        Overloaded operator: an integer argument performs a logical right shift, while a singleton set (e.g., {3}) performs a rotation.
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs >> 3
>>> [b.value for b in bs]
[0, 0, 0, 1, 1, 1, 1, 0]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [0,0,0,0,1,1,1,1]))
>>> bs = bs >> {3}
>>> [b.value for b in bs]
[1, 1, 1, 0, 0, 0, 0, 1]
"""
if isinstance(other, set) and isinstance(list(other)[0], int): # Rotation.
quantity = list(other)[0]
return bits(self[len(self)-quantity:]) ** bits(self[0:len(self)-quantity])
else: # Shift
return bits([constant(0)]*other) ** bits(self[0:len(self)-other])
def __lshift__(self: bits, other) -> bits:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs << 3
>>> [b.value for b in bs]
[1, 0, 0, 0, 0, 0, 0, 0]
"""
return bits(self[other:]) ** bits([constant(0) for _ in range(other)])
def __truediv__(self: bits, other) -> Sequence[bits]:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / 2)
>>> ([b.value for b in bss[0]], [b.value for b in bss[1]])
([1, 1, 1, 1], [0, 0, 0, 0])
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / {2})
>>> [[b.value for b in bs] for bs in bss]
[[1, 1], [1, 1], [0, 0], [0, 0]]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / [1, 3, 4])
>>> [[b.value for b in bs] for bs in bss]
[[1], [1, 1, 1], [0, 0, 0, 0]]
"""
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], int):
return map(bits, parts(self, length=other)) # Sequence of lengths.
elif isinstance(other, set) and len(other) == 1 and isinstance(list(other)[0], int):
return self / (len(self)//list(other)[0]) # Parts of length `other`.
else:
return map(bits, parts(self, other)) # Number of parts is `other`.
def __add__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
result = list(self)
result.extend(list(other))
return bits(result)
def __pow__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
return self + other
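    # Illustrative sketch: `+` and `**` perform the same concatenation of bit
    # vectors. Assumes the usual bit.circuit(circuit()) setup used by the
    # doctests in this file.
    # >>> bit.circuit(circuit())
    # >>> bs = constants([1, 0]) + constants([0, 1])
    # >>> [b.value for b in bs]
    # [1, 0, 0, 1]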
def constants(l):
return bits(map(constant, l))
def inputs(l):
return bits(map(input, l))
def outputs(l):
return bits(map(output, l))
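# Illustrative sketch: these helpers wrap a list of integer values into vectors
# of constant, input, or output bits, and are used throughout the doctests
# above.
# >>> bit.circuit(circuit())
# >>> ys = outputs(inputs([1, 0]).not_())
# >>> [int(y) for y in ys]
# [0, 1]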
def synthesize(f):
"""
Decorator for automatically synthesizing a circuit from a
function that takes only `bit` and/or `bits` objects as its
arguments and returns an output of type `bit` or `bits`.
>>> @synthesize
... def equal(x: bit, y: bit) -> bit:
... return (x & y) | ((1 - x) & (1 - y))
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [equal.circuit.evaluate(xy) for xy in xys]
[[1], [0], [0], [1]]
>>> @synthesize
... def conjunction(xy: bits(2)) -> bits(2):
... return (xy[0], xy[0] & xy[1])
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [conjunction.circuit.evaluate(xy) for xy in xys]
[[0, 0], [0, 0], [1, 0], [1, 1]]
>>> @synthesize
... def equal(x, y):
... return x & y
Traceback (most recent call last):
...
RuntimeError: automated circuit synthesis failed
"""
    # Helper functions for determining the input/output types and signature
    # from the type annotations of the decorated function.
type_in = lambda a: input(0) if a is bit else inputs([0] * a)
type_out = lambda a: output if a is bit else outputs
# For forward-compatibility with PEP 563.
eval_ = lambda a: eval(a) if isinstance(a, str) else a # pylint: disable=W0123
try:
# Construct the circuit and add it to the function as an attribute.
bit.circuit(circuit())
args_in = {
k: type_in(eval_(a))
for (k, a) in f.__annotations__.items() if k != 'return'
}
type_out(eval_(f.__annotations__['return']))(f(**args_in))
f.circuit = bit.circuit()
except:
raise RuntimeError('automated circuit synthesis failed') from None
# Return the original function.
return f
if __name__ == "__main__":
doctest.testmod() # pragma: no cover
| """Embedded DSL for assembling logic circuits.
Embedded domain-specific combinator library for
assembling abstract definitions of logic circuits
and synthesizing circuits from those definitions.
"""
from __future__ import annotations
from typing import Sequence
import doctest
from parts import parts
from circuit import op, gate, circuit, signature
class bit():
"""
Class for representing an abstract bit. Such a bit
can be interpreted concretely as a value, but it is
also used to keep track of relationships between
operators and to represent the wires within a
circuit built up out of those operators.
>>> bit.hook_operation(lambda o, v, *args: None)
>>> bit.circuit(circuit())
>>> b = output(input(1).and_(input(1)))
>>> b.value == bit.circuit().evaluate([1,1])[0]
True
>>> def make_hook(bit_):
... def hook(o, v, *args):
... return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args]))
... return hook
>>> bit.hook_operation(make_hook(bit))
>>> bit.circuit(circuit())
>>> b = output(input(0).and_(input(0)))
>>> b.value == bit.circuit().evaluate([0,0])[0]
True
"""
_circuit = None
_hook_operation = None
@staticmethod
def circuit(circuit_=None):
if circuit_ is not None:
bit._circuit = circuit_
return None
else:
bit._circuit.prune_and_topological_sort_stable()
return bit._circuit
@staticmethod
def hook_operation(hook=None):
bit._hook_operation = hook
@staticmethod
def operation(o, *args):
# Ensure second argument is a `bit`.
args = list(args)
if len(args) == 2:
args[1] = constant(args[1]) if isinstance(args[1], int) else args[1]
# Compute the value of the result of the operation on the arguments.
v = o(*[a.value for a in args])
# Return output from hook if it exists and if
# it returns an output.
if bit._hook_operation is not None:
r = bit._hook_operation(o, v, *args)
if r is not None:
return r
return bit.constructor(*args)(v, bit.gate(o, [a.gate for a in args]))
@staticmethod
def constructor(b1, b2=None):
# The inference code below is not currently in use.
"""
if isinstance(b1, input_one) and isinstance(b2, input_one):
return input_one
elif isinstance(b1, input_two) and isinstance(b2, input_two):
return input_two
elif isinstance(b1, (input_one, input_two)) and b2 is None:
return type(b1)
else:
return bit
"""
return bit
@staticmethod
def gate(operation, igs):
return bit._circuit.gate(operation, igs)
def __init__(self, value, gate_=None):
self.value = value
self.gate = bit._circuit.gate() if gate_ is None else gate_
def __int__(self):
return self.value
def not_(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(input(x).not_())
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __invert__(self):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(~input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
"""
return bit.operation(op.not_, self)
def __rsub__(self, other):
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... b = output(1 - input(x))
... results.append(int(b) == bit.circuit().evaluate([x])[0])
>>> all(results)
True
>>> bit.circuit(circuit())
>>> 2 - input(0)
Traceback (most recent call last):
...
ValueError: can only subtract a bit from the integer 1
"""
if other == 1:
return bit.operation(op.not_, self)
raise ValueError('can only subtract a bit from the integer 1')
def and_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).and_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __and__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) & input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.and_, self, other)
def __rand__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 0 & constant(1)
>>> b.value
0
"""
return self & (constant(other) if isinstance(other, int) else other)
def nimp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def nimp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nimp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nimp_, self, other)
def __gt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) > input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nimp(other)
def nif(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def nif_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nif_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nif_, self, other)
def __lt__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) < input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return self.nif(other)
def xor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def xor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __xor__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) ^ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xor_, self, other)
def __rxor__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 ^ constant(0)
>>> b.value
1
"""
return self ^ (constant(other) if isinstance(other, int) else other)
def or_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).or_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __or__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) | input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.or_, self, other)
def __ror__(self, other):
"""
>>> bit.circuit(circuit())
>>> b = 1 | constant(0)
>>> b.value
1
"""
return self | (constant(other) if isinstance(other, int) else other)
def nor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def nor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def __mod__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) % input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nor_, self, other)
def xnor(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def xnor_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).xnor_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def __eq__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) == input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.xnor_, self, other)
def if_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).if_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def __ge__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) >= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.if_, self, other)
def imp(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def imp_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).imp_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def __le__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) <= input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.imp_, self, other)
def nand(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def nand_(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x).nand_(input(y)))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
def __matmul__(self, other):
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... b = output(input(x) @ input(y))
... results.append(int(b) == bit.circuit().evaluate([x,y])[0])
>>> all(results)
True
"""
return bit.operation(op.nand_, self, other)
class constant(bit):
"""Bit that is designated as a constant input."""
class input(bit):
"""Bit that is designated as a variable input."""
def __init__(self: bit, value: int):
self.value = value
self.gate = bit._circuit.gate(op.id_, is_input=True)
class input_one(input):
"""Bit that is designated as a variable input from one source."""
class input_two(input):
"""Bit that is designated as a variable input from a second source."""
class output(bit):
"""
Bit that is designated an output.
>>> bit.circuit(circuit())
>>> b0 = output(input(1).not_())
>>> b1 = output(b0.not_())
>>> b2 = output(b0)
>>> [b0.value, b1.value, b2.value]
[0, 1, 0]
"""
def __init__(self: bit, b: bit):
# Check if bit is ready as final output or whether there are others dependent on it.
if len(b.gate.outputs) > 0:
b = ~(~b) # Preserve the bit by copying it to a new wire.
self.value = b.value
self.gate = bit._circuit.gate(op.id_, [b.gate], is_output=True)
class bits_type(int): # pylint: disable=R0903
"""
Class for representing an input or output type of a
function decorated for automated synthesis.
"""
class bits(list):
"""
Class for representing a vector of abstract bits.
"""
@staticmethod
def from_byte(byte_: int, constructor=bit) -> bits:
return bits([
constructor(bit_)
for bit_ in reversed([(byte_>>i)%2 for i in range(8)])
])
@staticmethod
def from_bytes(bytes_, constructor=bit) -> bits:
"""
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([255]))]
[1, 1, 1, 1, 1, 1, 1, 1]
>>> bit.circuit(circuit())
>>> [b.value for b in bits.from_bytes(bytes([11, 0]))]
[0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]
"""
return bits([
bit_
for byte_ in bytes_
for bit_ in bits.from_byte(byte_, constructor)
])
@staticmethod
def zeros(n: int) -> bits:
"""
>>> bit.circuit(circuit())
>>> xs = bits.zeros(3)
>>> ys = outputs(xs.not_())
>>> [y.value for y in ys]
[1, 1, 1]
"""
return bits([constant(0)]*n)
def __new__(cls, argument = None) -> bits:
"""
Return bits object given the supplied argument.
"""
return bits_type(argument)\
if isinstance(argument, int) else\
list.__new__(cls, argument)
def __int__(self: bits) -> int:
"""
>>> bit.circuit(circuit())
>>> xs = constants([0, 0, 0])
>>> ys = outputs(xs.not_())
>>> int(ys)
7
"""
return sum(int(b)*(2**i) for (i, b) in zip(range(len(self)), reversed(self)))
def not_(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(xs.not_())
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def __invert__(self: bits) -> bits:
"""
>>> results = []
>>> for x in [0, 1]:
... bit.circuit(circuit())
... xs = inputs([x, x, x])
... ys = outputs(~xs)
... ns = [int(y) for y in ys]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x]))
>>> all(results)
True
"""
return bits([x.not_() for x in self])
def and_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.and_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def __and__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs & ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.and_(y) for (x, y) in zip(self, other)])
def nimp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nimp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nimp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def __gt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs > ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nimp_(y) for (x, y) in zip(self, other)])
def nif(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def nif_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nif_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def __lt__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs < ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nif_(y) for (x, y) in zip(self, other)])
def xor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def xor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def __xor__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs ^ ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xor_(y) for (x, y) in zip(self, other)])
def or_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.or_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def __or__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs | ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.or_(y) for (x, y) in zip(self, other)])
def nor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def nor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def __mod__(self, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs % ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nor_(y) for (x, y) in zip(self, other)])
def xnor(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def xnor_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.xnor_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def __eq__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs == ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.xnor_(y) for (x, y) in zip(self, other)])
def if_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.if_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def __ge__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs >= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.if_(y) for (x, y) in zip(self, other)])
def imp(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def imp_(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.imp_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def __le__(self: bits, other: bits) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs <= ys)
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.imp_(y) for (x, y) in zip(self, other)])
def nand(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def nand_(self: bits, other) -> bits:
"""
>>> results = []
>>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]:
... bit.circuit(circuit())
... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y]))
... zs = outputs(xs.nand_(ys))
... ns = [int(z) for z in zs]
... c = bit.circuit()
... results.append(ns == c.evaluate([x, x, x, y, y, y]))
>>> all(results)
True
"""
return bits([x.nand_(y) for (x, y) in zip(self, other)])
def __rshift__(self: bits, other) -> bits:
"""
Overloaded operator: rotation and shift operations.
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs >> 3
>>> [b.value for b in bs]
[0, 0, 0, 1, 1, 1, 1, 0]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [0,0,0,0,1,1,1,1]))
>>> bs = bs >> {3}
>>> [b.value for b in bs]
[1, 1, 1, 0, 0, 0, 0, 1]
"""
if isinstance(other, set) and isinstance(list(other)[0], int): # Rotation.
quantity = list(other)[0]
return bits(self[len(self)-quantity:]) ** bits(self[0:len(self)-quantity])
else: # Shift
return bits([constant(0)]*other) ** bits(self[0:len(self)-other])
def __lshift__(self: bits, other) -> bits:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bs = bs << 3
>>> [b.value for b in bs]
[1, 0, 0, 0, 0, 0, 0, 0]
"""
return bits(self[other:]) ** bits([constant(0) for _ in range(other)])
def __truediv__(self: bits, other) -> Sequence[bits]:
"""
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / 2)
>>> ([b.value for b in bss[0]], [b.value for b in bss[1]])
([1, 1, 1, 1], [0, 0, 0, 0])
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / {2})
>>> [[b.value for b in bs] for bs in bss]
[[1, 1], [1, 1], [0, 0], [0, 0]]
>>> bit.circuit(circuit())
>>> bs = bits(map(bit, [1,1,1,1,0,0,0,0]))
>>> bss = list(bs / [1, 3, 4])
>>> [[b.value for b in bs] for bs in bss]
[[1], [1, 1, 1], [0, 0, 0, 0]]
"""
if isinstance(other, list) and len(other) > 0 and isinstance(other[0], int):
return map(bits, parts(self, length=other)) # Sequence of lengths.
elif isinstance(other, set) and len(other) == 1 and isinstance(list(other)[0], int):
return self / (len(self)//list(other)[0]) # Parts of length `other`.
else:
return map(bits, parts(self, other)) # Number of parts is `other`.
def __add__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
result = list(self)
result.extend(list(other))
return bits(result)
def __pow__(self: bits, other) -> bits:
"""Concatenation of bit vectors."""
return self + other
def constants(l):
return bits(map(constant, l))
def inputs(l):
return bits(map(input, l))
def outputs(l):
return bits(map(output, l))
def synthesize(f):
"""
Decorator for automatically synthesizing a circuit from a
function that takes only `bit` and/or `bits` objects as its
arguments and returns an output of type `bit` or `bits`.
>>> @synthesize
... def equal(x: bit, y: bit) -> bit:
... return (x & y) | ((1 - x) & (1 - y))
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [equal.circuit.evaluate(xy) for xy in xys]
[[1], [0], [0], [1]]
>>> @synthesize
... def conjunction(xy: bits(2)) -> bits(2):
... return (xy[0], xy[0] & xy[1])
>>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)]
>>> [conjunction.circuit.evaluate(xy) for xy in xys]
[[0, 0], [0, 0], [1, 0], [1, 1]]
>>> @synthesize
... def equal(x, y):
... return x & y
Traceback (most recent call last):
...
RuntimeError: automated circuit synthesis failed
"""
# Functions for determining types/signature from
# the type annotation of the decorated function.
type_in = lambda a: input(0) if a is bit else inputs([0] * a)
type_out = lambda a: output if a is bit else outputs
# For forward-compatibility with PEP 563.
eval_ = lambda a: eval(a) if isinstance(a, str) else a # pylint: disable=W0123
try:
# Construct the circuit and add it to the function as an attribute.
bit.circuit(circuit())
args_in = {
k: type_in(eval_(a))
for (k, a) in f.__annotations__.items() if k != 'return'
}
type_out(eval_(f.__annotations__['return']))(f(**args_in))
f.circuit = bit.circuit()
except:
raise RuntimeError('automated circuit synthesis failed') from None
# Return the original function.
return f
if __name__ == "__main__":
doctest.testmod() # pragma: no cover
| en | 0.512976 | Embedded DSL for assembling logic circuits. Embedded domain-specific combinator library for assembling abstract definitions of logic circuits and synthesizing circuits from those definitions. Class for representing an abstract bit. Such a bit can be interpreted concretely as a value, but it is also used to keep track of relationships between operators and to represent the wires within a circuit built up out of those operators. >>> bit.hook_operation(lambda o, v, *args: None) >>> bit.circuit(circuit()) >>> b = output(input(1).and_(input(1))) >>> b.value == bit.circuit().evaluate([1,1])[0] True >>> def make_hook(bit_): ... def hook(o, v, *args): ... return bit_.constructor(*args)(v, bit_.gate(o, [a.gate for a in args])) ... return hook >>> bit.hook_operation(make_hook(bit)) >>> bit.circuit(circuit()) >>> b = output(input(0).and_(input(0))) >>> b.value == bit.circuit().evaluate([0,0])[0] True # Ensure second argument is a `bit`. # Compute the value of the result of the operation on the arguments. # Return output from hook if it exists and if # it returns an output. # The inference code below is not currently in use. if isinstance(b1, input_one) and isinstance(b2, input_one): return input_one elif isinstance(b1, input_two) and isinstance(b2, input_two): return input_two elif isinstance(b1, (input_one, input_two)) and b2 is None: return type(b1) else: return bit >>> results = [] >>> for x in [0, 1]: ... bit.circuit(circuit()) ... b = output(input(x).not_()) ... results.append(int(b) == bit.circuit().evaluate([x])[0]) >>> all(results) True >>> results = [] >>> for x in [0, 1]: ... bit.circuit(circuit()) ... b = output(~input(x)) ... results.append(int(b) == bit.circuit().evaluate([x])[0]) >>> all(results) True >>> results = [] >>> for x in [0, 1]: ... bit.circuit(circuit()) ... b = output(1 - input(x)) ... results.append(int(b) == bit.circuit().evaluate([x])[0]) >>> all(results) True >>> bit.circuit(circuit()) >>> 2 - input(0) Traceback (most recent call last): ... ValueError: can only subtract a bit from the integer 1 >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).and_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) & input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> bit.circuit(circuit()) >>> b = 0 & constant(1) >>> b.value 0 >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nimp(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nimp_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) > input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nif(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... 
b = output(input(x).nif_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) < input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).xor(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).xor_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) ^ input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> bit.circuit(circuit()) >>> b = 1 ^ constant(0) >>> b.value 1 >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).or_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) | input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> bit.circuit(circuit()) >>> b = 1 | constant(0) >>> b.value 1 >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nor(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nor_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) % input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).xnor(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).xnor_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) == input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).if_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) >= input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).imp(input(y))) ... 
results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).imp_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) <= input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nand(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x).nand_(input(y))) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... b = output(input(x) @ input(y)) ... results.append(int(b) == bit.circuit().evaluate([x,y])[0]) >>> all(results) True Bit that is designated as a constant input. Bit that is designated as a variable input. Bit that is designated as a variable input from one source. Bit that is designated as a variable input from a second source. Bit that is designated an output. >>> bit.circuit(circuit()) >>> b0 = output(input(1).not_()) >>> b1 = output(b0.not_()) >>> b2 = output(b0) >>> [b0.value, b1.value, b2.value] [0, 1, 0] # Check if bit is ready as final output or whether there are others dependent on it. # Preserve the bit by copying it to a new wire. # pylint: disable=R0903 Class for representing an input or output type of a function decorated for automated synthesis. Class for representing a vector of abstract bits. >>> bit.circuit(circuit()) >>> [b.value for b in bits.from_bytes(bytes([255]))] [1, 1, 1, 1, 1, 1, 1, 1] >>> bit.circuit(circuit()) >>> [b.value for b in bits.from_bytes(bytes([11, 0]))] [0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0] >>> bit.circuit(circuit()) >>> xs = bits.zeros(3) >>> ys = outputs(xs.not_()) >>> [y.value for y in ys] [1, 1, 1] Return bits object given the supplied argument. >>> bit.circuit(circuit()) >>> xs = constants([0, 0, 0]) >>> ys = outputs(xs.not_()) >>> int(ys) 7 >>> results = [] >>> for x in [0, 1]: ... bit.circuit(circuit()) ... xs = inputs([x, x, x]) ... ys = outputs(xs.not_()) ... ns = [int(y) for y in ys] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x])) >>> all(results) True >>> results = [] >>> for x in [0, 1]: ... bit.circuit(circuit()) ... xs = inputs([x, x, x]) ... ys = outputs(~xs) ... ns = [int(y) for y in ys] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.and_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs & ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... 
bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nimp(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nimp_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs > ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nif(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nif_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs < ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.xor(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.xor_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs ^ ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.or_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs | ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nor(ys)) ... ns = [int(z) for z in zs] ... 
c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nor_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs % ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.xnor(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.xnor_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs == ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.if_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs >= ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.imp(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.imp_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs <= ys) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nand(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... 
results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True >>> results = [] >>> for (x, y) in [(0, 0), (0, 1), (1, 0), (1, 1)]: ... bit.circuit(circuit()) ... (xs, ys) = (inputs([x, x, x]), inputs([y, y, y])) ... zs = outputs(xs.nand_(ys)) ... ns = [int(z) for z in zs] ... c = bit.circuit() ... results.append(ns == c.evaluate([x, x, x, y, y, y])) >>> all(results) True Overloaded operator: rotation and shift operations. >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [1,1,1,1,0,0,0,0])) >>> bs = bs >> 3 >>> [b.value for b in bs] [0, 0, 0, 1, 1, 1, 1, 0] >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [0,0,0,0,1,1,1,1])) >>> bs = bs >> {3} >>> [b.value for b in bs] [1, 1, 1, 0, 0, 0, 0, 1] # Rotation. # Shift >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [1,1,1,1,0,0,0,0])) >>> bs = bs << 3 >>> [b.value for b in bs] [1, 0, 0, 0, 0, 0, 0, 0] >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [1,1,1,1,0,0,0,0])) >>> bss = list(bs / 2) >>> ([b.value for b in bss[0]], [b.value for b in bss[1]]) ([1, 1, 1, 1], [0, 0, 0, 0]) >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [1,1,1,1,0,0,0,0])) >>> bss = list(bs / {2}) >>> [[b.value for b in bs] for bs in bss] [[1, 1], [1, 1], [0, 0], [0, 0]] >>> bit.circuit(circuit()) >>> bs = bits(map(bit, [1,1,1,1,0,0,0,0])) >>> bss = list(bs / [1, 3, 4]) >>> [[b.value for b in bs] for bs in bss] [[1], [1, 1, 1], [0, 0, 0, 0]] # Sequence of lengths. # Parts of length `other`. # Number of parts is `other`. Concatenation of bit vectors. Concatenation of bit vectors. Decorator for automatically synthesizing a circuit from a function that takes only `bit` and/or `bits` objects as its arguments and returns an output of type `bit` or `bits`. >>> @synthesize ... def equal(x: bit, y: bit) -> bit: ... return (x & y) | ((1 - x) & (1 - y)) >>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)] >>> [equal.circuit.evaluate(xy) for xy in xys] [[1], [0], [0], [1]] >>> @synthesize ... def conjunction(xy: bits(2)) -> bits(2): ... return (xy[0], xy[0] & xy[1]) >>> xys = [bits([x, y]) for x in (0, 1) for y in (0, 1)] >>> [conjunction.circuit.evaluate(xy) for xy in xys] [[0, 0], [0, 0], [1, 0], [1, 1]] >>> @synthesize ... def equal(x, y): ... return x & y Traceback (most recent call last): ... RuntimeError: automated circuit synthesis failed # Functions for determining types/signature from # the type annotation of the decorated function. # For forward-compatibility with PEP 563. # pylint: disable=W0123 # Construct the circuit and add it to the function as an attribute. # Return the original function. # pragma: no cover | 3.867022 | 4 |
plot_user_activity.py | KanayBhandari/discord_bot_project | 0 | 277 | import discord
import random
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import csv
async def plot_user_activity(client, ctx):
plt.style.use('fivethirtyeight')
df = pd.read_csv('innovators.csv', encoding= 'unicode_escape')
author = df['author'].to_list()
message_counter = {}
for i in author:
if i in message_counter:
message_counter[i] += 1
else:
message_counter[i] = 1
    # Drop the bot's own messages so they do not appear in the line graph.
    message_counter.pop('ninza_bot_test', None)
authors_in_discord = list(message_counter.keys())
no_of_messages = list(message_counter.values())
    plt.plot(authors_in_discord, no_of_messages, marker='o', markersize=10)
    plt.title('Messages sent per author in the server')
    plt.xlabel('Author')
    plt.ylabel('Message count')
    plt.tight_layout()
    plt.savefig('output2.png')
    plt.close()
await ctx.send(file = discord.File('output2.png'))
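# A minimal wiring sketch (assumption, not part of the original file): exposing
# the coroutine above as a bot command via discord.py's commands extension. The
# prefix, command name and intents below are illustrative placeholders.
def _example_register_activity_command():
    from discord.ext import commands

    bot = commands.Bot(command_prefix='!', intents=discord.Intents.default())

    @bot.command(name='activity')
    async def activity(ctx):  # invoked by typing "!activity" in a channel
        await plot_user_activity(bot, ctx)

    return bot  # the caller still needs bot.run(<token>) with a real token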
| import discord
import random
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import csv
async def plot_user_activity(client, ctx):
plt.style.use('fivethirtyeight')
df = pd.read_csv('innovators.csv', encoding= 'unicode_escape')
author = df['author'].to_list()
message_counter = {}
for i in author:
if i in message_counter:
message_counter[i] += 1
else:
message_counter[i] = 1
    # Drop the bot's own messages so they do not appear in the line graph.
    message_counter.pop('ninza_bot_test', None)
authors_in_discord = list(message_counter.keys())
no_of_messages = list(message_counter.values())
    plt.plot(authors_in_discord, no_of_messages, marker='o', markersize=10)
    plt.title('Messages sent per author in the server')
    plt.xlabel('Author')
    plt.ylabel('Message count')
    plt.tight_layout()
    plt.savefig('output2.png')
    plt.close()
await ctx.send(file = discord.File('output2.png'))
| en | 0.860768 | # for not mentioning the bot in the line graph. | 2.899337 | 3 |
setup.py | skojaku/fastnode2vec | 61 | 278 | <reponame>skojaku/fastnode2vec<gh_stars>10-100
#!/usr/bin/env python3
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fastnode2vec",
version="0.0.5",
author="<NAME>",
license="MIT",
author_email="<EMAIL>",
description="Fast implementation of node2vec",
long_description=read("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/louisabraham/fastnode2vec",
packages=["fastnode2vec"],
install_requires=["numpy", "numba", "gensim", "click", "tqdm"],
python_requires=">=3.6",
entry_points={"console_scripts": ["fastnode2vec = fastnode2vec.cli:node2vec"]},
classifiers=["Topic :: Scientific/Engineering :: Artificial Intelligence"],
)
| #!/usr/bin/env python3
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fastnode2vec",
version="0.0.5",
author="<NAME>",
license="MIT",
author_email="<EMAIL>",
description="Fast implementation of node2vec",
long_description=read("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/louisabraham/fastnode2vec",
packages=["fastnode2vec"],
install_requires=["numpy", "numba", "gensim", "click", "tqdm"],
python_requires=">=3.6",
entry_points={"console_scripts": ["fastnode2vec = fastnode2vec.cli:node2vec"]},
classifiers=["Topic :: Scientific/Engineering :: Artificial Intelligence"],
) | fr | 0.221828 | #!/usr/bin/env python3 | 1.593607 | 2 |
app/main/config.py | nhattvm11/flask-restful-boilerplate | 0 | 279 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
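# A usage sketch (assumption, not shown in this file): the mapping above is
# typically consumed by a Flask application factory. The create_app name and
# the 'dev' default are illustrative.
def _example_create_app(config_name='dev'):
    from flask import Flask

    app = Flask(__name__)
    app.config.from_object(config_by_name[config_name])
    return app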
| import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
| none | 1 | 2.090603 | 2 |
|
Chapter07/library/check_user_py3.py | djouani/Learning-Ansible-2.X-Third-Edition | 22 | 280 | <reponame>djouani/Learning-Ansible-2.X-Third-Edition
#!/usr/bin/env python
import pwd
from ansible.module_utils.basic import AnsibleModule
class User:
def __init__(self, user):
self.user = user
# Check if user exists
def check_if_user_exists(self):
try:
user = pwd.getpwnam(self.user)
success = True
ret_msg = 'User %s exists' % self.user
except KeyError:
success = False
            ret_msg = 'User %s does not exist' % self.user
return success, ret_msg
def main():
# Parsing argument file
module = AnsibleModule(
argument_spec = dict(
user = dict(required=True)
)
)
user = module.params.get('user')
chkusr = User(user)
success, ret_msg = chkusr.check_if_user_exists()
# Error handling and JSON return
if success:
module.exit_json(msg=ret_msg)
else:
module.fail_json(msg=ret_msg)
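# A small usage sketch (assumption, not part of the original module): the User
# class can also be exercised directly, outside of Ansible, for quick local
# testing.
def _example_check(username='root'):
    exists, msg = User(username).check_if_user_exists()
    print(exists, msg)  # e.g. (True, 'User root exists') on most Linux hosts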
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import pwd
from ansible.module_utils.basic import AnsibleModule
class User:
def __init__(self, user):
self.user = user
# Check if user exists
def check_if_user_exists(self):
try:
user = pwd.getpwnam(self.user)
success = True
ret_msg = 'User %s exists' % self.user
except KeyError:
success = False
            ret_msg = 'User %s does not exist' % self.user
return success, ret_msg
def main():
# Parsing argument file
module = AnsibleModule(
argument_spec = dict(
user = dict(required=True)
)
)
user = module.params.get('user')
chkusr = User(user)
success, ret_msg = chkusr.check_if_user_exists()
# Error handling and JSON return
if success:
module.exit_json(msg=ret_msg)
else:
module.fail_json(msg=ret_msg)
if __name__ == "__main__":
main() | en | 0.336295 | #!/usr/bin/env python # Check if user exists # Parsing argument file # Error handling and JSON return | 2.816276 | 3 |
backend/server/converters/schema/ontology.py | GenomicsNX/cellxgene | 8 | 281 | """Methods for working with ontologies and the OLS."""
from urllib.parse import quote_plus
import requests
OLS_API_ROOT = "http://www.ebi.ac.uk/ols/api"
# Curie means something like CL:0000001
def _ontology_name(curie):
"""Get the name of the ontology from the curie, CL or UBERON for example."""
return curie.split(":")[0]
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1]
def _double_encode(url):
"""Double url encode a url. This is required by the OLS API."""
return quote_plus(quote_plus(url))
def _iri(curie):
"""Get the iri from a curie. This is a bit hopeful that they all map to purl.obolibrary.org"""
if _ontology_name(curie) == "EFO":
return f"http://www.ebi.ac.uk/efo/EFO_{_ontology_value(curie)}"
return f"http://purl.obolibrary.org/obo/{_ontology_name(curie)}_{_ontology_value(curie)}"
class OntologyLookupError(Exception):
"""Exception for some problem with looking up ontology information."""
def _ontology_info_url(curie):
"""Get the to make a GET to to get information about an ontology term."""
# If the curie is empty, just return an empty string. This happens when there is no
# valid ontology value.
if not curie:
return ""
else:
return f"{OLS_API_ROOT}/ontologies/{_ontology_name(curie)}/terms/{_double_encode(_iri(curie))}"
def get_ontology_label(curie):
"""For a given curie like 'CL:1000413', get the label like 'endothelial cell of artery'"""
url = _ontology_info_url(curie)
if not url:
return ""
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Curie {curie} lookup failed, got status code {response.status_code}: {response.text}"
)
return response.json()["label"]
def lookup_candidate_term(label, ontology="cl", method="select"):
"""Lookup candidate terms for a label. This is useful when there is an existing label in a
submitted dataset, and you want to find an appropriate ontology term.
Args:
label: the label to find ontology terms for
ontology: the ontology to search in, cl or uberon or efo for example
method: select or search. search provides much broader results
Returns:
list of (curie, label) tuples returned by OLS
"""
# using OLS REST API [https://www.ebi.ac.uk/ols/docs/api]
url = f"{OLS_API_ROOT}/{method}?q={quote_plus(label)}&ontology={ontology.lower()}"
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Label {label} lookup failed, got status code {response.status_code}: {response.text}"
)
return [(r["obo_id"], r["label"]) for r in response.json()["response"]["docs"]]
| """Methods for working with ontologies and the OLS."""
from urllib.parse import quote_plus
import requests
OLS_API_ROOT = "http://www.ebi.ac.uk/ols/api"
# Curie means something like CL:0000001
def _ontology_name(curie):
"""Get the name of the ontology from the curie, CL or UBERON for example."""
return curie.split(":")[0]
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1]
def _double_encode(url):
"""Double url encode a url. This is required by the OLS API."""
return quote_plus(quote_plus(url))
def _iri(curie):
"""Get the iri from a curie. This is a bit hopeful that they all map to purl.obolibrary.org"""
if _ontology_name(curie) == "EFO":
return f"http://www.ebi.ac.uk/efo/EFO_{_ontology_value(curie)}"
return f"http://purl.obolibrary.org/obo/{_ontology_name(curie)}_{_ontology_value(curie)}"
class OntologyLookupError(Exception):
"""Exception for some problem with looking up ontology information."""
def _ontology_info_url(curie):
"""Get the to make a GET to to get information about an ontology term."""
# If the curie is empty, just return an empty string. This happens when there is no
# valid ontology value.
if not curie:
return ""
else:
return f"{OLS_API_ROOT}/ontologies/{_ontology_name(curie)}/terms/{_double_encode(_iri(curie))}"
def get_ontology_label(curie):
"""For a given curie like 'CL:1000413', get the label like 'endothelial cell of artery'"""
url = _ontology_info_url(curie)
if not url:
return ""
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Curie {curie} lookup failed, got status code {response.status_code}: {response.text}"
)
return response.json()["label"]
def lookup_candidate_term(label, ontology="cl", method="select"):
"""Lookup candidate terms for a label. This is useful when there is an existing label in a
submitted dataset, and you want to find an appropriate ontology term.
Args:
label: the label to find ontology terms for
ontology: the ontology to search in, cl or uberon or efo for example
method: select or search. search provides much broader results
Returns:
list of (curie, label) tuples returned by OLS
"""
# using OLS REST API [https://www.ebi.ac.uk/ols/docs/api]
url = f"{OLS_API_ROOT}/{method}?q={quote_plus(label)}&ontology={ontology.lower()}"
response = requests.get(url)
if not response.ok:
raise OntologyLookupError(
f"Label {label} lookup failed, got status code {response.status_code}: {response.text}"
)
return [(r["obo_id"], r["label"]) for r in response.json()["response"]["docs"]]
| en | 0.8091 | Methods for working with ontologies and the OLS. # Curie means something like CL:0000001 Get the name of the ontology from the curie, CL or UBERON for example. Get the id component of the curie, 0000001 from CL:0000001 for example. Double url encode a url. This is required by the OLS API. Get the iri from a curie. This is a bit hopeful that they all map to purl.obolibrary.org Exception for some problem with looking up ontology information. Get the to make a GET to to get information about an ontology term. # If the curie is empty, just return an empty string. This happens when there is no # valid ontology value. For a given curie like 'CL:1000413', get the label like 'endothelial cell of artery' Lookup candidate terms for a label. This is useful when there is an existing label in a submitted dataset, and you want to find an appropriate ontology term. Args: label: the label to find ontology terms for ontology: the ontology to search in, cl or uberon or efo for example method: select or search. search provides much broader results Returns: list of (curie, label) tuples returned by OLS # using OLS REST API [https://www.ebi.ac.uk/ols/docs/api] | 3.628516 | 4 |
survey/api/matrix.py | djaodjin/djaodjin-survey | 15 | 282 | <reponame>djaodjin/djaodjin-survey
# Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging, re
from collections import OrderedDict
from django.db.models import F
from django.http import Http404
from django.shortcuts import get_object_or_404
from extra_views.contrib.mixins import SearchableListMixin
from rest_framework import generics
from rest_framework.pagination import PageNumberPagination
from rest_framework import response as http
from ..compat import reverse
from ..mixins import MatrixMixin
from ..models import Answer, Matrix, EditableFilter
from ..utils import (get_account_model, get_account_serializer,
get_question_serializer)
from .serializers import EditableFilterSerializer, MatrixSerializer
LOGGER = logging.getLogger(__name__)
class MatrixCreateAPIView(generics.ListCreateAPIView):
"""
Filtered list of ``Question``.
**Examples**:
.. code-block:: http
GET /api/matrix/
Response:
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
.. code-block:: http
POST /api/matrix/
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
Response:
201 CREATED
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
"""
serializer_class = MatrixSerializer
def get_queryset(self):
return Matrix.objects.all()
class MatrixDetailAPIView(MatrixMixin, generics.RetrieveUpdateDestroyAPIView):
"""
    A table of scores for cohorts against a metric.
**Examples**:
.. code-block:: http
GET /api/matrix/languages
Response:
[{
"slug": "languages",
"title": "All cohorts for all questions"
"scores":{
"portfolio-a": "0.1",
"portfolio-b": "0.5",
}
}]
"""
serializer_class = MatrixSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'path'
question_model = get_question_serializer().Meta.model
def aggregate_scores(self, metric, cohorts, cut=None, accounts=None):
#pylint:disable=unused-argument,too-many-locals
if accounts is None:
accounts = get_account_model().objects.all()
scores = {}
if metric:
assert 'metric' in metric.tags, \
"filter '%s' is not tagged as a metric" % str(metric)
includes, excludes = metric.as_kwargs()
questions = self.question_model.objects.filter(
**includes).exclude(**excludes)
nb_questions = len(questions)
if nb_questions > 0:
for cohort in cohorts:
if isinstance(cohort, EditableFilter):
includes, excludes = cohort.as_kwargs()
qs_accounts = accounts.filter(
**includes).exclude(**excludes)
else:
# If `matrix.cohorts is None`, the `cohorts` argument
# will be a list of single account objects.
qs_accounts = [cohort]
nb_accounts = len(qs_accounts)
if nb_accounts > 0:
nb_correct_answers = Answer.objects.filter(
question__in=questions,
sample__account__in=qs_accounts).filter(
measured=F('question__correct_answer')).count()
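                        # Score is the percentage of correct answers over the
                        # cohort, e.g. 4 correct answers across 4 questions and
                        # 2 accounts gives 4 * 100 / (4 * 2) = 50.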
score = nb_correct_answers * 100 / (
nb_questions * nb_accounts)
LOGGER.debug("score for '%s' = (%d * 100) "\
"/ (%d * %d) = %f", str(cohort), nb_correct_answers,
nb_questions, nb_accounts, score)
assert score <= 100
scores.update({str(cohort): score})
return {"scores": scores}
@property
def matrix(self):
if not hasattr(self, '_matrix'):
self._matrix = Matrix.objects.filter(
slug=self.kwargs.get(self.matrix_url_kwarg)).first()
return self._matrix
def get_accounts(self):
#pylint:disable=unused-argument,no-self-use
return get_account_model().objects.all()
def get_likely_metric(self, cohort_slug):
"""
Returns a URL to a ``Matrix`` derived from *cohort*.
Many times people will use the same name to either mean a cohort
or a metric and expect the system will magically switch between
both meaning. This is an attempt at magic.
"""
likely_metric = None
look = re.match(r"(\S+)(-\d+)", cohort_slug)
if look:
try:
likely_metric = self.request.build_absolute_uri(
reverse('matrix_chart', args=(
EditableFilter.objects.get(slug=look.group(1)).slug,)))
except EditableFilter.DoesNotExist:
pass
return likely_metric
def get(self, request, *args, **kwargs):
#pylint:disable=unused-argument,too-many-locals
matrix = self.matrix
if matrix:
metric = self.matrix.metric
else:
parts = self.kwargs.get(self.matrix_url_kwarg).split('/')
metric = get_object_or_404(EditableFilter, slug=parts[-1])
matrix = Matrix.objects.filter(slug=parts[0]).first()
if not matrix:
raise Http404()
cohort_serializer = EditableFilterSerializer
cohorts = matrix.cohorts.exclude(tags__contains='aggregate')
public_cohorts = matrix.cohorts.filter(tags__contains='aggregate')
cut = matrix.cut
if not cohorts:
# We don't have any cohorts, let's show individual accounts instead.
if cut:
includes, excludes = cut.as_kwargs()
accounts = self.get_accounts().filter(
**includes).exclude(**excludes)
else:
accounts = self.get_accounts()
cohort_serializer = get_account_serializer()
            # Implementation Note: switch cohorts from a queryset
# of `EditableFilter` to a queryset of `Account` ...
cohorts = accounts
result = []
scores = {}
val = {
'slug': metric.slug,
'title': metric.title,
'metric': EditableFilterSerializer().to_representation(metric),
'cut': EditableFilterSerializer().to_representation(cut),
'cohorts': cohort_serializer(many=True).to_representation(cohorts)}
# In some case, a metric and cohort have a connection
# and could have the same name.
for cohort in val['cohorts']:
likely_metric = self.get_likely_metric(cohort['slug'])
if likely_metric:
cohort['likely_metric'] = likely_metric
scores.update(val)
scores.update({"values": self.aggregate_scores(
metric, cohorts, cut, accounts=self.get_accounts())})
result += [scores]
if public_cohorts:
public_scores = {}
public_scores.update(val)
public_scores.update(
{"cohorts": EditableFilterSerializer(
public_cohorts, many=True).data,
"values": self.aggregate_scores(metric, public_cohorts)})
result += [public_scores]
return http.Response(result)
class EditableFilterQuerysetMixin(object):
@staticmethod
def get_queryset():
return EditableFilter.objects.all()
class EditableFilterListAPIView(SearchableListMixin,
EditableFilterQuerysetMixin, generics.ListCreateAPIView):
"""
    List filters
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"count": 2,
previous: null,
next: null,
results: [
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
},
{
"slug": "none",
"title": "None",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
]
}
"""
search_fields = ['tags']
serializer_class = EditableFilterSerializer
def post(self, request, *args, **kwargs):
"""
        Create a filter
**Tags**: survey
**Examples**
.. code-block:: http
POST /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"count": 2,
previous: null,
next: null,
results: [
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
},
{
"slug": "none",
"title": "None",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
]
}
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterListAPIView, self).post(
request, *args, **kwargs)
class EditableFilterDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
    Retrieve a filter
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/all/ HTTP/1.1
responds
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
"""
serializer_class = EditableFilterSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'editable_filter'
def get_queryset(self):
return EditableFilter.objects.all()
def put(self, request, *args, **kwargs):
"""
        Updates a filter
**Tags**: survey
**Examples**
.. code-block:: http
PUT /api/xia/matrix/filters/all/ HTTP/1.1
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
responds
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterDetailAPIView, self).put(
request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
"""
        Deletes a filter
**Tags**: survey
**Examples**
.. code-block:: http
DELETE /api/xia/matrix/filters/all/ HTTP/1.1
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterDetailAPIView, self).delete(
request, *args, **kwargs)
class EditableFilterPagination(PageNumberPagination):
def paginate_queryset(self, queryset, request, view=None):
self.editable_filter = view.editable_filter
return super(EditableFilterPagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return http.Response(OrderedDict([
('editable_filter', EditableFilterSerializer().to_representation(
self.editable_filter)),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
class EditableFilterObjectsAPIView(generics.ListAPIView):
"""
List filter objects
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"created_at": "2020-01-01T00:00:00Z",
"measured": 12
}
"""
pagination_class = EditableFilterPagination
serializer_class = None # override in subclasses
lookup_field = 'slug'
lookup_url_kwarg = 'editable_filter'
def get_queryset(self):
return self.get_serializer_class().Meta.model.objects.all()
def get(self, request, *args, **kwargs): #pylint: disable=unused-argument
self.editable_filter = generics.get_object_or_404(
EditableFilter.objects.all(),
slug=self.kwargs[self.lookup_url_kwarg])
return super(EditableFilterObjectsAPIView, self).get(
request, *args, **kwargs)
class AccountListAPIView(EditableFilterObjectsAPIView):
"""
Filtered list of ``EditableFilter``.
**Examples**:
.. code-block:: http
GET /api/questions/languages
Response:
{
"slug": "languages",
"title": "All questions related to languages"
"predicates":[{
"operator": "contains",
"operand": "language",
"field": "text",
"selector":"keepmatching"
}]
}
"""
serializer_class = get_account_serializer()
class QuestionListAPIView(EditableFilterObjectsAPIView):
"""
Filtered list of ``Question``.
**Examples**:
.. code-block:: http
GET /api/questions/languages
Response:
{
"slug": "languages",
"title": "All questions related to languages"
"predicates":[{
"operator": "contains",
"operand": "language",
"field": "text",
"selector":"keepmatching"
}]
}
"""
serializer_class = get_question_serializer()
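# A client-side sketch (assumption, not part of this module): exercising the
# filters list endpoint documented above with `requests`. The host and the
# /api/xia/ prefix mirror the docstring examples and are illustrative only.
def _example_list_filters(base_url="https://example.com"):
    import requests

    resp = requests.get(base_url + "/api/xia/matrix/filters/")
    resp.raise_for_status()
    for editable_filter in resp.json()["results"]:
        print(editable_filter["slug"], editable_filter["title"])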
| # Copyright (c) 2020, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging, re
from collections import OrderedDict
from django.db.models import F
from django.http import Http404
from django.shortcuts import get_object_or_404
from extra_views.contrib.mixins import SearchableListMixin
from rest_framework import generics
from rest_framework.pagination import PageNumberPagination
from rest_framework import response as http
from ..compat import reverse
from ..mixins import MatrixMixin
from ..models import Answer, Matrix, EditableFilter
from ..utils import (get_account_model, get_account_serializer,
get_question_serializer)
from .serializers import EditableFilterSerializer, MatrixSerializer
LOGGER = logging.getLogger(__name__)
class MatrixCreateAPIView(generics.ListCreateAPIView):
"""
Filtered list of ``Question``.
**Examples**:
.. code-block:: http
GET /api/matrix/
Response:
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
.. code-block:: http
POST /api/matrix/
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
Response:
201 CREATED
{
"slug": "all",
"title": "All accounts against all questions",
"metric": {
"slug": "all-questions",
"title": "All questions",
"predicates": []
},
"cohorts": [{
"slug": "all-accounts",
"title": "All accounts",
"predicates": []
}]
}
"""
serializer_class = MatrixSerializer
def get_queryset(self):
return Matrix.objects.all()
class MatrixDetailAPIView(MatrixMixin, generics.RetrieveUpdateDestroyAPIView):
"""
    A table of scores for cohorts against a metric.
**Examples**:
.. code-block:: http
GET /api/matrix/languages
Response:
[{
"slug": "languages",
"title": "All cohorts for all questions"
"scores":{
"portfolio-a": "0.1",
"portfolio-b": "0.5",
}
}]
"""
serializer_class = MatrixSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'path'
question_model = get_question_serializer().Meta.model
def aggregate_scores(self, metric, cohorts, cut=None, accounts=None):
#pylint:disable=unused-argument,too-many-locals
if accounts is None:
accounts = get_account_model().objects.all()
scores = {}
if metric:
assert 'metric' in metric.tags, \
"filter '%s' is not tagged as a metric" % str(metric)
includes, excludes = metric.as_kwargs()
questions = self.question_model.objects.filter(
**includes).exclude(**excludes)
nb_questions = len(questions)
if nb_questions > 0:
for cohort in cohorts:
if isinstance(cohort, EditableFilter):
includes, excludes = cohort.as_kwargs()
qs_accounts = accounts.filter(
**includes).exclude(**excludes)
else:
# If `matrix.cohorts is None`, the `cohorts` argument
# will be a list of single account objects.
qs_accounts = [cohort]
nb_accounts = len(qs_accounts)
if nb_accounts > 0:
nb_correct_answers = Answer.objects.filter(
question__in=questions,
sample__account__in=qs_accounts).filter(
measured=F('question__correct_answer')).count()
score = nb_correct_answers * 100 / (
nb_questions * nb_accounts)
LOGGER.debug("score for '%s' = (%d * 100) "\
"/ (%d * %d) = %f", str(cohort), nb_correct_answers,
nb_questions, nb_accounts, score)
assert score <= 100
scores.update({str(cohort): score})
return {"scores": scores}
@property
def matrix(self):
if not hasattr(self, '_matrix'):
self._matrix = Matrix.objects.filter(
slug=self.kwargs.get(self.matrix_url_kwarg)).first()
return self._matrix
def get_accounts(self):
#pylint:disable=unused-argument,no-self-use
return get_account_model().objects.all()
def get_likely_metric(self, cohort_slug):
"""
Returns a URL to a ``Matrix`` derived from *cohort*.
Many times people will use the same name to either mean a cohort
or a metric and expect the system will magically switch between
both meaning. This is an attempt at magic.
"""
likely_metric = None
look = re.match(r"(\S+)(-\d+)", cohort_slug)
if look:
try:
likely_metric = self.request.build_absolute_uri(
reverse('matrix_chart', args=(
EditableFilter.objects.get(slug=look.group(1)).slug,)))
except EditableFilter.DoesNotExist:
pass
return likely_metric
def get(self, request, *args, **kwargs):
#pylint:disable=unused-argument,too-many-locals
matrix = self.matrix
if matrix:
metric = self.matrix.metric
else:
parts = self.kwargs.get(self.matrix_url_kwarg).split('/')
metric = get_object_or_404(EditableFilter, slug=parts[-1])
matrix = Matrix.objects.filter(slug=parts[0]).first()
if not matrix:
raise Http404()
cohort_serializer = EditableFilterSerializer
cohorts = matrix.cohorts.exclude(tags__contains='aggregate')
public_cohorts = matrix.cohorts.filter(tags__contains='aggregate')
cut = matrix.cut
if not cohorts:
# We don't have any cohorts, let's show individual accounts instead.
if cut:
includes, excludes = cut.as_kwargs()
accounts = self.get_accounts().filter(
**includes).exclude(**excludes)
else:
accounts = self.get_accounts()
cohort_serializer = get_account_serializer()
            # Implementation Note: switch cohorts from a queryset
# of `EditableFilter` to a queryset of `Account` ...
cohorts = accounts
result = []
scores = {}
val = {
'slug': metric.slug,
'title': metric.title,
'metric': EditableFilterSerializer().to_representation(metric),
'cut': EditableFilterSerializer().to_representation(cut),
'cohorts': cohort_serializer(many=True).to_representation(cohorts)}
# In some case, a metric and cohort have a connection
# and could have the same name.
for cohort in val['cohorts']:
likely_metric = self.get_likely_metric(cohort['slug'])
if likely_metric:
cohort['likely_metric'] = likely_metric
scores.update(val)
scores.update({"values": self.aggregate_scores(
metric, cohorts, cut, accounts=self.get_accounts())})
result += [scores]
if public_cohorts:
public_scores = {}
public_scores.update(val)
public_scores.update(
{"cohorts": EditableFilterSerializer(
public_cohorts, many=True).data,
"values": self.aggregate_scores(metric, public_cohorts)})
result += [public_scores]
return http.Response(result)
class EditableFilterQuerysetMixin(object):
@staticmethod
def get_queryset():
return EditableFilter.objects.all()
class EditableFilterListAPIView(SearchableListMixin,
EditableFilterQuerysetMixin, generics.ListCreateAPIView):
"""
    List filters
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"count": 2,
previous: null,
next: null,
results: [
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
},
{
"slug": "none",
"title": "None",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
]
}
"""
search_fields = ['tags']
serializer_class = EditableFilterSerializer
def post(self, request, *args, **kwargs):
"""
        Create a filter
**Tags**: survey
**Examples**
.. code-block:: http
POST /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"count": 2,
previous: null,
next: null,
results: [
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
},
{
"slug": "none",
"title": "None",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
]
}
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterListAPIView, self).post(
request, *args, **kwargs)
class EditableFilterDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
"""
    Retrieve a filter
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/all/ HTTP/1.1
responds
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
"""
serializer_class = EditableFilterSerializer
lookup_field = 'slug'
lookup_url_kwarg = 'editable_filter'
def get_queryset(self):
return EditableFilter.objects.all()
def put(self, request, *args, **kwargs):
"""
        Updates a filter
**Tags**: survey
**Examples**
.. code-block:: http
PUT /api/xia/matrix/filters/all/ HTTP/1.1
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
responds
.. code-block:: json
{
"slug": "all",
"title": "All",
"tags": "",
"predicates": [
"rank": 1,
"operator": "",
"operand": "",
"field": "",
"selector": ""
],
"likely_metric": ""
}
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterDetailAPIView, self).put(
request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
"""
Deletes a fitler
**Tags**: survey
**Examples**
.. code-block:: http
DELETE /api/xia/matrix/filters/all/ HTTP/1.1
"""
#pylint:disable=useless-super-delegation
return super(EditableFilterDetailAPIView, self).delete(
request, *args, **kwargs)
class EditableFilterPagination(PageNumberPagination):
def paginate_queryset(self, queryset, request, view=None):
self.editable_filter = view.editable_filter
return super(EditableFilterPagination, self).paginate_queryset(
queryset, request, view=view)
def get_paginated_response(self, data):
return http.Response(OrderedDict([
('editable_filter', EditableFilterSerializer().to_representation(
self.editable_filter)),
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
class EditableFilterObjectsAPIView(generics.ListAPIView):
"""
List filter objects
**Tags**: survey
**Examples**
.. code-block:: http
GET /api/xia/matrix/filters/ HTTP/1.1
responds
.. code-block:: json
{
"created_at": "2020-01-01T00:00:00Z",
"measured": 12
}
"""
pagination_class = EditableFilterPagination
serializer_class = None # override in subclasses
lookup_field = 'slug'
lookup_url_kwarg = 'editable_filter'
def get_queryset(self):
return self.get_serializer_class().Meta.model.objects.all()
def get(self, request, *args, **kwargs): #pylint: disable=unused-argument
self.editable_filter = generics.get_object_or_404(
EditableFilter.objects.all(),
slug=self.kwargs[self.lookup_url_kwarg])
return super(EditableFilterObjectsAPIView, self).get(
request, *args, **kwargs)
class AccountListAPIView(EditableFilterObjectsAPIView):
"""
Filtered list of ``EditableFilter``.
**Examples**:
.. code-block:: http
GET /api/questions/languages
Response:
{
"slug": "languages",
"title": "All questions related to languages"
"predicates":[{
"operator": "contains",
"operand": "language",
"field": "text",
"selector":"keepmatching"
}]
}
"""
serializer_class = get_account_serializer()
class QuestionListAPIView(EditableFilterObjectsAPIView):
"""
Filtered list of ``Question``.
**Examples**:
.. code-block:: http
GET /api/questions/languages
Response:
{
"slug": "languages",
"title": "All questions related to languages"
"predicates":[{
"operator": "contains",
"operand": "language",
"field": "text",
"selector":"keepmatching"
}]
}
"""
serializer_class = get_question_serializer() | en | 0.520285 | # Copyright (c) 2020, DjaoDjin inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Filtered list of ``Question``. **Examples**: .. code-block:: http GET /api/matrix/ Response: { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } .. code-block:: http POST /api/matrix/ { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } Response: 201 CREATED { "slug": "all", "title": "All accounts against all questions", "metric": { "slug": "all-questions", "title": "All questions", "predicates": [] }, "cohorts": [{ "slug": "all-accounts", "title": "All accounts", "predicates": [] }] } A table of scores for cohorts aganist a metric. **Examples**: .. code-block:: http GET /api/matrix/languages Response: [{ "slug": "languages", "title": "All cohorts for all questions" "scores":{ "portfolio-a": "0.1", "portfolio-b": "0.5", } }] #pylint:disable=unused-argument,too-many-locals # If `matrix.cohorts is None`, the `cohorts` argument # will be a list of single account objects. #pylint:disable=unused-argument,no-self-use Returns a URL to a ``Matrix`` derived from *cohort*. Many times people will use the same name to either mean a cohort or a metric and expect the system will magically switch between both meaning. This is an attempt at magic. #pylint:disable=unused-argument,too-many-locals # We don't have any cohorts, let's show individual accounts instead. # Implementation Note: switch cohorts from an queryset # of `EditableFilter` to a queryset of `Account` ... # In some case, a metric and cohort have a connection # and could have the same name. List fitlers **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/ HTTP/1.1 responds .. 
code-block:: json { "count": 2, previous: null, next: null, results: [ { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" }, { "slug": "none", "title": "None", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } ] } Create a fitler **Tags**: survey **Examples** .. code-block:: http POST /api/xia/matrix/filters/ HTTP/1.1 responds .. code-block:: json { "count": 2, previous: null, next: null, results: [ { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" }, { "slug": "none", "title": "None", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } ] } #pylint:disable=useless-super-delegation Retrieve a fitler **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/all/ HTTP/1.1 responds .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } Updates a fitler **Tags**: survey **Examples** .. code-block:: http PUT /api/xia/matrix/filters/all/ HTTP/1.1 .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } responds .. code-block:: json { "slug": "all", "title": "All", "tags": "", "predicates": [ "rank": 1, "operator": "", "operand": "", "field": "", "selector": "" ], "likely_metric": "" } #pylint:disable=useless-super-delegation Deletes a fitler **Tags**: survey **Examples** .. code-block:: http DELETE /api/xia/matrix/filters/all/ HTTP/1.1 #pylint:disable=useless-super-delegation List filter objects **Tags**: survey **Examples** .. code-block:: http GET /api/xia/matrix/filters/ HTTP/1.1 responds .. code-block:: json { "created_at": "2020-01-01T00:00:00Z", "measured": 12 } # override in subclasses #pylint: disable=unused-argument Filtered list of ``EditableFilter``. **Examples**: .. code-block:: http GET /api/questions/languages Response: { "slug": "languages", "title": "All questions related to languages" "predicates":[{ "operator": "contains", "operand": "language", "field": "text", "selector":"keepmatching" }] } Filtered list of ``Question``. **Examples**: .. code-block:: http GET /api/questions/languages Response: { "slug": "languages", "title": "All questions related to languages" "predicates":[{ "operator": "contains", "operand": "language", "field": "text", "selector":"keepmatching" }] } | 1.321764 | 1 |
remove_labels.py | iFishy/DomainApp | 0 | 283 | from __future__ import print_function
import httplib2
import os
import sys
import pickle
from apiclient import discovery
from apiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.labels'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Inbox Organize'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def GetLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
"""
for label in labels:
print ('Label id: %s - Label name: %s' % (label['id'], label['name']))
"""
return labels
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def DeleteLabel(service, user_id, label_id):
try:
service.users().labels().delete(userId=user_id, id=label_id).execute()
print ('Label with id: %s deleted successfully.' % label_id)
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
userId = 'me'
labels = GetLabels(service, userId)
for label in labels:
if (label['type'] == 'user'):
print('Deleting label:', label['name'])
DeleteLabel(service, userId, label['id'])
if __name__ == '__main__':
main()
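# A more cautious variant (sketch, not part of the original script): delete only
# user labels whose names start with a given prefix, with an optional dry run.
# The prefix argument and dry_run flag are illustrative additions.
def delete_labels_with_prefix(service, user_id, prefix, dry_run=True):
    for label in GetLabels(service, user_id) or []:
        if label['type'] == 'user' and label['name'].startswith(prefix):
            if dry_run:
                print('Would delete label:', label['name'])
            else:
                DeleteLabel(service, user_id, label['id'])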
| from __future__ import print_function
import httplib2
import os
import sys
import pickle
from apiclient import discovery
from apiclient import errors
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.labels'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Inbox Organize'
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'gmail-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def GetLabels(service, user_id):
try:
response = service.users().labels().list(userId=user_id).execute()
labels = response['labels']
"""
for label in labels:
print ('Label id: %s - Label name: %s' % (label['id'], label['name']))
"""
return labels
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def DeleteLabel(service, user_id, label_id):
try:
service.users().labels().delete(userId=user_id, id=label_id).execute()
print ('Label with id: %s deleted successfully.' % label_id)
except errors.HttpError as error:
print ('An error occurred: %s' % error)
def main():
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
userId = 'me'
labels = GetLabels(service, userId)
for label in labels:
if (label['type'] == 'user'):
print('Deleting label:', label['name'])
DeleteLabel(service, userId, label['id'])
if __name__ == '__main__':
main()
| en | 0.85717 | # If modifying these scopes, delete your previously saved credentials # at ~/.credentials/gmail-python-quickstart.json Gets valid user credentials from storage. If nothing has been stored, or if the stored credentials are invalid, the OAuth2 flow is completed to obtain the new credentials. Returns: Credentials, the obtained credential. # Needed only for compatibility with Python 2.6 for label in labels: print ('Label id: %s - Label name: %s' % (label['id'], label['name'])) | 2.673633 | 3 |
readgadget/modules/rs_structs.py | danielmarostica/pygadgetreader | 6 | 284 | import numpy as np
import sys
## ROCKSTAR ##
halostruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
halostruct2 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('halfmass_radius',np.float32),
#('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
## ROCKSTAR-GALAXIES ##
halogalaxystruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32),
('type',np.int32),
('sm',np.float32),
('gas',np.float32),
('bh',np.float32),
('peak_density',np.float32),
('av_density',np.float32),
])
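# Illustrative sketch (not part of the original module): each dtype above describes
# one fixed-size binary halo record, so a block of halos can be read with numpy's
# binary reader. The open file object, dtype and halo count are supplied by the
# caller; this helper is an assumption about usage, not code from the upstream repo.
def _example_read_halos(fileobj, halo_dtype, num_halos):
    # Read `num_halos` consecutive records of `halo_dtype` from an open binary file.
    return np.fromfile(fileobj, dtype=halo_dtype, count=num_halos)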
def getRSformat(obj):
if obj.galaxies == 0:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halostruct1')
return halostruct1
elif obj.format_revision == 2:
if obj.debug: print('returning halostruct2')
return halostruct2
else:
print('found HALO_FORMAT_REVISION=%d, if this is >2 email me!' %
obj.format_revision)
sys.exit()
elif obj.galaxies == 1:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR-GALAXIES, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halogalaxystruct1')
return halogalaxystruct1
else:
print('found HALO_FORMAT_REVISION=%d, if this is >1 email me!' %
obj.format_revision)
sys.exit()
| import numpy as np
import sys
## ROCKSTAR ##
halostruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
halostruct2 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('halfmass_radius',np.float32),
#('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32)
])
## ROCKSTAR-GALAXIES ##
halogalaxystruct1 = np.dtype([('id',np.int64),
('pos',np.float32,(6,)),
('corevel',np.float32,(3,)),
('bulkvel',np.float32,(3,)),
('m',np.float32),
('r',np.float32),
('child_r',np.float32),
('vmax_r',np.float32),
('mgrav',np.float32),
('vmax',np.float32),
('rvmax',np.float32),
('rs',np.float32),
('klypin_rs',np.float32),
('vrms',np.float32),
('J',np.float32,(3,)),
('energy',np.float32),
('spin',np.float32),
('alt_m',np.float32,(4,)),
('Xoff',np.float32),
('Voff',np.float32),
('b_to_a',np.float32),
('c_to_a',np.float32),
('A',np.float32,(3,)),
('b_to_a2',np.float32),
('c_to_a2',np.float32),
('A2',np.float32,(3,)),
('bullock_spin',np.float32),
('kin_to_pot',np.float32),
('m_pe_b',np.float32),
('m_pe_d',np.float32),
('dummy1',np.float32), ## ALIGNMENT
('num_p',np.int64),
('num_child_particles',np.int64),
('p_start',np.int64),
('desc',np.int64),
('flags',np.int64),
('n_core',np.int64),
('dummy2',np.float32), ## ALIGNMENT
('min_pos_err',np.float32),
('min_vel_err',np.float32),
('min_bulkvel_err',np.float32),
('type',np.int32),
('sm',np.float32),
('gas',np.float32),
('bh',np.float32),
('peak_density',np.float32),
('av_density',np.float32),
])
def getRSformat(obj):
if obj.galaxies == 0:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halostruct1')
return halostruct1
elif obj.format_revision == 2:
if obj.debug: print('returning halostruct2')
return halostruct2
else:
print('found HALO_FORMAT_REVISION=%d, if this is >2 email me!' %
obj.format_revision)
sys.exit()
elif obj.galaxies == 1:
if obj.format_revision == 0:
print('OUTDATED ROCKSTAR-GALAXIES, PLEASE UPDATE!')
sys.exit()
elif obj.format_revision == 1:
if obj.debug: print('returning halogalaxystruct1')
return halogalaxystruct1
else:
print('found HALO_FORMAT_REVISION=%d, if this is >1 email me!' %
obj.format_revision)
sys.exit()
| id | 0.125114 | ## ROCKSTAR ## ## ALIGNMENT ## ALIGNMENT #('dummy1',np.float32), ## ALIGNMENT ## ALIGNMENT ## ROCKSTAR-GALAXIES ## ## ALIGNMENT ## ALIGNMENT | 1.593376 | 2 |
ics2entropiawiki.py | entropia/ics2entropiawiki | 2 | 285 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ics2entropiawiki
Read an ics file with the entropia events and insert them into the
entropia homepage wiki.
Example:
$ ics2entropiawiki.py --config /etc/ics2entropiawiki/config.ini
Inserts events not in the past to the "Termine" Wiki page and appends past
events to the "Vergangene_Termine" Site
"""
import locale
import configparser
import re
import requests
from argparse import ArgumentParser
from datetime import timedelta, datetime
from ics import Calendar
from mwclient import Site
from dateutil.tz import tzlocal
BOTWARNING = """
<!--
This text is automatically generated by the ics2entropiawiki bot, everything you write and everything you edit
WILL BE OVERWRITTEN
Dieser Text ist vom ics2entropiawiki bot automatisch generiert. Alles was hier manuell editiert, hinzugefügt wird
WIRD ÜBERSCHRIEBEN
-->
"""
TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" width="100%" style="border-collapse:collapse;"
! style="width:250px;" | Datum !! style="width:50px;" | Zeit !! Ort !! Beschreibung\
"""
ARCHIVE_TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" style="border-collapse:collapse;" width="100%"
|width=15%|'''Datum'''
|width=6%|'''Zeit'''
|width=15%|'''Ort'''
|width=69%|'''Beschreibung'''
"""
TABLE_FOOTER = (
"|}",
"\n",
"Weitere Links: [[Vorlage:Termine|Termine]] ",
"([https://entropia.de/index.php?title=Vorlage:Termine&action=edit Bearbeiten]),",
" [[Vorlage:Vergangene_Termine|Vergangene Termine]], [[Anfahrt]]"
)
LINE_SEPARATOR = "|-\n"
try:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
except locale.Error:
pass
class EntropiaEvent:
"""
Parses an ics Event and converts it to an entropia-wiki suitable form
"""
def __init__(self, event):
"""
:param event: The event to be evaluated
:type event: ics.event.Event
"""
self.event = event
self.begintime = event.begin.datetime.astimezone()
self.endtime = event._end_time.datetime.astimezone()
@property
def location(self):
"""
Retrieve the location of an event
:return: location
:rtype: str
"""
locations = {
"entropia": "[[Anfahrt|Entropia]]",
}
location = " "
if self.event.location:
location = self.event.location
if location.lower() in locations.keys():
location = locations[location.lower()]
return location
@property
def begin_date(self):
"""
:return: Entropia-Wiki formatted begin time
:rtype: str
"""
return self.begintime.strftime("%a., %d.%m.%Y")
@property
def end_date(self):
"""
:return: Entropia-Wiki formatted end time
:rtype: str
"""
end_date = ""
if self.endtime - self.begintime > timedelta(days=1):
end_date = " - " + self.endtime.strftime("%a., %d.%m.%Y")
return end_date
@property
def days_to_event(self):
"""
:return: Days to the start of the event
:rtype: datetime.timedelta
"""
return self.endtime - datetime.now(tz=tzlocal())
@property
def is_past_event(self):
"""
:return: Check if the event lies in the past
:rtype: bool
"""
return self.days_to_event < timedelta(days=0)
@property
def start_time(self):
"""
:return: The starting time of the event
:rtype: str
"""
start_time = " "
if not self.event.all_day:
start_time = self.begintime.strftime("%H:%M")
return start_time
@property
def description(self):
"""
:return: The event's description
:rtype: str
"""
links = None
wiki = None
event = self.event
if event.description:
links = re.findall("^[Ll]ink:(.*)$", event.description)
wiki = re.findall("^[Ww]iki:(.*)$", event.description)
if links and event.name:
description = "["+links[0]+" "+event.name+"]"
elif wiki:
description = wiki[0]
elif not event.name:
description = "N.A."
else:
description = event.name
return description
def __str__(self):
"""
:return: A wiki line describing the event
:rtype: str
"""
return ("| " +
self.begin_date +
self.end_date +
" || " +
self.start_time +
" || " +
self.location +
" || " +
self.description
)
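# Illustrative example (not from the original source): for a non-all-day event named
# "Treffen" at location "entropia" starting Fri 01.03.2019 19:00, the string
# representation above would yield a wiki table row roughly like
# | Fr., 01.03.2019 || 19:00 || [[Anfahrt|Entropia]] || Treffen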
def append_past_events(past_events, wiki_user, wiki_pw, wiki_archive):
"""
Append the "new" past events to the wiki archive page
:param past_events: the past events that were not added to the events page
:type past_events: list
:param wiki_user: bot user for the wiki
:type wiki_user: str
:param wiki_pw: password for the wiki user
:type wiki_pw: str
:param wiki_archive: archive page
:type wiki_archive: str
:return: None
:rtype: None
"""
site = Site('entropia.de', path='/')
site.login(wiki_user, wiki_pw)
page = site.pages[wiki_archive]
text = page.text().split('\n')
last_table_position = 0
for event in past_events:
year_header = "== {} ==".format(event.endtime.strftime('%Y'))
for index, txtline in enumerate(text):
if txtline == '|}':
last_table_position = index
if str(event) in text:
continue
if year_header in text:
append_list = (
'\n' +
LINE_SEPARATOR +
str(event)
)
text = text[:last_table_position]+[append_list, ]+text[last_table_position:]
else:
append_list = (
3 * '\n' +
year_header +
ARCHIVE_TABLE_HEADER +
'\n' +
LINE_SEPARATOR +
'\n' +
str(event) +
'\n|}'
)
text = text[:last_table_position+1]+[append_list, ]+text[last_table_position+1:]
page.save("\n".join(text))
def get_args():
"""
    Retrieve arguments from the command line and, if a config file is given, from the config file (config values take precedence)
:return: Parsed arguments from command line, config file
:rtype: list
"""
parser = ArgumentParser()
parser.add_argument(
"-c", "--config",
default="/etc/ics2entropiawiki/config.ini",
dest="configfile",
help="Configuration file path",
metavar="CONFIG"
)
parser.add_argument(
"-u", "--url",
dest="ics_url",
help="The URL under which the ICS-file can be retrieved",
metavar="URL",
)
parser.add_argument(
"-f", "--file",
dest="local_file",
help="Local ics file",
metavar="FILE"
)
parser.add_argument(
"--wiki-user",
dest="wiki_user",
help="Wiki user",
metavar="WIKIUSER"
)
parser.add_argument(
"--wiki-password",
dest="wiki_pw",
help="Wiki user's password",
metavar="WIKIPW"
)
parser.add_argument(
"--wiki-page",
dest="wiki_page",
help='Wiki page',
metavar='WIKIPAGE'
)
parser.add_argument(
"--wiki-archive",
dest="wiki_archive",
help='Wiki archive',
metavar='WIKIARCHIVE'
)
parser.add_argument(
"-d", "--debug",
dest="debug",
action="store_true",
default=False
)
args = parser.parse_args()
configfile = args.configfile
ics_url = args.ics_url
file = args.local_file
wiki = {
'user': args.wiki_user,
'pass': args.wiki_pw,
'page': args.wiki_page,
'archive': args.wiki_archive,
}
debug = args.debug
if configfile:
config = configparser.ConfigParser()
config.read(configfile)
try:
ics_url = config["default"]["url"]
wiki = config["wiki"]
except KeyError as error:
print("Please have a look at the sample config provided with the package")
raise error
return ics_url, file, wiki, debug
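# Illustrative sketch (not shipped with this script): a config file matching the keys
# read above could look roughly like
#
# [default]
# url = https://example.org/entropia.ics
#
# [wiki]
# user = terminbot
# pass = secret
# page = Vorlage:Termine
# archive = Vorlage:Vergangene_Termine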
def deradicalise_ical(ics):
"""
:param ics: input file
:type ics: str
    :return: the ics text with Radicale-specific (X-RADICALE-NAME) headers removed
"""
deradicalised = ""
for line in ics.splitlines():
if 'X-RADICALE-NAME:' not in line:
deradicalised += "\n"+line
return deradicalised
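# Illustrative example (not from the original source): for the input
# "BEGIN:VEVENT\nX-RADICALE-NAME:event.ics\nSUMMARY:Treffen\nEND:VEVENT"
# the function returns "\nBEGIN:VEVENT\nSUMMARY:Treffen\nEND:VEVENT", i.e. the same
# lines, each re-prefixed with a newline, with the X-RADICALE-NAME line dropped.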
def main():
"""
:return: None
:rtype: None
"""
ics_url, file, wiki, debug = get_args()
event_strings = []
past_events = []
if file:
calendar = Calendar(deradicalise_ical(open(file).read()))
else:
ics_result = requests.get(ics_url)
ics_result.encoding = 'utf-8'
calendar = Calendar(deradicalise_ical(ics_result.text))
for event in sorted(calendar.events, key=lambda ev: ev.begin):
event = EntropiaEvent(event)
if not event.is_past_event:
event_strings.append(
"\n" +
LINE_SEPARATOR +
str(event)
)
else:
past_events.append(event)
append_past_events(past_events, wiki['user'], wiki['pass'], wiki['archive'])
termine = BOTWARNING + "\n" + TABLE_HEADER + "\n" + "".join(event_strings) + "\n" + "".join(TABLE_FOOTER)
if debug:
print(termine)
site = Site('entropia.de', path='/')
site.login(wiki['user'], wiki['pass'])
page = site.pages[wiki['page']]
if termine:
page.save(termine, "Terminbot was here")
page.purge()
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""ics2entropiawiki
Read an ics file with the entropia events and insert them into the
entropia homepage wiki.
Example:
$ ics2entropiawiki.py --config /etc/ics2entropiawiki/config.ini
Inserts events not in the past to the "Termine" Wiki page and appends past
events to the "Vergangene_Termine" Site
"""
import locale
import configparser
import re
import requests
from argparse import ArgumentParser
from datetime import timedelta, datetime
from ics import Calendar
from mwclient import Site
from dateutil.tz import tzlocal
BOTWARNING = """
<!--
This text is automatically generated by the ics2entropiawiki bot, everything you write and everything you edit
WILL BE OVERWRITTEN
Dieser Text ist vom ics2entropiawiki bot automatisch generiert. Alles was hier manuell editiert, hinzugefügt wird
WIRD ÜBERSCHRIEBEN
-->
"""
TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" width="100%" style="border-collapse:collapse;"
! style="width:250px;" | Datum !! style="width:50px;" | Zeit !! Ort !! Beschreibung\
"""
ARCHIVE_TABLE_HEADER = """
{| class="termine" border="1" cellspacing="0" cellpadding="5" style="border-collapse:collapse;" width="100%"
|width=15%|'''Datum'''
|width=6%|'''Zeit'''
|width=15%|'''Ort'''
|width=69%|'''Beschreibung'''
"""
TABLE_FOOTER = (
"|}",
"\n",
"Weitere Links: [[Vorlage:Termine|Termine]] ",
"([https://entropia.de/index.php?title=Vorlage:Termine&action=edit Bearbeiten]),",
" [[Vorlage:Vergangene_Termine|Vergangene Termine]], [[Anfahrt]]"
)
LINE_SEPARATOR = "|-\n"
try:
locale.setlocale(locale.LC_ALL, 'de_DE.utf8')
except locale.Error:
pass
class EntropiaEvent:
"""
Parses an ics Event and converts it to an entropia-wiki suitable form
"""
def __init__(self, event):
"""
:param event: The event to be evaluated
:type event: ics.event.Event
"""
self.event = event
self.begintime = event.begin.datetime.astimezone()
self.endtime = event._end_time.datetime.astimezone()
@property
def location(self):
"""
Retrieve the location of an event
:return: location
:rtype: str
"""
locations = {
"entropia": "[[Anfahrt|Entropia]]",
}
location = " "
if self.event.location:
location = self.event.location
if location.lower() in locations.keys():
location = locations[location.lower()]
return location
@property
def begin_date(self):
"""
:return: Entropia-Wiki formatted begin time
:rtype: str
"""
return self.begintime.strftime("%a., %d.%m.%Y")
@property
def end_date(self):
"""
:return: Entropia-Wiki formatted end time
:rtype: str
"""
end_date = ""
if self.endtime - self.begintime > timedelta(days=1):
end_date = " - " + self.endtime.strftime("%a., %d.%m.%Y")
return end_date
@property
def days_to_event(self):
"""
:return: Days to the start of the event
:rtype: datetime.timedelta
"""
return self.endtime - datetime.now(tz=tzlocal())
@property
def is_past_event(self):
"""
:return: Check if the event lies in the past
:rtype: bool
"""
return self.days_to_event < timedelta(days=0)
@property
def start_time(self):
"""
:return: The starting time of the event
:rtype: str
"""
start_time = " "
if not self.event.all_day:
start_time = self.begintime.strftime("%H:%M")
return start_time
@property
def description(self):
"""
:return: The event's description
:rtype: str
"""
links = None
wiki = None
event = self.event
if event.description:
links = re.findall("^[Ll]ink:(.*)$", event.description)
wiki = re.findall("^[Ww]iki:(.*)$", event.description)
if links and event.name:
description = "["+links[0]+" "+event.name+"]"
elif wiki:
description = wiki[0]
elif not event.name:
description = "N.A."
else:
description = event.name
return description
def __str__(self):
"""
:return: A wiki line describing the event
:rtype: str
"""
return ("| " +
self.begin_date +
self.end_date +
" || " +
self.start_time +
" || " +
self.location +
" || " +
self.description
)
def append_past_events(past_events, wiki_user, wiki_pw, wiki_archive):
"""
Append the "new" past events to the wiki archive page
:param past_events: the past events that were not added to the events page
:type past_events: list
:param wiki_user: bot user for the wiki
:type wiki_user: str
:param wiki_pw: password for the wiki user
:type wiki_pw: str
:param wiki_archive: archive page
:type wiki_archive: str
:return: None
:rtype: None
"""
site = Site('entropia.de', path='/')
site.login(wiki_user, wiki_pw)
page = site.pages[wiki_archive]
text = page.text().split('\n')
last_table_position = 0
for event in past_events:
year_header = "== {} ==".format(event.endtime.strftime('%Y'))
for index, txtline in enumerate(text):
if txtline == '|}':
last_table_position = index
if str(event) in text:
continue
if year_header in text:
append_list = (
'\n' +
LINE_SEPARATOR +
str(event)
)
text = text[:last_table_position]+[append_list, ]+text[last_table_position:]
else:
append_list = (
3 * '\n' +
year_header +
ARCHIVE_TABLE_HEADER +
'\n' +
LINE_SEPARATOR +
'\n' +
str(event) +
'\n|}'
)
text = text[:last_table_position+1]+[append_list, ]+text[last_table_position+1:]
page.save("\n".join(text))
def get_args():
"""
    Retrieve arguments from the command line and, if a config file is given, from the config file (config values take precedence)
:return: Parsed arguments from command line, config file
:rtype: list
"""
parser = ArgumentParser()
parser.add_argument(
"-c", "--config",
default="/etc/ics2entropiawiki/config.ini",
dest="configfile",
help="Configuration file path",
metavar="CONFIG"
)
parser.add_argument(
"-u", "--url",
dest="ics_url",
help="The URL under which the ICS-file can be retrieved",
metavar="URL",
)
parser.add_argument(
"-f", "--file",
dest="local_file",
help="Local ics file",
metavar="FILE"
)
parser.add_argument(
"--wiki-user",
dest="wiki_user",
help="Wiki user",
metavar="WIKIUSER"
)
parser.add_argument(
"--wiki-password",
dest="wiki_pw",
help="Wiki user's password",
metavar="WIKIPW"
)
parser.add_argument(
"--wiki-page",
dest="wiki_page",
help='Wiki page',
metavar='WIKIPAGE'
)
parser.add_argument(
"--wiki-archive",
dest="wiki_archive",
help='Wiki archive',
metavar='WIKIARCHIVE'
)
parser.add_argument(
"-d", "--debug",
dest="debug",
action="store_true",
default=False
)
args = parser.parse_args()
configfile = args.configfile
ics_url = args.ics_url
file = args.local_file
wiki = {
'user': args.wiki_user,
'pass': args.wiki_pw,
'page': args.wiki_page,
'archive': args.wiki_archive,
}
debug = args.debug
if configfile:
config = configparser.ConfigParser()
config.read(configfile)
try:
ics_url = config["default"]["url"]
wiki = config["wiki"]
except KeyError as error:
print("Please have a look at the sample config provided with the package")
raise error
return ics_url, file, wiki, debug
def deradicalise_ical(ics):
"""
:param ics: input file
:type ics: str
    :return: the ics text with Radicale-specific (X-RADICALE-NAME) headers removed
"""
deradicalised = ""
for line in ics.splitlines():
if 'X-RADICALE-NAME:' not in line:
deradicalised += "\n"+line
return deradicalised
def main():
"""
:return: None
:rtype: None
"""
ics_url, file, wiki, debug = get_args()
event_strings = []
past_events = []
if file:
calendar = Calendar(deradicalise_ical(open(file).read()))
else:
ics_result = requests.get(ics_url)
ics_result.encoding = 'utf-8'
calendar = Calendar(deradicalise_ical(ics_result.text))
for event in sorted(calendar.events, key=lambda ev: ev.begin):
event = EntropiaEvent(event)
if not event.is_past_event:
event_strings.append(
"\n" +
LINE_SEPARATOR +
str(event)
)
else:
past_events.append(event)
append_past_events(past_events, wiki['user'], wiki['pass'], wiki['archive'])
termine = BOTWARNING + "\n" + TABLE_HEADER + "\n" + "".join(event_strings) + "\n" + "".join(TABLE_FOOTER)
if debug:
print(termine)
site = Site('entropia.de', path='/')
site.login(wiki['user'], wiki['pass'])
page = site.pages[wiki['page']]
if termine:
page.save(termine, "Terminbot was here")
page.purge()
if __name__ == '__main__':
main()
| en | 0.48262 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- ics2entropiawiki Read an ics file with the entropia events and insert them in to the entropia homepage wiki. Example: $ ics2entropiawiki.py --config /etc/ics2entropiawiki/config.ini Inserts events not in the past to the "Termine" Wiki page and appends past events to the "Vergangene_Termine" Site <!-- This text is automatically generated by the ics2entropiawiki bot, everything you write and everything you edit WILL BE OVERWRITTEN Dieser Text ist vom ics2entropiawiki bot automatisch generiert. Alles was hier manuell editiert, hinzugefügt wird WIRD ÜBERSCHRIEBEN --> {| class="termine" border="1" cellspacing="0" cellpadding="5" width="100%" style="border-collapse:collapse;" ! style="width:250px;" | Datum !! style="width:50px;" | Zeit !! Ort !! Beschreibung\ {| class="termine" border="1" cellspacing="0" cellpadding="5" style="border-collapse:collapse;" width="100%" |width=15%|'''Datum''' |width=6%|'''Zeit''' |width=15%|'''Ort''' |width=69%|'''Beschreibung''' Parses an ics Event and converts it to an entropia-wiki suitable form :param event: The event to be evaluated :type event: ics.event.Event Retrieve the location of an event :return: location :rtype: str :return: Entropia-Wiki formatted begin time :rtype: str :return: Entropia-Wiki formatted end time :rtype: str :return: Days to the start of the event :rtype: datetime.timedelta :return: Check if the event lies in the past :rtype: bool :return: The starting time of the event :rtype: str :return: The event's description :rtype: str :return: A wiki line describing the event :rtype: str Append the "new" past events to the wiki archive page :param past_events: the past events that were not added to the events page :type past_events: list :param wiki_user: bot user for the wiki :type wiki_user: str :param wiki_pw: password for the wiki user :type wiki_pw: str :param wiki_archive: archive page :type wiki_archive: str :return: None :rtype: None Retrieve arguments from the command line, the config file respectively :return: Parsed arguments from command line, config file :rtype: list :param ics: input file :type ics: str :return: file with remove radicale_headers :return: None :rtype: None | 2.499982 | 2 |
Arrays/LeftRotation.py | anand722000/algo_ds_101 | 175 | 286 | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
alist = list(a)
b = alist[d:]+alist[:d]
return b
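# Example (illustrative, not part of the original submission):
# rotLeft([1, 2, 3, 4, 5], 2) returns [3, 4, 5, 1, 2] -- the slice alist[d:] supplies
# the rotated front of the list and alist[:d] supplies the wrapped-around tail.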
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
alist = list(a)
b = alist[d:]+alist[:d]
return b
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| en | 0.291262 | #!/bin/python3 # Complete the rotLeft function below. | 3.50045 | 4 |
tests/unittests/test_zoo.py | SaizhuoWang/carefree-learn | 0 | 287 | import os
import cflearn
import platform
import unittest
from cfdata.tabular import TabularDataset
num_jobs = 0 if platform.system() == "Linux" else 2
logging_folder = "__test_zoo__"
class TestZoo(unittest.TestCase):
@staticmethod
def _test_zoo_core(model: str) -> None:
x, y = TabularDataset.iris().xy
zoo_folder = os.path.join(logging_folder, f"__{model}__")
zoo = cflearn.Zoo(model)
for key, config in zoo.benchmarks.items():
local_logging_folder = os.path.join(zoo_folder, key)
config["logging_folder"] = local_logging_folder
m = cflearn.make(model, **config).fit(x, y)
cflearn.evaluate(x, y, pipelines=m)
cflearn._rmtree(logging_folder)
def test_fcnn_zoo(self) -> None:
self._test_zoo_core("fcnn")
def test_tree_dnn_zoo(self) -> None:
self._test_zoo_core("tree_dnn")
if __name__ == "__main__":
unittest.main()
| import os
import cflearn
import platform
import unittest
from cfdata.tabular import TabularDataset
num_jobs = 0 if platform.system() == "Linux" else 2
logging_folder = "__test_zoo__"
class TestZoo(unittest.TestCase):
@staticmethod
def _test_zoo_core(model: str) -> None:
x, y = TabularDataset.iris().xy
zoo_folder = os.path.join(logging_folder, f"__{model}__")
zoo = cflearn.Zoo(model)
for key, config in zoo.benchmarks.items():
local_logging_folder = os.path.join(zoo_folder, key)
config["logging_folder"] = local_logging_folder
m = cflearn.make(model, **config).fit(x, y)
cflearn.evaluate(x, y, pipelines=m)
cflearn._rmtree(logging_folder)
def test_fcnn_zoo(self) -> None:
self._test_zoo_core("fcnn")
def test_tree_dnn_zoo(self) -> None:
self._test_zoo_core("tree_dnn")
if __name__ == "__main__":
unittest.main()
| none | 1 | 2.055435 | 2 |
|
unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 19 | 288 | <gh_stars>10-100
#
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by <NAME> on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
from linked_list import Node, LinkedList
def merge_sort(linked_list):
'''
Sorts a linked list in ascending order.
- Recuresively divide the linked list into sublists containing a single node
- Repeatedly merge the sublists to produce sorted swublists until one remains
Returns a sorted linked list.
Runs in O(kn log n) time.
'''
if linked_list.size() == 1:
return linked_list
elif linked_list.is_empty():
return linked_list
left_half, right_half = split(linked_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
def split(linked_list):
'''
Divide the unsorted list at the midpoint into sublists.
Takes O(k log n) quasilinear time.
'''
if linked_list == None or linked_list.head == None:
left_half = linked_list
right_half = None
return left_half, right_half
else: # non-empty linked lists
size = linked_list.size()
midpoint = size // 2
mid_node = linked_list.node_at_index(midpoint-1)
left_half = linked_list
right_half = LinkedList()
        right_half.head = mid_node.next_node  # attach the back half as the head of the new right list
mid_node.next_node = None
return left_half, right_half
def merge(left, right):
'''
Merges two linked lists, sorting by data in nodes.
Returns a new, merged list.
Runs in O(n) linear time.
'''
# Create a new linked list that contains nodes from
# merging left and right
merged = LinkedList()
# Add a fake head that is discarded later to simplify code
merged.add(0)
# Set current to the head of the linked list
current = merged.head
# Obtain head nodes for left and right linked lists
left_head = left.head
right_head = right.head
# Iterate over left and right until we reach the tail node
# of either
while left_head or right_head:
# If the head node of the left is None, we're past the tail
# Add the node from right to merged linkned list
if left_head is None:
current.next_node = right_head
# Call next on right to set loop condition to False
right_head = right_head.next_node
# If the head node of right is None, we're past the tail
# Add the tail node from left to merged linked list
elif right_head is None:
current.next_node = left_head
# Call next on left to set loop condition to False
left_head = left_head.next_node
else:
# Not at either tail node
# Obtain node data to perform comparison operations
left_data = left_head.data
right_data = right_head.data
# If data on left is less than right, set current to left node
if left_data < right_data:
current.next_node = left_head
# Move left head to next node
left_head = left_head.next_node
# If data on left is greater than right, set current to right node
else:
current.next_node = right_head
# Move right head to next node
right_head = right_head.next_node
# Move current to next node
current = current.next_node
# Discard fake head and set first merged node as head
head = merged.head.next_node
merged.head = head
return merged
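# Illustrative trace (not part of the original exercise): merging the already-sorted
# sublists 2 -> 10 -> 44 and 15 -> 200 repeatedly compares the two current heads and
# links the smaller one, producing 2 -> 10 -> 15 -> 44 -> 200.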
l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)
sorted_linked_list = merge_sort(l)
print(sorted_linked_list)
| #
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by <NAME> on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
from linked_list import Node, LinkedList
def merge_sort(linked_list):
'''
Sorts a linked list in ascending order.
    - Recursively divide the linked list into sublists containing a single node
    - Repeatedly merge the sublists to produce sorted sublists until only one remains
Returns a sorted linked list.
Runs in O(kn log n) time.
'''
if linked_list.size() == 1:
return linked_list
elif linked_list.is_empty():
return linked_list
left_half, right_half = split(linked_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
def split(linked_list):
'''
Divide the unsorted list at the midpoint into sublists.
Takes O(k log n) quasilinear time.
'''
if linked_list == None or linked_list.head == None:
left_half = linked_list
right_half = None
return left_half, right_half
else: # non-empty linked lists
size = linked_list.size()
midpoint = size // 2
mid_node = linked_list.node_at_index(midpoint-1)
left_half = linked_list
right_half = LinkedList()
        right_half.head = mid_node.next_node  # attach the back half as the head of the new right list
mid_node.next_node = None
return left_half, right_half
def merge(left, right):
'''
Merges two linked lists, sorting by data in nodes.
Returns a new, merged list.
Runs in O(n) linear time.
'''
# Create a new linked list that contains nodes from
# merging left and right
merged = LinkedList()
# Add a fake head that is discarded later to simplify code
merged.add(0)
# Set current to the head of the linked list
current = merged.head
# Obtain head nodes for left and right linked lists
left_head = left.head
right_head = right.head
# Iterate over left and right until we reach the tail node
# of either
while left_head or right_head:
# If the head node of the left is None, we're past the tail
# Add the node from right to merged linkned list
if left_head is None:
current.next_node = right_head
# Call next on right to set loop condition to False
right_head = right_head.next_node
# If the head node of right is None, we're past the tail
# Add the tail node from left to merged linked list
elif right_head is None:
current.next_node = left_head
# Call next on left to set loop condition to False
left_head = left_head.next_node
else:
# Not at either tail node
# Obtain node data to perform comparison operations
left_data = left_head.data
right_data = right_head.data
# If data on left is less than right, set current to left node
if left_data < right_data:
current.next_node = left_head
# Move left head to next node
left_head = left_head.next_node
# If data on left is greater than right, set current to right node
else:
current.next_node = right_head
# Move right head to next node
right_head = right_head.next_node
# Move current to next node
current = current.next_node
# Discard fake head and set first merged node as head
head = merged.head.next_node
merged.head = head
return merged
l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)
sorted_linked_list = merge_sort(l)
print(sorted_linked_list) | en | 0.862653 | # # Data Structures: Linked List Merge Sort: The Conquer Step # Python Techdegree # # Created by <NAME> on 3/24/19. # Copyright (c) 2019 ddApps. All rights reserved. # ------------------------------------------------ Sorts a linked list in ascending order. - Recuresively divide the linked list into sublists containing a single node - Repeatedly merge the sublists to produce sorted swublists until one remains Returns a sorted linked list. Runs in O(kn log n) time. Divide the unsorted list at the midpoint into sublists. Takes O(k log n) quasilinear time. # non-empty linked lists Merges two linked lists, sorting by data in nodes. Returns a new, merged list. Runs in O(n) linear time. # Create a new linked list that contains nodes from # merging left and right # Add a fake head that is discarded later to simplify code # Set current to the head of the linked list # Obtain head nodes for left and right linked lists # Iterate over left and right until we reach the tail node # of either # If the head node of the left is None, we're past the tail # Add the node from right to merged linkned list # Call next on right to set loop condition to False # If the head node of right is None, we're past the tail # Add the tail node from left to merged linked list # Call next on left to set loop condition to False # Not at either tail node # Obtain node data to perform comparison operations # If data on left is less than right, set current to left node # Move left head to next node # If data on left is greater than right, set current to right node # Move right head to next node # Move current to next node # Discard fake head and set first merged node as head | 4.408942 | 4 |
nearpy/examples/example2.py | samyoo78/NearPy | 624 | 289 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
    # Number of data points (don't do too much because of exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
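# example2() is only defined above; a minimal way to run it as a script would be the
# following guard (an addition for illustration, not part of the original example):
if __name__ == '__main__':
    example2()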
| # -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
    # Number of data points (don't do too much because of exact search)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
| en | 0.66453 | # -*- coding: utf-8 -*- # Copyright (c) 2013 <NAME> # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Dimension of feature space # Number of data points (dont do too much because of exact search) ########################################################## # Create permutations meta-hash # Create binary hash as child hash # Add rbp as child hash of permutations hash # Create engine # First index some random vectors # Then update permuted index # Get random query vector # Do random query on engine 3 # Real neighbours ########################################################## # Create permutations meta-hash # Create binary hash as child hash # Add rbp as child hash of permutations hash # Create engine # First index some random vectors # Get random query vector # Do random query on engine 4 # Real neighbours ########################################################## # Create engine # First index some random vectors # Get random query vector # Do random query on engine 4 # Real neighbours ########################################################## | 1.948359 | 2 |
discordbot.py | 8ka1alu/heroku-global-py | 0 | 290 | from discord.ext import commands, tasks # Bot Commands Frameworkをインポート
import traceback # imported to print error tracebacks
import os
import discord
import r
TOKEN = os.environ['DISCORD_BOT_TOKEN']
prefix = os.environ['DISCORD_BOT_PREFIX'] # command prefix
# Names of the cogs to load.
INITIAL_EXTENSIONS = [
'cogs.eval',
'cogs.glchat',
'cogs.gladd',
'cogs.gldel'
]
# Class definition. Inherits from Bot, which is a subclass of Client.
class MyBot(commands.Bot):
    # Constructor for MyBot.
    def __init__(self, command_prefix, help_command):
        # Pass the values on to the superclass constructor.
        super().__init__(command_prefix,help_command)
        # Load each cog listed in INITIAL_EXTENSIONS.
        # If loading fails, print the error traceback.
for cog in INITIAL_EXTENSIONS:
try:
self.load_extension(cog)
except Exception:
traceback.print_exc()
    # Event invoked once the bot has finished starting up
    async def on_ready(self):
        print(self.user.name) # the bot's name
        print(self.user.id) # the bot's ID
        print(discord.__version__) # discord.py version
print('----------------')
print('Hello World !!')
await self.change_presence(status=discord.Status.idle,activity=discord.Game(name=f'Ping:{self.ws.latency * 1000:.0f}ms'))
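        # Startup bookkeeping in Redis via the local r module: check whether the set
        # named by global_ch ("gloch") already exists. If it does and contains the
        # placeholder member "0", remove that placeholder; if the key is missing,
        # create the set by adding "0". Success or failure is reported on stdout.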
conn=r.connect()
ky=conn.keys()
global_ch="gloch"
count=0
for i in ky:
i=str(i)
if i == global_ch:
count+=1
if count>0:
smsd=conn.smembers(global_ch)
count=0
for q in smsd:
q=str(q)
if q=="0":
count+=1
if count>0:
p=conn.srem(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
else:
print(ky)
else:
p=conn.sadd(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
class JapaneseHelpCommand(commands.DefaultHelpCommand):
def __init__(self):
super().__init__()
self.commands_heading = "コマンド:"
self.no_category = "その他"
self.command_attrs["help"] = "コマンド一覧と簡単な説明を表示"
def get_ending_note(self):
return (f"各コマンドの説明: {prefix}help <コマンド名>\n"
f"各カテゴリの説明: {prefix}help <カテゴリ名>\n")
# Instantiate MyBot and start it.
if __name__ == '__main__':
    bot = MyBot(command_prefix=prefix,help_command=JapaneseHelpCommand()) # command_prefix is the leading string every command must start with, e.g. !ping
    bot.run(TOKEN) # the bot's token
| from discord.ext import commands, tasks # Bot Commands Frameworkをインポート
import traceback # エラー表示のためにインポート
import os
import discord
import r
TOKEN = os.environ['DISCORD_BOT_TOKEN']
prefix = os.environ['DISCORD_BOT_PREFIX'] #プレフィックス
# 読み込むコグの名前を格納しておく。
INITIAL_EXTENSIONS = [
'cogs.eval',
'cogs.glchat',
'cogs.gladd',
'cogs.gldel'
]
# クラスの定義。ClientのサブクラスであるBotクラスを継承。
class MyBot(commands.Bot):
# MyBotのコンストラクタ。
def __init__(self, command_prefix, help_command):
# スーパークラスのコンストラクタに値を渡して実行。
super().__init__(command_prefix,help_command)
# INITIAL_COGSに格納されている名前から、コグを読み込む。
# エラーが発生した場合は、エラー内容を表示。
for cog in INITIAL_EXTENSIONS:
try:
self.load_extension(cog)
except Exception:
traceback.print_exc()
# Botの準備完了時に呼び出されるイベント
async def on_ready(self):
print(self.user.name) # ボットの名前
print(self.user.id) # ボットのID
print(discord.__version__) # discord.pyのバージョン
print('----------------')
print('Hello World !!')
await self.change_presence(status=discord.Status.idle,activity=discord.Game(name=f'Ping:{self.ws.latency * 1000:.0f}ms'))
conn=r.connect()
ky=conn.keys()
global_ch="gloch"
count=0
for i in ky:
i=str(i)
if i == global_ch:
count+=1
if count>0:
smsd=conn.smembers(global_ch)
count=0
for q in smsd:
q=str(q)
if q=="0":
count+=1
if count>0:
p=conn.srem(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
else:
print(ky)
else:
p=conn.sadd(global_ch,"0")
if p==True:
print("正常起動")
else:
print("異常発生")
class JapaneseHelpCommand(commands.DefaultHelpCommand):
def __init__(self):
super().__init__()
self.commands_heading = "コマンド:"
self.no_category = "その他"
self.command_attrs["help"] = "コマンド一覧と簡単な説明を表示"
def get_ending_note(self):
return (f"各コマンドの説明: {prefix}help <コマンド名>\n"
f"各カテゴリの説明: {prefix}help <カテゴリ名>\n")
#MyBotのインスタンス化及び起動処理。
if __name__ == '__main__':
bot = MyBot(command_prefix=prefix,help_command=JapaneseHelpCommand()) # command_prefixはコマンドの最初の文字として使うもの。 e.g. !ping
bot.run(TOKEN) # Botのトークン
| ja | 0.999833 | # Bot Commands Frameworkをインポート # エラー表示のためにインポート #プレフィックス # 読み込むコグの名前を格納しておく。 # クラスの定義。ClientのサブクラスであるBotクラスを継承。 # MyBotのコンストラクタ。 # スーパークラスのコンストラクタに値を渡して実行。 # INITIAL_COGSに格納されている名前から、コグを読み込む。 # エラーが発生した場合は、エラー内容を表示。 # Botの準備完了時に呼び出されるイベント # ボットの名前 # ボットのID # discord.pyのバージョン #MyBotのインスタンス化及び起動処理。 # command_prefixはコマンドの最初の文字として使うもの。 e.g. !ping # Botのトークン | 2.331722 | 2 |
examples/plot_spectral_unmixing.py | ealopez/pycroscopy | 0 | 291 | <gh_stars>0
"""
=================================================================
Spectral Unmixing
=================================================================
<NAME>, <NAME>, <NAME>
* Institute for Functional Imaging of Materials
* Center for Nanophase Materials Sciences
Oak Ridge National Laboratory, Oak Ridge TN 37831, USA
In this notebook we load some spectral data, and perform basic data analysis, including:
========================================================================================
* KMeans Clustering
* Non-negative Matrix Factorization
* Principal Component Analysis
Software Prerequisites:
=======================
* Standard distribution of **Anaconda** (includes numpy, scipy, matplotlib and sci-kit learn)
* **pycroscopy** : Though pycroscopy is mainly used here for plotting purposes only, its true capabilities
are realized through the ability to seamlessly perform these analyses on any imaging dataset (regardless
of origin, size, complexity) and storing the results back into the same dataset among other things
"""
# Import packages
# Ensure that this code works on both python 2 and python 3
from __future__ import division, print_function, absolute_import, unicode_literals
# basic numeric computation:
import numpy as np
# The package used for creating and manipulating HDF5 files:
import h5py
# Plotting and visualization:
import matplotlib.pyplot as plt
# for downloading files:
import wget
import os
# multivariate analysis:
from sklearn.cluster import KMeans
from sklearn.decomposition import NMF
import subprocess
import sys
def install(package):
subprocess.call([sys.executable, "-m", "pip", "install", package])
# Package for downloading online files:
# finally import pycroscopy:
try:
import pycroscopy as px
except ImportError:
print('pycroscopy not found. Will install with pip.')
import pip
install('pycroscopy')
import pycroscopy as px
from pycroscopy.viz import cluster_utils
#####################################################################################
# The Data
# ========
#
# In this example, we will work on a **Band Excitation Piezoresponse Force Microscopy (BE-PFM)** imaging dataset
# acquired from advanced atomic force microscopes. In this dataset, a spectrum was collected for each position in a two
# dimensional grid of spatial locations. Thus, this is a three dimensional dataset that has been flattened to a two
# dimensional matrix in accordance with the pycroscopy data format.
#
# Fortunately, all statistical analysis, machine learning, spectral unmixing algorithms, etc. only accept data that is
# formatted in the same manner of [position x spectra] in a two dimensional matrix.
#
# We will be using a data file available on our GitHub project page by default. You are encouraged
# to download this document as a Jupyter Notebook (button at the bottom of the page) and use your own data instead.
# When using your own data, you can skip this cell and provide the path to your data using the variable - data_file_path
data_file_path = 'temp_um.h5'
# download the data file from Github:
url = 'https://raw.githubusercontent.com/pycroscopy/pycroscopy/master/data/BELine_0004.h5'
data_file_path = wget.download(url, data_file_path, bar=None)
h5_file = h5py.File(data_file_path, mode='r+')
print('Contents of data file:')
print('----------------------')
px.hdf_utils.print_tree(h5_file)
print('----------------------')
h5_meas_grp = h5_file['Measurement_000']
# Extracting some basic parameters:
num_rows = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_rows')
num_cols = px.hdf_utils.get_attr(h5_meas_grp, 'grid_num_cols')
# Getting a reference to the main dataset:
h5_main = px.PycroDataset(h5_meas_grp['Channel_000/Raw_Data'])
px.hdf_utils.write_simple_attrs(h5_main, {'quantity': 'Deflection', 'units': 'V'})
# Extracting the X axis - vector of frequencies
h5_spec_vals = px.hdf_utils.get_auxiliary_datasets(h5_main, 'Spectroscopic_Values')[-1]
freq_vec = np.squeeze(h5_spec_vals.value) * 1E-3
print('Data currently of shape:', h5_main.shape)
x_label = 'Frequency (kHz)'
y_label = 'Amplitude (a.u.)'
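#####################################################################################
# Quick sanity check (an illustrative addition, not part of the original example):
# plot the mean response amplitude over all spatial positions against the frequency
# vector extracted above.
mean_amplitude = np.mean(np.abs(h5_main[()]), axis=0)
fig, axis = plt.subplots()
axis.plot(freq_vec, mean_amplitude)
axis.set_xlabel(x_label)
axis.set_ylabel(y_label)
axis.set_title('Mean response amplitude across all positions')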
#####################################################################################
# 1. Singular Value Decomposition (SVD)
# =====================================
#
# SVD is an eigenvector decomposition that is defined statistically, and therefore typically produces
# non-physical eigenvectors. Consequently, the eigenvectors and abundance maps from
# SVD must be interpreted with care and caution. Nonetheless, it is a good method for quickly
# visualizing the major trends in the dataset since the resultant eigenvectors are sorted in descending
# order of variance or importance. Furthermore, SVD is also very well suited for data cleaning through
# the reconstruction of the dataset using only the first N (most significant) components.
#
# SVD results in three matrices:
#
# * V - Eigenvectors sorted by variance in descending order
# * U - corresponding abundance maps
# * S - Variance or importance of each of these components
#
# Advantage of pycroscopy:
# ------------------------
# Notice that we are working with a complex valued dataset. Passing the complex values as is to SVD would result in
# complex valued eigenvectors / endmembers as well as abundance maps. Complex valued abundance maps are not physical.
# Thus, one would need to restructure the data such that it is real-valued only.
#
# One solution is to stack the real value followed by the magnitude of the imaginary component before passing to SVD.
# After SVD, the real-valued eigenvectors would need to be treated as the concatenation of the real and imaginary
# components. So, the eigenvectors would need to be restructured to get back the complex valued eigenvectors.
#
# **Pycroscopy handles all these data transformations (both for the source dataset and the eigenvectors)
# automatically.** In general, pycroscopy handles compound / complex valued datasets wherever possible.
#
# Furthermore, while it is not discussed in this example, pycroscopy also writes the results from SVD back to
# the same source h5 file, including all relevant links to the source dataset and other ancillary datasets.
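# (Illustrative sketch of the stacking described above; pycroscopy performs the equivalent bookkeeping
# internally, and the underscore-prefixed names below are hypothetical.)
_cmplx = np.random.rand(10, 8) + 1j * np.random.rand(10, 8)  # dummy complex-valued "spectra"
_stacked = np.hstack([_cmplx.real, np.abs(_cmplx.imag)])  # real-valued, shape (10, 16)
_half = _stacked.shape[1] // 2
_real_part, _imag_part = _stacked[:, :_half], _stacked[:, _half:]  # splitting the two halves back apart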
decomposer = px.processing.svd_utils.SVD(h5_main, num_components=100)
h5_svd_group = decomposer.compute()
h5_u = h5_svd_group['U']
h5_v = h5_svd_group['V']
h5_s = h5_svd_group['S']
# Since the two spatial dimensions (x, y) have been collapsed to one, we need to reshape the abundance maps:
abun_maps = np.reshape(h5_u[:, :25], (num_rows, num_cols, -1))
px.plot_utils.plot_map_stack(abun_maps, num_comps=9, title='SVD Abundance Maps', reverse_dims=True,
color_bar_mode='single', cmap='inferno', title_yoffset=0.95)
# Visualize the variance / statistical importance of each component:
px.plot_utils.plot_scree(h5_s, title='Note the exponential drop of variance with number of components')
# Visualize the eigenvectors:
_ = px.plot_utils.plot_complex_spectra(h5_v[:9, :], x_label=x_label, y_label=y_label,
title='SVD Eigenvectors', evenly_spaced=False)
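# (Illustrative sketch on dummy data.) The "data cleaning" use of SVD mentioned above amounts to
# rebuilding a matrix from only its first N components; the names below are hypothetical and
# independent of the h5 datasets used in this example.
_mat = np.random.rand(50, 40)
_u, _s, _vt = np.linalg.svd(_mat, full_matrices=False)
_n_keep = 5
_mat_cleaned = np.dot(_u[:, :_n_keep] * _s[:_n_keep], _vt[:_n_keep, :])  # rank-5 approximation of _mat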
#####################################################################################
# 2. KMeans Clustering
# ====================
#
# KMeans clustering is a quick and easy method to determine the types of spectral responses present in the
# data. It is not a decomposition method, but a basic clustering method. The user inputs the number of
# clusters (sets) to partition the data into. The algorithm proceeds to find the optimal labeling
# (i.e., the assignment of each spectrum to the k-th set) such that the within-cluster
# sum of squares is minimized.
#
# Set the number of clusters below
num_clusters = 4
estimator = px.processing.Cluster(h5_main, KMeans(n_clusters=num_clusters))
h5_kmeans_grp = estimator.compute(h5_main)
h5_kmeans_labels = h5_kmeans_grp['Labels']
h5_kmeans_mean_resp = h5_kmeans_grp['Mean_Response']
cluster_utils.plot_cluster_h5_group(h5_kmeans_grp)
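# (Illustrative sketch on dummy data; the underscore-prefixed names are hypothetical.) The quantity
# KMeans minimizes, the within-cluster sum of squares mentioned above, is exposed by scikit-learn
# as the fitted estimator's inertia_ attribute:
_pts = np.random.rand(100, 8)
_km = KMeans(n_clusters=3, n_init=10).fit(_pts)
_labels, _wcss = _km.labels_, _km.inertia_  # cluster assignments and the minimized objective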
#####################################################################################
# 3. Non-negative Matrix Factorization (NMF)
# ===========================================
#
# NMF, or non-negative matrix factorization, is a method that is useful for unmixing spectral
# data. It only works on data with non-negative real values. It operates by approximate determination of
# factors (matrices) W and H, given a matrix V, as shown below
#
# .. image:: https://upload.wikimedia.org/wikipedia/commons/f/f9/NMF.png
#
# Unlike SVD and k-Means that can be applied to complex-valued datasets, NMF only works on non-negative datasets.
# For illustrative purposes, we will only take the amplitude component of the spectral data
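# (Illustrative sketch on dummy non-negative data; the underscore-prefixed names are hypothetical.)
# NMF approximates V ~= W H, where the rows of H are spectral end-members and W holds their abundances.
_V = np.random.rand(30, 20)
_nmf = NMF(n_components=3, init='random', random_state=0)
_W = _nmf.fit_transform(_V)  # abundances, shape (30, 3)
_H = _nmf.components_  # end-members, shape (3, 20)
_residual = np.linalg.norm(_V - np.dot(_W, _H))  # quality of the low-rank approximation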
num_comps = 4
# get the non-negative portion of the dataset
data_mat = np.abs(h5_main)
model = NMF(n_components=num_comps, init='random', random_state=0)
model.fit(data_mat)
fig, axis = plt.subplots(figsize=(5.5, 5))
px.plot_utils.plot_line_family(axis, freq_vec, model.components_, label_prefix='NMF Component #')
axis.set_xlabel(x_label, fontsize=12)
axis.set_ylabel(y_label, fontsize=12)
axis.set_title('NMF Components', fontsize=14)
axis.legend(bbox_to_anchor=[1.0, 1.0], fontsize=12)
#####################################################################################
# Close and delete the h5_file
h5_file.close()
os.remove(data_file_path)
test/test_downloadfile.py | foliant-docs/foliantcontrib.downloadfile | 0 | 292 | import shutil
from pathlib import Path
from unittest import TestCase
from unittest.mock import Mock
from unittest.mock import patch
from foliant.config.downloadfile import download_file
from foliant.config.downloadfile import get_file_ext_from_url
from foliant.config.downloadfile import get_file_name_from_url
class TestDownloadFile(TestCase):
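    # These tests patch foliant.config.downloadfile.urlopen with a Mock, so no real network request
    # is made: the fake response returns b'File content', which each test then expects to find in
    # the downloaded file.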
def setUp(self):
self.project_dir = (Path(__file__).parent / 'project_dir').resolve()
self.project_dir.mkdir(exist_ok=True)
def tearDown(self):
shutil.rmtree(self.project_dir, ignore_errors=True)
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_only_url(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
download_file(root_dir=self.project_dir, url=url)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_save_to(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
save_to = 'subdir1/subdir2/downloaded.txt'
download_file(root_dir=self.project_dir, url=url, save_to=save_to)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertEqual(request.headers, {})
self.assertIsNone(context)
with open(self.project_dir / save_to) as f:
self.assertEqual(f.read(), 'File content')
@patch('foliant.config.downloadfile.urlopen', autospec=True)
def test_with_auth(self, urlopen):
mock_response = Mock()
mock_response.read.return_value = b'File content'
urlopen.return_value = mock_response
url = 'http://example.com/myfile.txt'
download_file(
root_dir=self.project_dir,
url=url,
login='john',
password='<PASSWORD>'
)
request = urlopen.call_args.args[0]
context = urlopen.call_args.kwargs['context']
self.assertIn('Authorization', request.headers)
self.assertIsNone(context)
with open(self.project_dir / 'myfile.txt') as f:
self.assertEqual(f.read(), 'File content')
class TestGetFileNameFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
name = get_file_name_from_url(url)
self.assertEqual(name, 'myfile.txt')
class TestGetFileExtFromURL(TestCase):
def test_with_ext(self):
url = 'http://example.com/sub/myfile.txt'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
def test_no_ext(self):
url = 'http://example.com/sub/myfile'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '')
def test_with_clutter(self):
url = 'http://example.com/sub/myfile.txt?param=val&foo=bar'
ext = get_file_ext_from_url(url)
self.assertEqual(ext, '.txt')
question.py | Lilium765/momoko | 0 | 293 | <gh_stars>0
import discord
client = discord.Client() # client object used for the connection
# On startup
@client.event
async def on_ready():
    print('ログイン成功')
# Watch incoming messages
@client.event
async def on_message(message):
    # If the message starts with '/box', relay it
    if message.content.startswith('/box'):
        # strip the '/box' prefix from the text
        question = message.content[len('/box'):].strip()
        # id of the channel the question should be posted to
        target_channel_id = getTargetChannelId()
        # if id == 0, send an error report to the asker via DM
        # otherwise, post the question anonymously
        if target_channel_id == 0:
            dm = await message.author.create_dm() # create a DM channel to the asker
            await dm.send(
                'Sorry, メッセージを送信できませんでした.'
                'もう1度試してみてください.\n'
                '【質問文】' + question)
        else:
            # channel where the anonymous question should go
            target_channel = client.get_channel(target_channel_id)
            # send the question message to that channel
            await target_channel.send(question)
# Get the id of the channel used for anonymous questions.
# The first text channel in the specified category is treated as the question channel;
# if the category contains no channels, 0 is returned.
def getTargetChannelId() -> int:
    # question (target) channel
    target_channel = {'id': 0, 'position': 99999999}
    # ***********************************************************
    # name of the specified category (the category containing the target channel)
    category_id = 711238137598181396 # category id
    target_category_name = client.get_channel(category_id).name
    # ***********************************************************
    # list of all text channels on the specified server
    all_channels = client.get_guild(602423784946925568).text_channels
    # search every text channel for the first one that belongs to the specified category
    for channel in all_channels:
        # only channels in the specified category are considered candidates
        if str(channel.category) == target_category_name:
            # keep the channel with the smaller position ("closer" to being the target channel);
            # the initial position is large (99999999), so the first candidate always replaces it,
            # and after the loop the channel with the smallest position remains
            if target_channel['position'] > int(channel.position):
                target_channel['id'] = int(channel.id)
                target_channel['position'] = int(channel.position)
    # return the id that ended up stored
    return target_channel['id']
# connect to Discord as the bot (pass the bot token)
client.run('605042341715378176')
figure_code/rate_of_change_tc.py | DavisWeaver/fears | 0 | 294 | import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter, dir_manager
import os
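# Builds a grid of population timecourse panels, one per drug absorption rate (k_abs): every
# simulation replicate is drawn in gray, the average over the surviving replicates is overlaid,
# and the drug concentration curve from the first replicate is added on a twin axis.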
suffix = '07212021_0001'
data_folder = 'results_' + suffix
exp_info_file = 'experiment_info_' + suffix + '.p'
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
k_abs = exp_info.slopes
exp_folders.reverse()
k_abs = np.flip(k_abs)
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4))
pop = exp_info.populations[0]
ax = ax.reshape((len(k_abs),))
axnum = 0
tc_axes=[]
drug_axes=[]
for exp in exp_folders:
k_abs_t = exp[exp.find('=')+1:]
k_abs_t = float(k_abs_t)
num = np.argwhere(k_abs == k_abs_t)
num = num[0,0]
# generate timecourse axes
tcax = ax[axnum]
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# for sim in sim_files:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-1]
data = data[:,0:-1]
# data = data/np.max(data)
data_t = data[-1,:]
        # check whether any genotype still has at least one cell at the final timepoint (i.e. the simulation survived)
if any(data_t >= 1):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# data = data/np.max(data)
# exp_info.populations[num].counts_log_scale = True
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':0.7,
'color':'black',
'linewidth':2,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=12,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_labels=False,
linewidth=2,
labelsize=12,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
labelsize=12)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
    axnum+=1
rpython/annotator/annrpython.py | microvm/pypy-mu | 0 | 295 | <filename>rpython/annotator/annrpython.py
from __future__ import absolute_import
import types
from collections import defaultdict
from rpython.tool.ansi_print import AnsiLogger
from rpython.tool.pairtype import pair
from rpython.tool.error import (format_blocked_annotation_error,
gather_error, source_lines)
from rpython.flowspace.model import Variable, Constant, checkgraph
from rpython.translator import simplify, transform
from rpython.annotator import model as annmodel, signature
from rpython.annotator.model import (
typeof, s_ImpossibleValue, SomeInstance, intersection, difference)
from rpython.annotator.bookkeeper import Bookkeeper
from rpython.rtyper.normalizecalls import perform_normalizations
log = AnsiLogger("annrpython")
class RPythonAnnotator(object):
"""Block annotator for RPython.
See description in doc/translation.txt."""
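    # Typical usage, as a sketch (mirroring how the annotator is driven from the translation
    # toolchain and its tests; `simple_func` below is a hypothetical RPython function):
    #
    #   annotator = RPythonAnnotator()
    #   s_result = annotator.build_types(simple_func, [int])
    #   # 's_result' is the annotation (e.g. SomeInteger) inferred for the return value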
def __init__(self, translator=None, policy=None, bookkeeper=None):
import rpython.rtyper.extfuncregistry # has side effects
if translator is None:
# interface for tests
from rpython.translator.translator import TranslationContext
translator = TranslationContext()
translator.annotator = self
self.translator = translator
self.pendingblocks = {} # map {block: graph-containing-it}
self.annotated = {} # set of blocks already seen
self.added_blocks = None # see processblock() below
self.links_followed = {} # set of links that have ever been followed
self.notify = {} # {block: {positions-to-reflow-from-when-done}}
self.fixed_graphs = {} # set of graphs not to annotate again
self.blocked_blocks = {} # set of {blocked_block: (graph, index)}
# --- the following information is recorded for debugging ---
self.blocked_graphs = {} # set of graphs that have blocked blocks
# --- end of debugging information ---
self.frozen = False
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
self.policy = AnnotatorPolicy()
else:
self.policy = policy
if bookkeeper is None:
bookkeeper = Bookkeeper(self)
self.bookkeeper = bookkeeper
def __getstate__(self):
attrs = """translator pendingblocks annotated links_followed
notify bookkeeper frozen policy added_blocks""".split()
ret = self.__dict__.copy()
for key, value in ret.items():
if key not in attrs:
assert type(value) is dict, (
"%r is not dict. please update %s.__getstate__" %
(key, self.__class__.__name__))
ret[key] = {}
return ret
#___ convenience high-level interface __________________
def build_types(self, function, input_arg_types, complete_now=True,
main_entry_point=False):
"""Recursively build annotations about the specific entry point."""
assert isinstance(function, types.FunctionType), "fix that!"
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# make input arguments and set their type
args_s = [self.typeannotation(t) for t in input_arg_types]
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy)
if main_entry_point:
self.translator.entry_point_graph = flowgraph
return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now)
def get_call_parameters(self, function, args_s, policy):
desc = self.bookkeeper.getdesc(function)
prevpolicy = self.policy
self.policy = policy
self.bookkeeper.enter(None)
try:
return desc.get_call_parameters(args_s)
finally:
self.bookkeeper.leave()
self.policy = prevpolicy
def annotate_helper(self, function, args_s, policy=None):
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
graph, inputcells = self.get_call_parameters(function, args_s, policy)
self.build_graph_types(graph, inputcells, complete_now=False)
self.complete_helpers(policy)
return graph
def complete_helpers(self, policy):
saved = self.policy, self.added_blocks
self.policy = policy
try:
self.added_blocks = {}
self.complete()
# invoke annotation simplifications for the new blocks
self.simplify(block_subset=self.added_blocks)
finally:
self.policy, self.added_blocks = saved
def build_graph_types(self, flowgraph, inputcells, complete_now=True):
checkgraph(flowgraph)
nbarg = len(flowgraph.getargs())
assert len(inputcells) == nbarg # wrong number of args
# register the entry point
self.addpendinggraph(flowgraph, inputcells)
# recursively proceed until no more pending block is left
if complete_now:
self.complete()
return self.annotation(flowgraph.getreturnvar())
def gettype(self, variable):
"""Return the known type of a control flow graph variable,
defaulting to 'object'."""
if isinstance(variable, Constant):
return type(variable.value)
elif isinstance(variable, Variable):
s_variable = variable.annotation
if s_variable:
return s_variable.knowntype
else:
return object
else:
raise TypeError("Variable or Constant instance expected, "
"got %r" % (variable,))
def getuserclassdefinitions(self):
"""Return a list of ClassDefs."""
return self.bookkeeper.classdefs
#___ medium-level interface ____________________________
def addpendinggraph(self, flowgraph, inputcells):
self.addpendingblock(flowgraph, flowgraph.startblock, inputcells)
def addpendingblock(self, graph, block, cells):
"""Register an entry point into block with the given input cells."""
if graph in self.fixed_graphs:
# special case for annotating/rtyping in several phases: calling
# a graph that has already been rtyped. Safety-check the new
# annotations that are passed in, and don't annotate the old
# graph -- it's already low-level operations!
for a, s_newarg in zip(block.inputargs, cells):
s_oldarg = self.binding(a)
assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg
else:
assert not self.frozen
if block not in self.annotated:
self.bindinputargs(graph, block, cells)
else:
self.mergeinputargs(graph, block, cells)
if not self.annotated[block]:
self.pendingblocks[block] = graph
def complete_pending_blocks(self):
while self.pendingblocks:
block, graph = self.pendingblocks.popitem()
self.processblock(graph, block)
def complete(self):
"""Process pending blocks until none is left."""
while True:
self.complete_pending_blocks()
self.policy.no_more_blocks_to_annotate(self)
if not self.pendingblocks:
break # finished
# make sure that the return variables of all graphs is annotated
if self.added_blocks is not None:
newgraphs = [self.annotated[block] for block in self.added_blocks]
newgraphs = dict.fromkeys(newgraphs)
got_blocked_blocks = False in newgraphs
else:
newgraphs = self.translator.graphs #all of them
got_blocked_blocks = False in self.annotated.values()
if got_blocked_blocks:
for graph in self.blocked_graphs.values():
self.blocked_graphs[graph] = True
blocked_blocks = [block for block, done in self.annotated.items()
if done is False]
assert len(blocked_blocks) == len(self.blocked_blocks)
text = format_blocked_annotation_error(self, self.blocked_blocks)
#raise SystemExit()
raise annmodel.AnnotatorError(text)
for graph in newgraphs:
v = graph.getreturnvar()
if v.annotation is None:
self.setbinding(v, s_ImpossibleValue)
def validate(self):
"""Check that the annotation results are valid"""
self.bookkeeper.check_no_flags_on_instances()
def annotation(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
if isinstance(arg, Variable):
return arg.annotation
elif isinstance(arg, Constant):
return self.bookkeeper.immutablevalue(arg.value)
else:
raise TypeError('Variable or Constant expected, got %r' % (arg,))
def binding(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
s_arg = self.annotation(arg)
if s_arg is None:
raise KeyError
return s_arg
def typeannotation(self, t):
return signature.annotation(t, self.bookkeeper)
def setbinding(self, arg, s_value):
s_old = arg.annotation
if s_old is not None:
if not s_value.contains(s_old):
log.WARNING("%s does not contain %s" % (s_value, s_old))
log.WARNING("%s" % annmodel.unionof(s_value, s_old))
assert False
arg.annotation = s_value
def warning(self, msg, pos=None):
if pos is None:
try:
pos = self.bookkeeper.position_key
except AttributeError:
pos = '?'
if pos != '?':
pos = self.whereami(pos)
log.WARNING("%s/ %s" % (pos, msg))
#___ interface for annotator.bookkeeper _______
def recursivecall(self, graph, whence, inputcells):
if isinstance(whence, tuple):
parent_graph, parent_block, parent_index = whence
tag = parent_block, parent_index
self.translator.update_call_graph(parent_graph, graph, tag)
# self.notify[graph.returnblock] is a dictionary of call
# points to this func which triggers a reflow whenever the
# return block of this graph has been analysed.
callpositions = self.notify.setdefault(graph.returnblock, {})
if whence is not None:
if callable(whence):
def callback():
whence(self, graph)
else:
callback = whence
callpositions[callback] = True
# generalize the function's input arguments
self.addpendingblock(graph, graph.startblock, inputcells)
# get the (current) return value
v = graph.getreturnvar()
try:
return self.binding(v)
except KeyError:
# the function didn't reach any return statement so far.
# (some functions actually never do, they always raise exceptions)
return s_ImpossibleValue
def reflowfromposition(self, position_key):
graph, block, index = position_key
self.reflowpendingblock(graph, block)
def call_sites(self):
newblocks = self.added_blocks
if newblocks is None:
newblocks = self.annotated # all of them
for block in newblocks:
for op in block.operations:
if op.opname in ('simple_call', 'call_args'):
yield op
# some blocks are partially annotated
if op.result.annotation is None:
break # ignore the unannotated part
#___ simplification (should be moved elsewhere?) _______
def simplify(self, block_subset=None, extra_passes=None):
# Generic simplifications
transform.transform_graph(self, block_subset=block_subset,
extra_passes=extra_passes)
if block_subset is None:
graphs = self.translator.graphs
else:
graphs = {}
for block in block_subset:
graph = self.annotated.get(block)
if graph:
graphs[graph] = True
for graph in graphs:
simplify.eliminate_empty_blocks(graph)
self.bookkeeper.compute_at_fixpoint()
if block_subset is None:
perform_normalizations(self)
#___ flowing annotations in blocks _____________________
def processblock(self, graph, block):
# Important: this is not called recursively.
# self.flowin() can only issue calls to self.addpendingblock().
# The analysis of a block can be in three states:
# * block not in self.annotated:
# never seen the block.
# * self.annotated[block] == False:
# the input variables of the block have bindings but we
# still have to consider all the operations in the block.
# * self.annotated[block] == graph-containing-block:
# analysis done (at least until we find we must generalize the
# input variables).
#print '* processblock', block, cells
self.annotated[block] = graph
if block in self.blocked_blocks:
del self.blocked_blocks[block]
try:
self.flowin(graph, block)
except BlockedInference as e:
self.annotated[block] = False # failed, hopefully temporarily
self.blocked_blocks[block] = (graph, e.opindex)
except Exception as e:
# hack for debug tools only
if not hasattr(e, '__annotator_block'):
setattr(e, '__annotator_block', block)
raise
# The dict 'added_blocks' is used by rpython.annlowlevel to
# detect which are the new blocks that annotating an additional
# small helper creates.
if self.added_blocks is not None:
self.added_blocks[block] = True
def reflowpendingblock(self, graph, block):
assert not self.frozen
assert graph not in self.fixed_graphs
self.pendingblocks[block] = graph
assert block in self.annotated
self.annotated[block] = False # must re-flow
self.blocked_blocks[block] = (graph, None)
def bindinputargs(self, graph, block, inputcells):
# Create the initial bindings for the input args of a block.
assert len(block.inputargs) == len(inputcells)
for a, cell in zip(block.inputargs, inputcells):
self.setbinding(a, cell)
self.annotated[block] = False # must flowin.
self.blocked_blocks[block] = (graph, None)
def mergeinputargs(self, graph, block, inputcells):
# Merge the new 'cells' with each of the block's existing input
# variables.
oldcells = [self.binding(a) for a in block.inputargs]
try:
unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
except annmodel.UnionError as e:
# Add source code to the UnionError
e.source = '\n'.join(source_lines(graph, block, None, long=True))
raise
# if the merged cells changed, we must redo the analysis
if unions != oldcells:
self.bindinputargs(graph, block, unions)
def apply_renaming(self, s_out, renaming):
if hasattr(s_out, 'is_type_of'):
renamed_is_type_of = []
for v in s_out.is_type_of:
renamed_is_type_of += renaming[v]
assert s_out.knowntype is type
newcell = typeof(renamed_is_type_of)
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
if hasattr(s_out, 'knowntypedata'):
renamed_knowntypedata = {}
for value, constraints in s_out.knowntypedata.items():
renamed_knowntypedata[value] = {}
for v, s in constraints.items():
new_vs = renaming.get(v, [])
for new_v in new_vs:
renamed_knowntypedata[value][new_v] = s
assert isinstance(s_out, annmodel.SomeBool)
newcell = annmodel.SomeBool()
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
s_out.set_knowntypedata(renamed_knowntypedata)
return s_out
def whereami(self, position_key):
graph, block, i = position_key
blk = ""
if block:
at = block.at()
if at:
blk = " block"+at
opid=""
if i is not None:
opid = " op=%d" % i
return repr(graph) + blk + opid
def flowin(self, graph, block):
try:
i = 0
while i < len(block.operations):
op = block.operations[i]
with self.bookkeeper.at_position((graph, block, i)):
new_ops = op.transform(self)
if new_ops is not None:
block.operations[i:i+1] = new_ops
if not new_ops:
continue
new_ops[-1].result = op.result
op = new_ops[0]
self.consider_op(op)
i += 1
except BlockedInference as e:
if e.op is block.raising_op:
# this is the case where the last operation of the block will
# always raise an exception which is immediately caught by
# an exception handler. We then only follow the exceptional
# branches.
exits = [link for link in block.exits
if link.exitcase is not None]
elif e.op.opname in ('simple_call', 'call_args', 'next'):
# XXX warning, keep the name of the call operations in sync
# with the flow object space. These are the operations for
# which it is fine to always raise an exception. We then
# swallow the BlockedInference and that's it.
# About 'next': see test_annotate_iter_empty_container().
return
else:
# other cases are problematic (but will hopefully be solved
# later by reflowing). Throw the BlockedInference up to
# processblock().
e.opindex = i
raise
except annmodel.HarmlesslyBlocked:
return
except annmodel.AnnotatorError as e: # note that UnionError is a subclass
e.source = gather_error(self, graph, block, i)
raise
else:
# dead code removal: don't follow all exits if the exitswitch
# is known
exits = block.exits
if isinstance(block.exitswitch, Variable):
s_exitswitch = self.binding(block.exitswitch)
if s_exitswitch.is_constant():
exits = [link for link in exits
if link.exitcase == s_exitswitch.const]
if block.canraise:
op = block.raising_op
s_exception = self.get_exception(op)
for link in exits:
case = link.exitcase
if case is None:
self.follow_link(graph, link, {})
continue
if s_exception == s_ImpossibleValue:
break
s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case))
s_matching_exc = intersection(s_exception, s_case)
if s_matching_exc != s_ImpossibleValue:
self.follow_raise_link(graph, link, s_matching_exc)
s_exception = difference(s_exception, s_case)
else:
if isinstance(block.exitswitch, Variable):
knowntypedata = getattr(
block.exitswitch.annotation, "knowntypedata", {})
else:
knowntypedata = {}
for link in exits:
constraints = knowntypedata.get(link.exitcase, {})
self.follow_link(graph, link, constraints)
if block in self.notify:
# reflow from certain positions when this block is done
for callback in self.notify[block]:
if isinstance(callback, tuple):
self.reflowfromposition(callback) # callback is a position
else:
callback()
def follow_link(self, graph, link, constraints):
assert not (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
ignore_link = False
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out in link.args:
s_out = self.annotation(v_out)
if v_out in constraints:
s_constraint = constraints[v_out]
s_out = pair(s_out, s_constraint).improve()
# ignore links that try to pass impossible values
if s_out == s_ImpossibleValue:
ignore_link = True
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
if ignore_link:
return
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
def follow_raise_link(self, graph, link, s_last_exc_value):
v_last_exc_type = link.last_exception
v_last_exc_value = link.last_exc_value
assert (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
assert v_last_exc_type and v_last_exc_value
if isinstance(v_last_exc_value, Variable):
self.setbinding(v_last_exc_value, s_last_exc_value)
if isinstance(v_last_exc_type, Variable):
self.setbinding(v_last_exc_type, typeof([v_last_exc_value]))
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out, v_input in zip(link.args, link.target.inputargs):
if v_out == v_last_exc_type:
s_out = typeof(renaming[v_last_exc_value])
if isinstance(v_last_exc_type, Constant):
s_out.const = v_last_exc_type.value
elif v_last_exc_type.annotation.is_constant():
s_out.const = v_last_exc_type.annotation.const
inputs_s.append(s_out)
else:
s_out = self.annotation(v_out)
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
#___ creating the annotations based on operations ______
def consider_op(self, op):
# let's be careful about avoiding propagated SomeImpossibleValues
# to enter an op; the latter can result in violations of the
# more general results invariant: e.g. if SomeImpossibleValue enters is_
# is_(SomeImpossibleValue, None) -> SomeBool
# is_(SomeInstance(not None), None) -> SomeBool(const=False) ...
# boom -- in the assert of setbinding()
for arg in op.args:
if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue):
raise BlockedInference(self, op, -1)
resultcell = op.consider(self)
if resultcell is None:
resultcell = s_ImpossibleValue
elif resultcell == s_ImpossibleValue:
raise BlockedInference(self, op, -1) # the operation cannot succeed
assert isinstance(resultcell, annmodel.SomeObject)
assert isinstance(op.result, Variable)
self.setbinding(op.result, resultcell) # bind resultcell to op.result
def get_exception(self, operation):
"""
Return the annotation for all exceptions that `operation` may raise.
"""
can_only_throw = operation.get_can_only_throw(self)
if can_only_throw is None:
return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception))
else:
return self.bookkeeper.new_exception(can_only_throw)
class BlockedInference(Exception):
"""This exception signals the type inference engine that the situation
is currently blocked, and that it should try to progress elsewhere."""
def __init__(self, annotator, op, opindex):
self.annotator = annotator
try:
self.break_at = annotator.bookkeeper.position_key
except AttributeError:
self.break_at = None
self.op = op
self.opindex = opindex
def __repr__(self):
if not self.break_at:
break_at = "?"
else:
break_at = self.annotator.whereami(self.break_at)
return "<BlockedInference break_at %s [%s]>" %(break_at, self.op)
__str__ = __repr__
| <filename>rpython/annotator/annrpython.py
from __future__ import absolute_import
import types
from collections import defaultdict
from rpython.tool.ansi_print import AnsiLogger
from rpython.tool.pairtype import pair
from rpython.tool.error import (format_blocked_annotation_error,
gather_error, source_lines)
from rpython.flowspace.model import Variable, Constant, checkgraph
from rpython.translator import simplify, transform
from rpython.annotator import model as annmodel, signature
from rpython.annotator.model import (
typeof, s_ImpossibleValue, SomeInstance, intersection, difference)
from rpython.annotator.bookkeeper import Bookkeeper
from rpython.rtyper.normalizecalls import perform_normalizations
log = AnsiLogger("annrpython")
class RPythonAnnotator(object):
"""Block annotator for RPython.
See description in doc/translation.txt."""
def __init__(self, translator=None, policy=None, bookkeeper=None):
import rpython.rtyper.extfuncregistry # has side effects
if translator is None:
# interface for tests
from rpython.translator.translator import TranslationContext
translator = TranslationContext()
translator.annotator = self
self.translator = translator
self.pendingblocks = {} # map {block: graph-containing-it}
self.annotated = {} # set of blocks already seen
self.added_blocks = None # see processblock() below
self.links_followed = {} # set of links that have ever been followed
self.notify = {} # {block: {positions-to-reflow-from-when-done}}
self.fixed_graphs = {} # set of graphs not to annotate again
self.blocked_blocks = {} # set of {blocked_block: (graph, index)}
# --- the following information is recorded for debugging ---
self.blocked_graphs = {} # set of graphs that have blocked blocks
# --- end of debugging information ---
self.frozen = False
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
self.policy = AnnotatorPolicy()
else:
self.policy = policy
if bookkeeper is None:
bookkeeper = Bookkeeper(self)
self.bookkeeper = bookkeeper
def __getstate__(self):
attrs = """translator pendingblocks annotated links_followed
notify bookkeeper frozen policy added_blocks""".split()
ret = self.__dict__.copy()
for key, value in ret.items():
if key not in attrs:
assert type(value) is dict, (
"%r is not dict. please update %s.__getstate__" %
(key, self.__class__.__name__))
ret[key] = {}
return ret
#___ convenience high-level interface __________________
def build_types(self, function, input_arg_types, complete_now=True,
main_entry_point=False):
"""Recursively build annotations about the specific entry point."""
assert isinstance(function, types.FunctionType), "fix that!"
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# make input arguments and set their type
args_s = [self.typeannotation(t) for t in input_arg_types]
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy)
if main_entry_point:
self.translator.entry_point_graph = flowgraph
return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now)
def get_call_parameters(self, function, args_s, policy):
desc = self.bookkeeper.getdesc(function)
prevpolicy = self.policy
self.policy = policy
self.bookkeeper.enter(None)
try:
return desc.get_call_parameters(args_s)
finally:
self.bookkeeper.leave()
self.policy = prevpolicy
def annotate_helper(self, function, args_s, policy=None):
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
graph, inputcells = self.get_call_parameters(function, args_s, policy)
self.build_graph_types(graph, inputcells, complete_now=False)
self.complete_helpers(policy)
return graph
def complete_helpers(self, policy):
saved = self.policy, self.added_blocks
self.policy = policy
try:
self.added_blocks = {}
self.complete()
# invoke annotation simplifications for the new blocks
self.simplify(block_subset=self.added_blocks)
finally:
self.policy, self.added_blocks = saved
def build_graph_types(self, flowgraph, inputcells, complete_now=True):
checkgraph(flowgraph)
nbarg = len(flowgraph.getargs())
assert len(inputcells) == nbarg # wrong number of args
# register the entry point
self.addpendinggraph(flowgraph, inputcells)
# recursively proceed until no more pending block is left
if complete_now:
self.complete()
return self.annotation(flowgraph.getreturnvar())
def gettype(self, variable):
"""Return the known type of a control flow graph variable,
defaulting to 'object'."""
if isinstance(variable, Constant):
return type(variable.value)
elif isinstance(variable, Variable):
s_variable = variable.annotation
if s_variable:
return s_variable.knowntype
else:
return object
else:
raise TypeError("Variable or Constant instance expected, "
"got %r" % (variable,))
def getuserclassdefinitions(self):
"""Return a list of ClassDefs."""
return self.bookkeeper.classdefs
#___ medium-level interface ____________________________
def addpendinggraph(self, flowgraph, inputcells):
self.addpendingblock(flowgraph, flowgraph.startblock, inputcells)
def addpendingblock(self, graph, block, cells):
"""Register an entry point into block with the given input cells."""
if graph in self.fixed_graphs:
# special case for annotating/rtyping in several phases: calling
# a graph that has already been rtyped. Safety-check the new
# annotations that are passed in, and don't annotate the old
# graph -- it's already low-level operations!
for a, s_newarg in zip(block.inputargs, cells):
s_oldarg = self.binding(a)
assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg
else:
assert not self.frozen
if block not in self.annotated:
self.bindinputargs(graph, block, cells)
else:
self.mergeinputargs(graph, block, cells)
if not self.annotated[block]:
self.pendingblocks[block] = graph
def complete_pending_blocks(self):
while self.pendingblocks:
block, graph = self.pendingblocks.popitem()
self.processblock(graph, block)
def complete(self):
"""Process pending blocks until none is left."""
while True:
self.complete_pending_blocks()
self.policy.no_more_blocks_to_annotate(self)
if not self.pendingblocks:
break # finished
        # make sure that the return variables of all graphs are annotated
if self.added_blocks is not None:
newgraphs = [self.annotated[block] for block in self.added_blocks]
newgraphs = dict.fromkeys(newgraphs)
got_blocked_blocks = False in newgraphs
else:
newgraphs = self.translator.graphs #all of them
got_blocked_blocks = False in self.annotated.values()
if got_blocked_blocks:
for graph in self.blocked_graphs.values():
self.blocked_graphs[graph] = True
blocked_blocks = [block for block, done in self.annotated.items()
if done is False]
assert len(blocked_blocks) == len(self.blocked_blocks)
text = format_blocked_annotation_error(self, self.blocked_blocks)
#raise SystemExit()
raise annmodel.AnnotatorError(text)
for graph in newgraphs:
v = graph.getreturnvar()
if v.annotation is None:
self.setbinding(v, s_ImpossibleValue)
def validate(self):
"""Check that the annotation results are valid"""
self.bookkeeper.check_no_flags_on_instances()
def annotation(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
if isinstance(arg, Variable):
return arg.annotation
elif isinstance(arg, Constant):
return self.bookkeeper.immutablevalue(arg.value)
else:
raise TypeError('Variable or Constant expected, got %r' % (arg,))
def binding(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
s_arg = self.annotation(arg)
if s_arg is None:
raise KeyError
return s_arg
def typeannotation(self, t):
return signature.annotation(t, self.bookkeeper)
def setbinding(self, arg, s_value):
s_old = arg.annotation
if s_old is not None:
if not s_value.contains(s_old):
log.WARNING("%s does not contain %s" % (s_value, s_old))
log.WARNING("%s" % annmodel.unionof(s_value, s_old))
assert False
arg.annotation = s_value
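setbinding() only ever lets a binding grow: the new annotation must contain the old one, otherwise the assert above fires after logging both values and their union. A toy version of that invariant, with plain sets standing in for the real annotation lattice (an assumption made purely for illustration):

# Toy invariant check: rebinding a variable may only generalize its annotation.
def toy_setbinding(bindings, var, new_value):
    old = bindings.get(var)
    if old is not None:
        # mirrors `s_value.contains(s_old)`: the new set must be a superset
        assert new_value >= old, "%r does not contain %r" % (new_value, old)
    bindings[var] = new_value

b = {}
toy_setbinding(b, 'x', {int})
toy_setbinding(b, 'x', {int, float})   # fine: strictly more general
# toy_setbinding(b, 'x', {str})        # would fail: not a superset of {int}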
def warning(self, msg, pos=None):
if pos is None:
try:
pos = self.bookkeeper.position_key
except AttributeError:
pos = '?'
if pos != '?':
pos = self.whereami(pos)
log.WARNING("%s/ %s" % (pos, msg))
#___ interface for annotator.bookkeeper _______
def recursivecall(self, graph, whence, inputcells):
if isinstance(whence, tuple):
parent_graph, parent_block, parent_index = whence
tag = parent_block, parent_index
self.translator.update_call_graph(parent_graph, graph, tag)
# self.notify[graph.returnblock] is a dictionary of call
# points to this func which triggers a reflow whenever the
# return block of this graph has been analysed.
callpositions = self.notify.setdefault(graph.returnblock, {})
if whence is not None:
if callable(whence):
def callback():
whence(self, graph)
else:
callback = whence
callpositions[callback] = True
# generalize the function's input arguments
self.addpendingblock(graph, graph.startblock, inputcells)
# get the (current) return value
v = graph.getreturnvar()
try:
return self.binding(v)
except KeyError:
# the function didn't reach any return statement so far.
# (some functions actually never do, they always raise exceptions)
return s_ImpossibleValue
def reflowfromposition(self, position_key):
graph, block, index = position_key
self.reflowpendingblock(graph, block)
def call_sites(self):
newblocks = self.added_blocks
if newblocks is None:
newblocks = self.annotated # all of them
for block in newblocks:
for op in block.operations:
if op.opname in ('simple_call', 'call_args'):
yield op
# some blocks are partially annotated
if op.result.annotation is None:
break # ignore the unannotated part
#___ simplification (should be moved elsewhere?) _______
def simplify(self, block_subset=None, extra_passes=None):
# Generic simplifications
transform.transform_graph(self, block_subset=block_subset,
extra_passes=extra_passes)
if block_subset is None:
graphs = self.translator.graphs
else:
graphs = {}
for block in block_subset:
graph = self.annotated.get(block)
if graph:
graphs[graph] = True
for graph in graphs:
simplify.eliminate_empty_blocks(graph)
self.bookkeeper.compute_at_fixpoint()
if block_subset is None:
perform_normalizations(self)
#___ flowing annotations in blocks _____________________
def processblock(self, graph, block):
# Important: this is not called recursively.
# self.flowin() can only issue calls to self.addpendingblock().
# The analysis of a block can be in three states:
# * block not in self.annotated:
# never seen the block.
# * self.annotated[block] == False:
# the input variables of the block have bindings but we
# still have to consider all the operations in the block.
# * self.annotated[block] == graph-containing-block:
# analysis done (at least until we find we must generalize the
# input variables).
#print '* processblock', block, cells
self.annotated[block] = graph
if block in self.blocked_blocks:
del self.blocked_blocks[block]
try:
self.flowin(graph, block)
except BlockedInference as e:
self.annotated[block] = False # failed, hopefully temporarily
self.blocked_blocks[block] = (graph, e.opindex)
except Exception as e:
# hack for debug tools only
if not hasattr(e, '__annotator_block'):
setattr(e, '__annotator_block', block)
raise
# The dict 'added_blocks' is used by rpython.annlowlevel to
# detect which are the new blocks that annotating an additional
# small helper creates.
if self.added_blocks is not None:
self.added_blocks[block] = True
def reflowpendingblock(self, graph, block):
assert not self.frozen
assert graph not in self.fixed_graphs
self.pendingblocks[block] = graph
assert block in self.annotated
self.annotated[block] = False # must re-flow
self.blocked_blocks[block] = (graph, None)
def bindinputargs(self, graph, block, inputcells):
# Create the initial bindings for the input args of a block.
assert len(block.inputargs) == len(inputcells)
for a, cell in zip(block.inputargs, inputcells):
self.setbinding(a, cell)
self.annotated[block] = False # must flowin.
self.blocked_blocks[block] = (graph, None)
def mergeinputargs(self, graph, block, inputcells):
# Merge the new 'cells' with each of the block's existing input
# variables.
oldcells = [self.binding(a) for a in block.inputargs]
try:
unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
except annmodel.UnionError as e:
# Add source code to the UnionError
e.source = '\n'.join(source_lines(graph, block, None, long=True))
raise
# if the merged cells changed, we must redo the analysis
if unions != oldcells:
self.bindinputargs(graph, block, unions)
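Taken together, complete(), complete_pending_blocks() and mergeinputargs() form a classic worklist fixed-point loop: addpendingblock() re-queues a block whenever mergeinputargs() finds that the union of its input annotations has grown, and the process stops once no merge changes anything. The standalone sketch below mimics only that loop; the block names, edges and set-valued "annotations" are invented for illustration.

# Toy worklist: blocks are names, "annotations" are sets, and an edge may add
# extra values (standing in for what a block's operations infer).
def annotate_toy(entry, entry_values, edges):
    # edges: {block: [(successor, values_added_along_this_link), ...]}
    bindings = {entry: set(entry_values)}
    pending = [entry]
    while pending:                       # complete_pending_blocks()
        block = pending.pop()
        for succ, extra in edges.get(block, []):
            new = bindings[block] | extra
            old = bindings.get(succ, set())
            merged = old | new           # mergeinputargs() via union
            if merged != old:            # only re-flow when something grew
                bindings[succ] = merged
                pending.append(succ)     # addpendingblock()
    return bindings

edges = {
    'start': [('loop', {int})],
    'loop':  [('loop', {float}), ('exit', set())],
}
print(annotate_toy('start', {bool}, edges))
# reaches a fixed point: 'loop' and 'exit' both end up with {bool, int, float}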
def apply_renaming(self, s_out, renaming):
if hasattr(s_out, 'is_type_of'):
renamed_is_type_of = []
for v in s_out.is_type_of:
renamed_is_type_of += renaming[v]
assert s_out.knowntype is type
newcell = typeof(renamed_is_type_of)
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
if hasattr(s_out, 'knowntypedata'):
renamed_knowntypedata = {}
for value, constraints in s_out.knowntypedata.items():
renamed_knowntypedata[value] = {}
for v, s in constraints.items():
new_vs = renaming.get(v, [])
for new_v in new_vs:
renamed_knowntypedata[value][new_v] = s
assert isinstance(s_out, annmodel.SomeBool)
newcell = annmodel.SomeBool()
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
s_out.set_knowntypedata(renamed_knowntypedata)
return s_out
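The renaming built in follow_link()/follow_raise_link() maps each variable of the outgoing link to the input variable(s) of the target block it feeds, and apply_renaming() above rewrites the variables referenced inside an annotation (the is_type_of list, the knowntypedata constraint keys) in those terms. A stripped-down, standalone illustration of just the mapping step, with invented variable names:

from collections import defaultdict

# link.args -> link.target.inputargs, as zipped in follow_link above
link_args = ['v1', 'v2', 'v1']          # the same source variable may feed
target_inputargs = ['a', 'b', 'c']      # several input positions

renaming = defaultdict(list)
for v_out, v_input in zip(link_args, target_inputargs):
    renaming[v_out].append(v_input)

print(dict(renaming))   # {'v1': ['a', 'c'], 'v2': ['b']}

# rewriting a list of referenced variables, the way is_type_of is rewritten:
referenced = ['v1', 'v2']
renamed = []
for v in referenced:
    renamed += renaming[v]
print(renamed)          # ['a', 'c', 'b']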
def whereami(self, position_key):
graph, block, i = position_key
blk = ""
if block:
at = block.at()
if at:
blk = " block"+at
opid=""
if i is not None:
opid = " op=%d" % i
return repr(graph) + blk + opid
def flowin(self, graph, block):
try:
i = 0
while i < len(block.operations):
op = block.operations[i]
with self.bookkeeper.at_position((graph, block, i)):
new_ops = op.transform(self)
if new_ops is not None:
block.operations[i:i+1] = new_ops
if not new_ops:
continue
new_ops[-1].result = op.result
op = new_ops[0]
self.consider_op(op)
i += 1
except BlockedInference as e:
if e.op is block.raising_op:
# this is the case where the last operation of the block will
# always raise an exception which is immediately caught by
# an exception handler. We then only follow the exceptional
# branches.
exits = [link for link in block.exits
if link.exitcase is not None]
elif e.op.opname in ('simple_call', 'call_args', 'next'):
# XXX warning, keep the name of the call operations in sync
# with the flow object space. These are the operations for
# which it is fine to always raise an exception. We then
# swallow the BlockedInference and that's it.
# About 'next': see test_annotate_iter_empty_container().
return
else:
# other cases are problematic (but will hopefully be solved
# later by reflowing). Throw the BlockedInference up to
# processblock().
e.opindex = i
raise
except annmodel.HarmlesslyBlocked:
return
except annmodel.AnnotatorError as e: # note that UnionError is a subclass
e.source = gather_error(self, graph, block, i)
raise
else:
# dead code removal: don't follow all exits if the exitswitch
# is known
exits = block.exits
if isinstance(block.exitswitch, Variable):
s_exitswitch = self.binding(block.exitswitch)
if s_exitswitch.is_constant():
exits = [link for link in exits
if link.exitcase == s_exitswitch.const]
if block.canraise:
op = block.raising_op
s_exception = self.get_exception(op)
for link in exits:
case = link.exitcase
if case is None:
self.follow_link(graph, link, {})
continue
if s_exception == s_ImpossibleValue:
break
s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case))
s_matching_exc = intersection(s_exception, s_case)
if s_matching_exc != s_ImpossibleValue:
self.follow_raise_link(graph, link, s_matching_exc)
s_exception = difference(s_exception, s_case)
else:
if isinstance(block.exitswitch, Variable):
knowntypedata = getattr(
block.exitswitch.annotation, "knowntypedata", {})
else:
knowntypedata = {}
for link in exits:
constraints = knowntypedata.get(link.exitcase, {})
self.follow_link(graph, link, constraints)
if block in self.notify:
# reflow from certain positions when this block is done
for callback in self.notify[block]:
if isinstance(callback, tuple):
self.reflowfromposition(callback) # callback is a position
else:
callback()
def follow_link(self, graph, link, constraints):
assert not (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
ignore_link = False
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out in link.args:
s_out = self.annotation(v_out)
if v_out in constraints:
s_constraint = constraints[v_out]
s_out = pair(s_out, s_constraint).improve()
# ignore links that try to pass impossible values
if s_out == s_ImpossibleValue:
ignore_link = True
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
if ignore_link:
return
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
def follow_raise_link(self, graph, link, s_last_exc_value):
v_last_exc_type = link.last_exception
v_last_exc_value = link.last_exc_value
assert (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
assert v_last_exc_type and v_last_exc_value
if isinstance(v_last_exc_value, Variable):
self.setbinding(v_last_exc_value, s_last_exc_value)
if isinstance(v_last_exc_type, Variable):
self.setbinding(v_last_exc_type, typeof([v_last_exc_value]))
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out, v_input in zip(link.args, link.target.inputargs):
if v_out == v_last_exc_type:
s_out = typeof(renaming[v_last_exc_value])
if isinstance(v_last_exc_type, Constant):
s_out.const = v_last_exc_type.value
elif v_last_exc_type.annotation.is_constant():
s_out.const = v_last_exc_type.annotation.const
inputs_s.append(s_out)
else:
s_out = self.annotation(v_out)
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
#___ creating the annotations based on operations ______
def consider_op(self, op):
        # let's be careful to avoid letting propagated SomeImpossibleValues
        # enter an op; the latter can result in violations of the
# more general results invariant: e.g. if SomeImpossibleValue enters is_
# is_(SomeImpossibleValue, None) -> SomeBool
# is_(SomeInstance(not None), None) -> SomeBool(const=False) ...
# boom -- in the assert of setbinding()
for arg in op.args:
if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue):
raise BlockedInference(self, op, -1)
resultcell = op.consider(self)
if resultcell is None:
resultcell = s_ImpossibleValue
elif resultcell == s_ImpossibleValue:
raise BlockedInference(self, op, -1) # the operation cannot succeed
assert isinstance(resultcell, annmodel.SomeObject)
assert isinstance(op.result, Variable)
self.setbinding(op.result, resultcell) # bind resultcell to op.result
def get_exception(self, operation):
"""
Return the annotation for all exceptions that `operation` may raise.
"""
can_only_throw = operation.get_can_only_throw(self)
if can_only_throw is None:
return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception))
else:
return self.bookkeeper.new_exception(can_only_throw)
class BlockedInference(Exception):
"""This exception signals the type inference engine that the situation
is currently blocked, and that it should try to progress elsewhere."""
def __init__(self, annotator, op, opindex):
self.annotator = annotator
try:
self.break_at = annotator.bookkeeper.position_key
except AttributeError:
self.break_at = None
self.op = op
self.opindex = opindex
def __repr__(self):
if not self.break_at:
break_at = "?"
else:
break_at = self.annotator.whereami(self.break_at)
return "<BlockedInference break_at %s [%s]>" %(break_at, self.op)
__str__ = __repr__
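BlockedInference travels from consider_op()/flowin() up to processblock() as a plain raise-and-catch protocol: the operation that cannot be typed yet aborts the whole block, processblock() parks the block as blocked, and a later, more general re-flow of its inputs lets it complete. A compressed, self-contained sketch of just that control flow, with trivial stand-ins for the annotator internals:

# Sketch of the blocked-block protocol only; 'Blocked' stands in for
# BlockedInference and the string "ops" are fake placeholders.
class Blocked(Exception):
    def __init__(self, opindex):
        self.opindex = opindex

def consider_op_stub(op, known):
    if op not in known:                     # cannot compute a result yet
        raise Blocked(-1)

def process_block_stub(block_ops, known, blocked_blocks, annotated, block):
    try:
        for op in block_ops:
            consider_op_stub(op, known)
    except Blocked as e:
        annotated[block] = False            # failed, hopefully temporarily
        blocked_blocks[block] = e.opindex   # remembered for error reporting
    else:
        annotated[block] = True
        blocked_blocks.pop(block, None)

annotated, blocked = {}, {}
process_block_stub(['add', 'call_helper'], {'add'}, blocked, annotated, 'b1')
print(annotated, blocked)   # b1 stays blocked until 'call_helper' is known
process_block_stub(['add', 'call_helper'], {'add', 'call_helper'},
                   blocked, annotated, 'b1')
print(annotated, blocked)   # the re-flow succeeds, b1 is no longer blocked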
| en | 0.866289 | Block annotator for RPython. See description in doc/translation.txt. # has side effects # interface for tests # map {block: graph-containing-it} # set of blocks already seen # see processblock() below # set of links that have ever been followed # {block: {positions-to-reflow-from-when-done}} # set of graphs not to annotate again # set of {blocked_block: (graph, index)} # --- the following information is recorded for debugging --- # set of graphs that have blocked blocks # --- end of debugging information --- translator pendingblocks annotated links_followed notify bookkeeper frozen policy added_blocks #___ convenience high-level interface __________________ Recursively build annotations about the specific entry point. # make input arguments and set their type # XXX hack # XXX hack # invoke annotation simplifications for the new blocks # wrong number of args # register the entry point # recursively proceed until no more pending block is left Return the known type of a control flow graph variable, defaulting to 'object'. Return a list of ClassDefs. #___ medium-level interface ____________________________ Register an entry point into block with the given input cells. # special case for annotating/rtyping in several phases: calling # a graph that has already been rtyped. Safety-check the new # annotations that are passed in, and don't annotate the old # graph -- it's already low-level operations! Process pending blocks until none is left. # finished # make sure that the return variables of all graphs is annotated #all of them #raise SystemExit() Check that the annotation results are valid #___ interface for annotator.bookkeeper _______ # self.notify[graph.returnblock] is a dictionary of call # points to this func which triggers a reflow whenever the # return block of this graph has been analysed. # generalize the function's input arguments # get the (current) return value # the function didn't reach any return statement so far. # (some functions actually never do, they always raise exceptions) # all of them # some blocks are partially annotated # ignore the unannotated part #___ simplification (should be moved elsewhere?) _______ # Generic simplifications #___ flowing annotations in blocks _____________________ # Important: this is not called recursively. # self.flowin() can only issue calls to self.addpendingblock(). # The analysis of a block can be in three states: # * block not in self.annotated: # never seen the block. # * self.annotated[block] == False: # the input variables of the block have bindings but we # still have to consider all the operations in the block. # * self.annotated[block] == graph-containing-block: # analysis done (at least until we find we must generalize the # input variables). #print '* processblock', block, cells # failed, hopefully temporarily # hack for debug tools only # The dict 'added_blocks' is used by rpython.annlowlevel to # detect which are the new blocks that annotating an additional # small helper creates. # must re-flow # Create the initial bindings for the input args of a block. # must flowin. # Merge the new 'cells' with each of the block's existing input # variables. # Add source code to the UnionError # if the merged cells changed, we must redo the analysis # this is the case where the last operation of the block will # always raise an exception which is immediately caught by # an exception handler. We then only follow the exceptional # branches. 
# XXX warning, keep the name of the call operations in sync # with the flow object space. These are the operations for # which it is fine to always raise an exception. We then # swallow the BlockedInference and that's it. # About 'next': see test_annotate_iter_empty_container(). # other cases are problematic (but will hopefully be solved # later by reflowing). Throw the BlockedInference up to # processblock(). # note that UnionError is a subclass # dead code removal: don't follow all exits if the exitswitch # is known # reflow from certain positions when this block is done # callback is a position # ignore links that try to pass impossible values #___ creating the annotations based on operations ______ # let's be careful about avoiding propagated SomeImpossibleValues # to enter an op; the latter can result in violations of the # more general results invariant: e.g. if SomeImpossibleValue enters is_ # is_(SomeImpossibleValue, None) -> SomeBool # is_(SomeInstance(not None), None) -> SomeBool(const=False) ... # boom -- in the assert of setbinding() # the operation cannot succeed # bind resultcell to op.result Return the annotation for all exceptions that `operation` may raise. This exception signals the type inference engine that the situation is currently blocked, and that it should try to progress elsewhere. | 2.263583 | 2 |
azure-devops/azext_devops/vstsCompressed/work_item_tracking_process/v4_0/models/models.py | vijayraavi/azure-devops-cli-extension | 0 | 296 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Control(Model):
"""Control.
:param contribution: Contribution for the control.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param control_type: Type of the control.
:type control_type: str
:param height: Height of the control, for html controls.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the field
:type label: str
:param metadata: Inner text of the control.
:type metadata: str
:param order:
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param read_only: A value indicating if the control is readonly.
:type read_only: bool
:param visible: A value indicating if the control should be hidden or not.
:type visible: bool
:param watermark: Watermark text for the textbox.
:type watermark: str
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'control_type': {'key': 'controlType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'},
'watermark': {'key': 'watermark', 'type': 'str'}
}
def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None):
super(Control, self).__init__()
self.contribution = contribution
self.control_type = control_type
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.metadata = metadata
self.order = order
self.overridden = overridden
self.read_only = read_only
self.visible = visible
self.watermark = watermark
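The classes in this module are plain msrest Model subclasses: every constructor argument is optional, unset attributes default to None, and _attribute_map records the camelCase wire name and type used by the msrest (de)serialization machinery. A minimal usage sketch, with invented field values and assuming the classes of this module are in scope:

# Hypothetical values, purely for illustration.
html_control = Control(
    control_type='HtmlFieldControl',
    id='System.Description',
    label='Description',
    height=200,
    visible=True,
    read_only=False,
)
print(html_control.label)        # 'Description'
print(html_control.watermark)    # None: unset attributes default to None
# _attribute_map ties the Python name to the camelCase wire name:
print(Control._attribute_map['read_only'])   # {'key': 'readOnly', 'type': 'bool'}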
class CreateProcessModel(Model):
"""CreateProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param parent_process_type_id:
:type parent_process_type_id: str
:param reference_name:
:type reference_name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'}
}
def __init__(self, description=None, name=None, parent_process_type_id=None, reference_name=None):
super(CreateProcessModel, self).__init__()
self.description = description
self.name = name
self.parent_process_type_id = parent_process_type_id
self.reference_name = reference_name
class Extension(Model):
"""Extension.
:param id:
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, id=None):
super(Extension, self).__init__()
self.id = id
class FieldModel(Model):
"""FieldModel.
:param description:
:type description: str
:param id:
:type id: str
:param is_identity:
:type is_identity: bool
:param name:
:type name: str
:param type:
:type type: object
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_identity': {'key': 'isIdentity', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, is_identity=None, name=None, type=None, url=None):
super(FieldModel, self).__init__()
self.description = description
self.id = id
self.is_identity = is_identity
self.name = name
self.type = type
self.url = url
class FieldRuleModel(Model):
"""FieldRuleModel.
:param actions:
:type actions: list of :class:`RuleActionModel <work-item-tracking.v4_0.models.RuleActionModel>`
:param conditions:
:type conditions: list of :class:`RuleConditionModel <work-item-tracking.v4_0.models.RuleConditionModel>`
:param friendly_name:
:type friendly_name: str
:param id:
:type id: str
:param is_disabled:
:type is_disabled: bool
:param is_system:
:type is_system: bool
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleActionModel]'},
'conditions': {'key': 'conditions', 'type': '[RuleConditionModel]'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'is_system': {'key': 'isSystem', 'type': 'bool'}
}
def __init__(self, actions=None, conditions=None, friendly_name=None, id=None, is_disabled=None, is_system=None):
super(FieldRuleModel, self).__init__()
self.actions = actions
self.conditions = conditions
self.friendly_name = friendly_name
self.id = id
self.is_disabled = is_disabled
self.is_system = is_system
class FormLayout(Model):
"""FormLayout.
:param extensions: Gets and sets extensions list
:type extensions: list of :class:`Extension <work-item-tracking.v4_0.models.Extension>`
:param pages: Top level tabs of the layout.
:type pages: list of :class:`Page <work-item-tracking.v4_0.models.Page>`
    :param system_controls: Header controls of the layout.
:type system_controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[Extension]'},
'pages': {'key': 'pages', 'type': '[Page]'},
'system_controls': {'key': 'systemControls', 'type': '[Control]'}
}
def __init__(self, extensions=None, pages=None, system_controls=None):
super(FormLayout, self).__init__()
self.extensions = extensions
self.pages = pages
self.system_controls = system_controls
class Group(Model):
"""Group.
:param contribution: Contribution for the group.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param controls: Controls to be put in the group.
:type controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
:param height: The height for the contribution.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the group.
:type label: str
:param order: Order in which the group should appear in the section.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param visible: A value indicating if the group should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'controls': {'key': 'controls', 'type': '[Control]'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, controls=None, height=None, id=None, inherited=None, is_contribution=None, label=None, order=None, overridden=None, visible=None):
super(Group, self).__init__()
self.contribution = contribution
self.controls = controls
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.order = order
self.overridden = overridden
self.visible = visible
class Page(Model):
"""Page.
:param contribution: Contribution for the page.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: The label for the page.
:type label: str
:param locked: A value indicating whether any user operations are permitted on this page and the contents of this page
:type locked: bool
:param order: Order in which the page should appear in the layout.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param page_type: The icon for the page.
:type page_type: object
:param sections: The sections of the page.
:type sections: list of :class:`Section <work-item-tracking.v4_0.models.Section>`
:param visible: A value indicating if the page should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'locked': {'key': 'locked', 'type': 'bool'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'page_type': {'key': 'pageType', 'type': 'object'},
'sections': {'key': 'sections', 'type': '[Section]'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, id=None, inherited=None, is_contribution=None, label=None, locked=None, order=None, overridden=None, page_type=None, sections=None, visible=None):
super(Page, self).__init__()
self.contribution = contribution
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.locked = locked
self.order = order
self.overridden = overridden
self.page_type = page_type
self.sections = sections
self.visible = visible
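The layout models nest as FormLayout -> Page -> Section -> Group -> Control, mirroring the work item form: a page holds sections, a section holds groups, and a group holds the individual controls. A small hand-built example of that shape; all ids and labels are invented, and the classes of this module are assumed to be in scope (including Section, which is defined further below):

# Invented ids/labels; only the nesting shape matters here.
layout = FormLayout(
    pages=[
        Page(
            id='Custom.DetailsPage',
            label='Details',
            page_type='custom',
            sections=[
                Section(
                    id='Section1',
                    groups=[
                        Group(
                            id='Custom.MainGroup',
                            label='Planning',
                            controls=[
                                Control(id='Custom.Effort', label='Effort',
                                        control_type='FieldControl'),
                            ],
                        ),
                    ],
                ),
            ],
        ),
    ],
    system_controls=[Control(id='System.Title', label='Title')],
)
print(layout.pages[0].sections[0].groups[0].controls[0].label)   # 'Effort'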
class ProcessModel(Model):
"""ProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param projects:
:type projects: list of :class:`ProjectReference <work-item-tracking.v4_0.models.ProjectReference>`
:param properties:
:type properties: :class:`ProcessProperties <work-item-tracking.v4_0.models.ProcessProperties>`
:param reference_name:
:type reference_name: str
:param type_id:
:type type_id: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'projects': {'key': 'projects', 'type': '[ProjectReference]'},
'properties': {'key': 'properties', 'type': 'ProcessProperties'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, description=None, name=None, projects=None, properties=None, reference_name=None, type_id=None):
super(ProcessModel, self).__init__()
self.description = description
self.name = name
self.projects = projects
self.properties = properties
self.reference_name = reference_name
self.type_id = type_id
class ProcessProperties(Model):
"""ProcessProperties.
:param class_:
:type class_: object
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param parent_process_type_id:
:type parent_process_type_id: str
:param version:
:type version: str
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'object'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, class_=None, is_default=None, is_enabled=None, parent_process_type_id=None, version=None):
super(ProcessProperties, self).__init__()
self.class_ = class_
self.is_default = is_default
self.is_enabled = is_enabled
self.parent_process_type_id = parent_process_type_id
self.version = version
class ProjectReference(Model):
"""ProjectReference.
:param description:
:type description: str
:param id:
:type id: str
:param name:
:type name: str
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, name=None, url=None):
super(ProjectReference, self).__init__()
self.description = description
self.id = id
self.name = name
self.url = url
class RuleActionModel(Model):
"""RuleActionModel.
:param action_type:
:type action_type: str
:param target_field:
:type target_field: str
:param value:
:type value: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'target_field': {'key': 'targetField', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_type=None, target_field=None, value=None):
super(RuleActionModel, self).__init__()
self.action_type = action_type
self.target_field = target_field
self.value = value
class RuleConditionModel(Model):
"""RuleConditionModel.
:param condition_type:
:type condition_type: str
:param field:
:type field: str
:param value:
:type value: str
"""
_attribute_map = {
'condition_type': {'key': 'conditionType', 'type': 'str'},
'field': {'key': 'field', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, condition_type=None, field=None, value=None):
super(RuleConditionModel, self).__init__()
self.condition_type = condition_type
self.field = field
self.value = value
class Section(Model):
"""Section.
:param groups:
:type groups: list of :class:`Group <work-item-tracking.v4_0.models.Group>`
:param id: The id for the layout node.
:type id: str
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
"""
_attribute_map = {
'groups': {'key': 'groups', 'type': '[Group]'},
'id': {'key': 'id', 'type': 'str'},
'overridden': {'key': 'overridden', 'type': 'bool'}
}
def __init__(self, groups=None, id=None, overridden=None):
super(Section, self).__init__()
self.groups = groups
self.id = id
self.overridden = overridden
class UpdateProcessModel(Model):
"""UpdateProcessModel.
:param description:
:type description: str
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param name:
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, is_default=None, is_enabled=None, name=None):
super(UpdateProcessModel, self).__init__()
self.description = description
self.is_default = is_default
self.is_enabled = is_enabled
self.name = name
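CreateProcessModel and UpdateProcessModel are the payloads used when creating a new (inherited) process and when renaming, enabling or defaulting an existing one, while ProcessModel/ProcessProperties above describe what the service returns. A sketch of building the two payloads; the GUID and the names are placeholders, not real values:

# Placeholder parent process type id (the system processes each have a
# well-known GUID; this one is invented for the example).
new_process = CreateProcessModel(
    name='MyAgile',
    description='Agile plus custom work item types',
    parent_process_type_id='00000000-0000-0000-0000-000000000000',
    reference_name='Custom.MyAgile',
)
rename = UpdateProcessModel(name='MyAgile2', is_enabled=True, is_default=False)
print(new_process.reference_name, rename.name)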
class WitContribution(Model):
"""WitContribution.
:param contribution_id: The id for the contribution.
:type contribution_id: str
:param height: The height for the contribution.
:type height: int
:param inputs: A dictionary holding key value pairs for contribution inputs.
:type inputs: dict
:param show_on_deleted_work_item: A value indicating if the contribution should be show on deleted workItem.
:type show_on_deleted_work_item: bool
"""
_attribute_map = {
'contribution_id': {'key': 'contributionId', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'inputs': {'key': 'inputs', 'type': '{object}'},
'show_on_deleted_work_item': {'key': 'showOnDeletedWorkItem', 'type': 'bool'}
}
def __init__(self, contribution_id=None, height=None, inputs=None, show_on_deleted_work_item=None):
super(WitContribution, self).__init__()
self.contribution_id = contribution_id
self.height = height
self.inputs = inputs
self.show_on_deleted_work_item = show_on_deleted_work_item
class WorkItemBehavior(Model):
"""WorkItemBehavior.
:param abstract:
:type abstract: bool
:param color:
:type color: str
:param description:
:type description: str
:param fields:
:type fields: list of :class:`WorkItemBehaviorField <work-item-tracking.v4_0.models.WorkItemBehaviorField>`
:param id:
:type id: str
:param inherits:
:type inherits: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param name:
:type name: str
:param overriden:
:type overriden: bool
:param rank:
:type rank: int
:param url:
:type url: str
"""
_attribute_map = {
'abstract': {'key': 'abstract', 'type': 'bool'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[WorkItemBehaviorField]'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'WorkItemBehaviorReference'},
'name': {'key': 'name', 'type': 'str'},
'overriden': {'key': 'overriden', 'type': 'bool'},
'rank': {'key': 'rank', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, abstract=None, color=None, description=None, fields=None, id=None, inherits=None, name=None, overriden=None, rank=None, url=None):
super(WorkItemBehavior, self).__init__()
self.abstract = abstract
self.color = color
self.description = description
self.fields = fields
self.id = id
self.inherits = inherits
self.name = name
self.overriden = overriden
self.rank = rank
self.url = url
class WorkItemBehaviorField(Model):
"""WorkItemBehaviorField.
:param behavior_field_id:
:type behavior_field_id: str
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'behavior_field_id': {'key': 'behaviorFieldId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior_field_id=None, id=None, url=None):
super(WorkItemBehaviorField, self).__init__()
self.behavior_field_id = behavior_field_id
self.id = id
self.url = url
class WorkItemBehaviorReference(Model):
"""WorkItemBehaviorReference.
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(WorkItemBehaviorReference, self).__init__()
self.id = id
self.url = url
class WorkItemStateResultModel(Model):
"""WorkItemStateResultModel.
:param color:
:type color: str
:param hidden:
:type hidden: bool
:param id:
:type id: str
:param name:
:type name: str
:param order:
:type order: int
:param state_category:
:type state_category: str
:param url:
:type url: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'hidden': {'key': 'hidden', 'type': 'bool'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, color=None, hidden=None, id=None, name=None, order=None, state_category=None, url=None):
super(WorkItemStateResultModel, self).__init__()
self.color = color
self.hidden = hidden
self.id = id
self.name = name
self.order = order
self.state_category = state_category
self.url = url
class WorkItemTypeBehavior(Model):
"""WorkItemTypeBehavior.
:param behavior:
:type behavior: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param is_default:
:type is_default: bool
:param url:
:type url: str
"""
_attribute_map = {
'behavior': {'key': 'behavior', 'type': 'WorkItemBehaviorReference'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior=None, is_default=None, url=None):
super(WorkItemTypeBehavior, self).__init__()
self.behavior = behavior
self.is_default = is_default
self.url = url
class WorkItemTypeModel(Model):
"""WorkItemTypeModel.
:param behaviors:
:type behaviors: list of :class:`WorkItemTypeBehavior <work-item-tracking.v4_0.models.WorkItemTypeBehavior>`
:param class_:
:type class_: object
:param color:
:type color: str
:param description:
:type description: str
:param icon:
:type icon: str
:param id:
:type id: str
:param inherits: Parent WIT Id/Internal ReferenceName that it inherits from
:type inherits: str
:param is_disabled:
:type is_disabled: bool
:param layout:
:type layout: :class:`FormLayout <work-item-tracking.v4_0.models.FormLayout>`
:param name:
:type name: str
:param states:
:type states: list of :class:`WorkItemStateResultModel <work-item-tracking.v4_0.models.WorkItemStateResultModel>`
:param url:
:type url: str
"""
_attribute_map = {
'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'},
'class_': {'key': 'class', 'type': 'object'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'layout': {'key': 'layout', 'type': 'FormLayout'},
'name': {'key': 'name', 'type': 'str'},
'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behaviors=None, class_=None, color=None, description=None, icon=None, id=None, inherits=None, is_disabled=None, layout=None, name=None, states=None, url=None):
super(WorkItemTypeModel, self).__init__()
self.behaviors = behaviors
self.class_ = class_
self.color = color
self.description = description
self.icon = icon
self.id = id
self.inherits = inherits
self.is_disabled = is_disabled
self.layout = layout
self.name = name
self.states = states
self.url = url
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class Control(Model):
"""Control.
:param contribution: Contribution for the control.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param control_type: Type of the control.
:type control_type: str
:param height: Height of the control, for html controls.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the field
:type label: str
:param metadata: Inner text of the control.
:type metadata: str
:param order:
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param read_only: A value indicating if the control is readonly.
:type read_only: bool
:param visible: A value indicating if the control should be hidden or not.
:type visible: bool
:param watermark: Watermark text for the textbox.
:type watermark: str
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'control_type': {'key': 'controlType', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'},
'watermark': {'key': 'watermark', 'type': 'str'}
}
def __init__(self, contribution=None, control_type=None, height=None, id=None, inherited=None, is_contribution=None, label=None, metadata=None, order=None, overridden=None, read_only=None, visible=None, watermark=None):
super(Control, self).__init__()
self.contribution = contribution
self.control_type = control_type
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.metadata = metadata
self.order = order
self.overridden = overridden
self.read_only = read_only
self.visible = visible
self.watermark = watermark
class CreateProcessModel(Model):
"""CreateProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param parent_process_type_id:
:type parent_process_type_id: str
:param reference_name:
:type reference_name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'reference_name': {'key': 'referenceName', 'type': 'str'}
}
def __init__(self, description=None, name=None, parent_process_type_id=None, reference_name=None):
super(CreateProcessModel, self).__init__()
self.description = description
self.name = name
self.parent_process_type_id = parent_process_type_id
self.reference_name = reference_name
class Extension(Model):
"""Extension.
:param id:
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'}
}
def __init__(self, id=None):
super(Extension, self).__init__()
self.id = id
class FieldModel(Model):
"""FieldModel.
:param description:
:type description: str
:param id:
:type id: str
:param is_identity:
:type is_identity: bool
:param name:
:type name: str
:param type:
:type type: object
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_identity': {'key': 'isIdentity', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, is_identity=None, name=None, type=None, url=None):
super(FieldModel, self).__init__()
self.description = description
self.id = id
self.is_identity = is_identity
self.name = name
self.type = type
self.url = url
class FieldRuleModel(Model):
"""FieldRuleModel.
:param actions:
:type actions: list of :class:`RuleActionModel <work-item-tracking.v4_0.models.RuleActionModel>`
:param conditions:
:type conditions: list of :class:`RuleConditionModel <work-item-tracking.v4_0.models.RuleConditionModel>`
:param friendly_name:
:type friendly_name: str
:param id:
:type id: str
:param is_disabled:
:type is_disabled: bool
:param is_system:
:type is_system: bool
"""
_attribute_map = {
'actions': {'key': 'actions', 'type': '[RuleActionModel]'},
'conditions': {'key': 'conditions', 'type': '[RuleConditionModel]'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'is_system': {'key': 'isSystem', 'type': 'bool'}
}
def __init__(self, actions=None, conditions=None, friendly_name=None, id=None, is_disabled=None, is_system=None):
super(FieldRuleModel, self).__init__()
self.actions = actions
self.conditions = conditions
self.friendly_name = friendly_name
self.id = id
self.is_disabled = is_disabled
self.is_system = is_system
class FormLayout(Model):
"""FormLayout.
:param extensions: Gets and sets extensions list
:type extensions: list of :class:`Extension <work-item-tracking.v4_0.models.Extension>`
:param pages: Top level tabs of the layout.
:type pages: list of :class:`Page <work-item-tracking.v4_0.models.Page>`
    :param system_controls: Header controls of the layout.
:type system_controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
"""
_attribute_map = {
'extensions': {'key': 'extensions', 'type': '[Extension]'},
'pages': {'key': 'pages', 'type': '[Page]'},
'system_controls': {'key': 'systemControls', 'type': '[Control]'}
}
def __init__(self, extensions=None, pages=None, system_controls=None):
super(FormLayout, self).__init__()
self.extensions = extensions
self.pages = pages
self.system_controls = system_controls
class Group(Model):
"""Group.
:param contribution: Contribution for the group.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param controls: Controls to be put in the group.
:type controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>`
:param height: The height for the contribution.
:type height: int
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: Label for the group.
:type label: str
:param order: Order in which the group should appear in the section.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param visible: A value indicating if the group should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'controls': {'key': 'controls', 'type': '[Control]'},
'height': {'key': 'height', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, controls=None, height=None, id=None, inherited=None, is_contribution=None, label=None, order=None, overridden=None, visible=None):
super(Group, self).__init__()
self.contribution = contribution
self.controls = controls
self.height = height
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.order = order
self.overridden = overridden
self.visible = visible
class Page(Model):
"""Page.
:param contribution: Contribution for the page.
:type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>`
:param id: The id for the layout node.
:type id: str
    :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to be set only by the combiner.
:type inherited: bool
    :param is_contribution: A value indicating if the layout node is a contribution or not.
:type is_contribution: bool
:param label: The label for the page.
:type label: str
:param locked: A value indicating whether any user operations are permitted on this page and the contents of this page
:type locked: bool
:param order: Order in which the page should appear in the layout.
:type order: int
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
:param page_type: The icon for the page.
:type page_type: object
:param sections: The sections of the page.
:type sections: list of :class:`Section <work-item-tracking.v4_0.models.Section>`
:param visible: A value indicating if the page should be hidden or not.
:type visible: bool
"""
_attribute_map = {
'contribution': {'key': 'contribution', 'type': 'WitContribution'},
'id': {'key': 'id', 'type': 'str'},
'inherited': {'key': 'inherited', 'type': 'bool'},
'is_contribution': {'key': 'isContribution', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'locked': {'key': 'locked', 'type': 'bool'},
'order': {'key': 'order', 'type': 'int'},
'overridden': {'key': 'overridden', 'type': 'bool'},
'page_type': {'key': 'pageType', 'type': 'object'},
'sections': {'key': 'sections', 'type': '[Section]'},
'visible': {'key': 'visible', 'type': 'bool'}
}
def __init__(self, contribution=None, id=None, inherited=None, is_contribution=None, label=None, locked=None, order=None, overridden=None, page_type=None, sections=None, visible=None):
super(Page, self).__init__()
self.contribution = contribution
self.id = id
self.inherited = inherited
self.is_contribution = is_contribution
self.label = label
self.locked = locked
self.order = order
self.overridden = overridden
self.page_type = page_type
self.sections = sections
self.visible = visible
class ProcessModel(Model):
"""ProcessModel.
:param description:
:type description: str
:param name:
:type name: str
:param projects:
:type projects: list of :class:`ProjectReference <work-item-tracking.v4_0.models.ProjectReference>`
:param properties:
:type properties: :class:`ProcessProperties <work-item-tracking.v4_0.models.ProcessProperties>`
:param reference_name:
:type reference_name: str
:param type_id:
:type type_id: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'projects': {'key': 'projects', 'type': '[ProjectReference]'},
'properties': {'key': 'properties', 'type': 'ProcessProperties'},
'reference_name': {'key': 'referenceName', 'type': 'str'},
'type_id': {'key': 'typeId', 'type': 'str'}
}
def __init__(self, description=None, name=None, projects=None, properties=None, reference_name=None, type_id=None):
super(ProcessModel, self).__init__()
self.description = description
self.name = name
self.projects = projects
self.properties = properties
self.reference_name = reference_name
self.type_id = type_id
class ProcessProperties(Model):
"""ProcessProperties.
:param class_:
:type class_: object
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param parent_process_type_id:
:type parent_process_type_id: str
:param version:
:type version: str
"""
_attribute_map = {
'class_': {'key': 'class', 'type': 'object'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'parent_process_type_id': {'key': 'parentProcessTypeId', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'}
}
def __init__(self, class_=None, is_default=None, is_enabled=None, parent_process_type_id=None, version=None):
super(ProcessProperties, self).__init__()
self.class_ = class_
self.is_default = is_default
self.is_enabled = is_enabled
self.parent_process_type_id = parent_process_type_id
self.version = version
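# Illustrative sketch only (not part of the generated file): pairing a
# ProcessModel with its ProcessProperties.  The parent process type id is a
# placeholder GUID-style string and the name/description are made up for the
# example, not values the service defines.
def _example_process():
    properties = ProcessProperties(is_default=False, is_enabled=True,
                                   parent_process_type_id='00000000-0000-0000-0000-000000000000',
                                   version='1.0')
    return ProcessModel(name='My Agile Process',
                        description='Inherited process used for illustration.',
                        properties=properties)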
class ProjectReference(Model):
"""ProjectReference.
:param description:
:type description: str
:param id:
:type id: str
:param name:
:type name: str
:param url:
:type url: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, description=None, id=None, name=None, url=None):
super(ProjectReference, self).__init__()
self.description = description
self.id = id
self.name = name
self.url = url
class RuleActionModel(Model):
"""RuleActionModel.
:param action_type:
:type action_type: str
:param target_field:
:type target_field: str
:param value:
:type value: str
"""
_attribute_map = {
'action_type': {'key': 'actionType', 'type': 'str'},
'target_field': {'key': 'targetField', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, action_type=None, target_field=None, value=None):
super(RuleActionModel, self).__init__()
self.action_type = action_type
self.target_field = target_field
self.value = value
class RuleConditionModel(Model):
"""RuleConditionModel.
:param condition_type:
:type condition_type: str
:param field:
:type field: str
:param value:
:type value: str
"""
_attribute_map = {
'condition_type': {'key': 'conditionType', 'type': 'str'},
'field': {'key': 'field', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'}
}
def __init__(self, condition_type=None, field=None, value=None):
super(RuleConditionModel, self).__init__()
self.condition_type = condition_type
self.field = field
self.value = value
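# Illustrative sketch only (not part of the generated file): a rule condition
# paired with a rule action.  The condition/action type strings and field
# reference names are placeholder values, not a list this module defines.
def _example_rule_parts():
    condition = RuleConditionModel(condition_type='when',
                                   field='System.State', value='Active')
    action = RuleActionModel(action_type='setValue',
                             target_field='System.Priority', value='2')
    return condition, action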
class Section(Model):
"""Section.
:param groups:
:type groups: list of :class:`Group <work-item-tracking.v4_0.models.Group>`
:param id: The id for the layout node.
:type id: str
:param overridden: A value indicating whether this layout node has been overridden by a child layout.
:type overridden: bool
"""
_attribute_map = {
'groups': {'key': 'groups', 'type': '[Group]'},
'id': {'key': 'id', 'type': 'str'},
'overridden': {'key': 'overridden', 'type': 'bool'}
}
def __init__(self, groups=None, id=None, overridden=None):
super(Section, self).__init__()
self.groups = groups
self.id = id
self.overridden = overridden
class UpdateProcessModel(Model):
"""UpdateProcessModel.
:param description:
:type description: str
:param is_default:
:type is_default: bool
:param is_enabled:
:type is_enabled: bool
:param name:
:type name: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, description=None, is_default=None, is_enabled=None, name=None):
super(UpdateProcessModel, self).__init__()
self.description = description
self.is_default = is_default
self.is_enabled = is_enabled
self.name = name
class WitContribution(Model):
"""WitContribution.
:param contribution_id: The id for the contribution.
:type contribution_id: str
:param height: The height for the contribution.
:type height: int
:param inputs: A dictionary holding key value pairs for contribution inputs.
:type inputs: dict
    :param show_on_deleted_work_item: A value indicating if the contribution should be shown on a deleted work item.
:type show_on_deleted_work_item: bool
"""
_attribute_map = {
'contribution_id': {'key': 'contributionId', 'type': 'str'},
'height': {'key': 'height', 'type': 'int'},
'inputs': {'key': 'inputs', 'type': '{object}'},
'show_on_deleted_work_item': {'key': 'showOnDeletedWorkItem', 'type': 'bool'}
}
def __init__(self, contribution_id=None, height=None, inputs=None, show_on_deleted_work_item=None):
super(WitContribution, self).__init__()
self.contribution_id = contribution_id
self.height = height
self.inputs = inputs
self.show_on_deleted_work_item = show_on_deleted_work_item
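# Illustrative sketch only (not part of the generated file): describing a
# contribution-backed control.  The contribution id and inputs are placeholder
# values for a hypothetical form extension.
def _example_contribution():
    return WitContribution(contribution_id='my-publisher.my-extension.my-control',
                           height=120, inputs={'FieldName': 'System.Title'},
                           show_on_deleted_work_item=False)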
class WorkItemBehavior(Model):
"""WorkItemBehavior.
:param abstract:
:type abstract: bool
:param color:
:type color: str
:param description:
:type description: str
:param fields:
:type fields: list of :class:`WorkItemBehaviorField <work-item-tracking.v4_0.models.WorkItemBehaviorField>`
:param id:
:type id: str
:param inherits:
:type inherits: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param name:
:type name: str
:param overriden:
:type overriden: bool
:param rank:
:type rank: int
:param url:
:type url: str
"""
_attribute_map = {
'abstract': {'key': 'abstract', 'type': 'bool'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'fields': {'key': 'fields', 'type': '[WorkItemBehaviorField]'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'WorkItemBehaviorReference'},
'name': {'key': 'name', 'type': 'str'},
'overriden': {'key': 'overriden', 'type': 'bool'},
'rank': {'key': 'rank', 'type': 'int'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, abstract=None, color=None, description=None, fields=None, id=None, inherits=None, name=None, overriden=None, rank=None, url=None):
super(WorkItemBehavior, self).__init__()
self.abstract = abstract
self.color = color
self.description = description
self.fields = fields
self.id = id
self.inherits = inherits
self.name = name
self.overriden = overriden
self.rank = rank
self.url = url
class WorkItemBehaviorField(Model):
"""WorkItemBehaviorField.
:param behavior_field_id:
:type behavior_field_id: str
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'behavior_field_id': {'key': 'behaviorFieldId', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior_field_id=None, id=None, url=None):
super(WorkItemBehaviorField, self).__init__()
self.behavior_field_id = behavior_field_id
self.id = id
self.url = url
class WorkItemBehaviorReference(Model):
"""WorkItemBehaviorReference.
:param id:
:type id: str
:param url:
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, url=None):
super(WorkItemBehaviorReference, self).__init__()
self.id = id
self.url = url
class WorkItemStateResultModel(Model):
"""WorkItemStateResultModel.
:param color:
:type color: str
:param hidden:
:type hidden: bool
:param id:
:type id: str
:param name:
:type name: str
:param order:
:type order: int
:param state_category:
:type state_category: str
:param url:
:type url: str
"""
_attribute_map = {
'color': {'key': 'color', 'type': 'str'},
'hidden': {'key': 'hidden', 'type': 'bool'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'order': {'key': 'order', 'type': 'int'},
'state_category': {'key': 'stateCategory', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, color=None, hidden=None, id=None, name=None, order=None, state_category=None, url=None):
super(WorkItemStateResultModel, self).__init__()
self.color = color
self.hidden = hidden
self.id = id
self.name = name
self.order = order
self.state_category = state_category
self.url = url
class WorkItemTypeBehavior(Model):
"""WorkItemTypeBehavior.
:param behavior:
:type behavior: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>`
:param is_default:
:type is_default: bool
:param url:
:type url: str
"""
_attribute_map = {
'behavior': {'key': 'behavior', 'type': 'WorkItemBehaviorReference'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behavior=None, is_default=None, url=None):
super(WorkItemTypeBehavior, self).__init__()
self.behavior = behavior
self.is_default = is_default
self.url = url
class WorkItemTypeModel(Model):
"""WorkItemTypeModel.
:param behaviors:
:type behaviors: list of :class:`WorkItemTypeBehavior <work-item-tracking.v4_0.models.WorkItemTypeBehavior>`
:param class_:
:type class_: object
:param color:
:type color: str
:param description:
:type description: str
:param icon:
:type icon: str
:param id:
:type id: str
:param inherits: Parent WIT Id/Internal ReferenceName that it inherits from
:type inherits: str
:param is_disabled:
:type is_disabled: bool
:param layout:
:type layout: :class:`FormLayout <work-item-tracking.v4_0.models.FormLayout>`
:param name:
:type name: str
:param states:
:type states: list of :class:`WorkItemStateResultModel <work-item-tracking.v4_0.models.WorkItemStateResultModel>`
:param url:
:type url: str
"""
_attribute_map = {
'behaviors': {'key': 'behaviors', 'type': '[WorkItemTypeBehavior]'},
'class_': {'key': 'class', 'type': 'object'},
'color': {'key': 'color', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'icon': {'key': 'icon', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'inherits': {'key': 'inherits', 'type': 'str'},
'is_disabled': {'key': 'isDisabled', 'type': 'bool'},
'layout': {'key': 'layout', 'type': 'FormLayout'},
'name': {'key': 'name', 'type': 'str'},
'states': {'key': 'states', 'type': '[WorkItemStateResultModel]'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, behaviors=None, class_=None, color=None, description=None, icon=None, id=None, inherits=None, is_disabled=None, layout=None, name=None, states=None, url=None):
super(WorkItemTypeModel, self).__init__()
self.behaviors = behaviors
self.class_ = class_
self.color = color
self.description = description
self.icon = icon
self.id = id
self.inherits = inherits
self.is_disabled = is_disabled
self.layout = layout
self.name = name
self.states = states
self.url = url
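# Illustrative sketch only (not part of the generated file): wiring the layout
# models above into a minimal WorkItemTypeModel.  All ids, names, colors and
# the state category are placeholder values chosen for the example.
def _example_work_item_type():
    group = Group(id='details-group', label='Details', order=1, visible=True)
    section = Section(id='Section1', groups=[group])
    page = Page(id='details-page', label='Details', order=1, visible=True,
                sections=[section])
    layout = FormLayout(pages=[page])
    states = [WorkItemStateResultModel(name='Active', color='007acc', order=1,
                                       state_category='InProgress')]
    return WorkItemTypeModel(name='Bug', description='Tracks a product defect.',
                             color='CC293D', layout=layout, states=states)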
| en | 0.549875 | # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # Generated file, DO NOT EDIT # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------------------------- Control. :param contribution: Contribution for the control. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param control_type: Type of the control. :type control_type: str :param height: Height of the control, for html controls. :type height: int :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution or not. :type is_contribution: bool :param label: Label for the field :type label: str :param metadata: Inner text of the control. :type metadata: str :param order: :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param read_only: A value indicating if the control is readonly. :type read_only: bool :param visible: A value indicating if the control should be hidden or not. :type visible: bool :param watermark: Watermark text for the textbox. :type watermark: str CreateProcessModel. :param description: :type description: str :param name: :type name: str :param parent_process_type_id: :type parent_process_type_id: str :param reference_name: :type reference_name: str Extension. :param id: :type id: str FieldModel. :param description: :type description: str :param id: :type id: str :param is_identity: :type is_identity: bool :param name: :type name: str :param type: :type type: object :param url: :type url: str FieldRuleModel. :param actions: :type actions: list of :class:`RuleActionModel <work-item-tracking.v4_0.models.RuleActionModel>` :param conditions: :type conditions: list of :class:`RuleConditionModel <work-item-tracking.v4_0.models.RuleConditionModel>` :param friendly_name: :type friendly_name: str :param id: :type id: str :param is_disabled: :type is_disabled: bool :param is_system: :type is_system: bool FormLayout. :param extensions: Gets and sets extensions list :type extensions: list of :class:`Extension <work-item-tracking.v4_0.models.Extension>` :param pages: Top level tabs of the layout. :type pages: list of :class:`Page <work-item-tracking.v4_0.models.Page>` :param system_controls: Headers controls of the layout. :type system_controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>` Group. :param contribution: Contribution for the group. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param controls: Controls to be put in the group. :type controls: list of :class:`Control <work-item-tracking.v4_0.models.Control>` :param height: The height for the contribution. :type height: int :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. 
This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution are not. :type is_contribution: bool :param label: Label for the group. :type label: str :param order: Order in which the group should appear in the section. :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param visible: A value indicating if the group should be hidden or not. :type visible: bool Page. :param contribution: Contribution for the page. :type contribution: :class:`WitContribution <work-item-tracking.v4_0.models.WitContribution>` :param id: The id for the layout node. :type id: str :param inherited: A value indicating whether this layout node has been inherited from a parent layout. This is expected to only be only set by the combiner. :type inherited: bool :param is_contribution: A value indicating if the layout node is contribution are not. :type is_contribution: bool :param label: The label for the page. :type label: str :param locked: A value indicating whether any user operations are permitted on this page and the contents of this page :type locked: bool :param order: Order in which the page should appear in the layout. :type order: int :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool :param page_type: The icon for the page. :type page_type: object :param sections: The sections of the page. :type sections: list of :class:`Section <work-item-tracking.v4_0.models.Section>` :param visible: A value indicating if the page should be hidden or not. :type visible: bool ProcessModel. :param description: :type description: str :param name: :type name: str :param projects: :type projects: list of :class:`ProjectReference <work-item-tracking.v4_0.models.ProjectReference>` :param properties: :type properties: :class:`ProcessProperties <work-item-tracking.v4_0.models.ProcessProperties>` :param reference_name: :type reference_name: str :param type_id: :type type_id: str ProcessProperties. :param class_: :type class_: object :param is_default: :type is_default: bool :param is_enabled: :type is_enabled: bool :param parent_process_type_id: :type parent_process_type_id: str :param version: :type version: str ProjectReference. :param description: :type description: str :param id: :type id: str :param name: :type name: str :param url: :type url: str RuleActionModel. :param action_type: :type action_type: str :param target_field: :type target_field: str :param value: :type value: str RuleConditionModel. :param condition_type: :type condition_type: str :param field: :type field: str :param value: :type value: str Section. :param groups: :type groups: list of :class:`Group <work-item-tracking.v4_0.models.Group>` :param id: The id for the layout node. :type id: str :param overridden: A value indicating whether this layout node has been overridden by a child layout. :type overridden: bool UpdateProcessModel. :param description: :type description: str :param is_default: :type is_default: bool :param is_enabled: :type is_enabled: bool :param name: :type name: str WitContribution. :param contribution_id: The id for the contribution. :type contribution_id: str :param height: The height for the contribution. :type height: int :param inputs: A dictionary holding key value pairs for contribution inputs. 
:type inputs: dict :param show_on_deleted_work_item: A value indicating if the contribution should be show on deleted workItem. :type show_on_deleted_work_item: bool WorkItemBehavior. :param abstract: :type abstract: bool :param color: :type color: str :param description: :type description: str :param fields: :type fields: list of :class:`WorkItemBehaviorField <work-item-tracking.v4_0.models.WorkItemBehaviorField>` :param id: :type id: str :param inherits: :type inherits: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>` :param name: :type name: str :param overriden: :type overriden: bool :param rank: :type rank: int :param url: :type url: str WorkItemBehaviorField. :param behavior_field_id: :type behavior_field_id: str :param id: :type id: str :param url: :type url: str WorkItemBehaviorReference. :param id: :type id: str :param url: :type url: str WorkItemStateResultModel. :param color: :type color: str :param hidden: :type hidden: bool :param id: :type id: str :param name: :type name: str :param order: :type order: int :param state_category: :type state_category: str :param url: :type url: str WorkItemTypeBehavior. :param behavior: :type behavior: :class:`WorkItemBehaviorReference <work-item-tracking.v4_0.models.WorkItemBehaviorReference>` :param is_default: :type is_default: bool :param url: :type url: str WorkItemTypeModel. :param behaviors: :type behaviors: list of :class:`WorkItemTypeBehavior <work-item-tracking.v4_0.models.WorkItemTypeBehavior>` :param class_: :type class_: object :param color: :type color: str :param description: :type description: str :param icon: :type icon: str :param id: :type id: str :param inherits: Parent WIT Id/Internal ReferenceName that it inherits from :type inherits: str :param is_disabled: :type is_disabled: bool :param layout: :type layout: :class:`FormLayout <work-item-tracking.v4_0.models.FormLayout>` :param name: :type name: str :param states: :type states: list of :class:`WorkItemStateResultModel <work-item-tracking.v4_0.models.WorkItemStateResultModel>` :param url: :type url: str | 1.626517 | 2 |
responder.py | ziggyzacks/pyrecs | 2 | 297 | <reponame>ziggyzacks/pyrecs
import abc
from utils import LogMixin
class Reponse(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def redis(self):
""" redis connection """
return
@abc.abstractmethod
def fetch(self, ids):
""" hydrate relevant ids with data """
return
class Movies(Reponse, LogMixin):
DEFAULT_FIELDS = ['title', 'year', 'genres']
def __init__(self, **kwargs):
super().__init__()
for key, value in kwargs.items():
setattr(self, key, value)
def fetch(self, movies, fields=None, from_index=False):
""" hydrates class ids with metadata, return redis pipeline that must be executed """
if fields is None:
fields = Movies.DEFAULT_FIELDS
if from_index:
movies = self.redis.mget(('inverse:index:movie:{}'.format(idx) for idx in movies))
response = []
for movie in movies:
values = self.redis.hmget('movie:{}'.format(movie), fields)
obj = dict(zip(fields, values))
if 'genres' in obj:
obj['genres'] = obj['genres'].split(',')
if 'year' in obj:
obj['year'] = int(obj['year'])
response.append(obj)
return response
def movie_to_index(self, movies):
return self.redis.mget(('index:movie:{}'.format(m) for m in movies))
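# Hedged usage sketch (not part of the original module): 'redis_client' is an
# assumption here, any pre-configured redis.Redis-style client that returns
# strings; this module only ever receives it through Movies(redis=...).
def _example_usage(redis_client):
    movies = Movies(redis=redis_client)
    # hydrate two movie ids straight from their 'movie:<id>' hashes
    return movies.fetch(['1', '2'], fields=['title', 'year'])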
| import abc
from utils import LogMixin
class Reponse(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def redis(self):
""" redis connection """
return
@abc.abstractmethod
def fetch(self, ids):
""" hydrate relevant ids with data """
return
class Movies(Reponse, LogMixin):
DEFAULT_FIELDS = ['title', 'year', 'genres']
def __init__(self, **kwargs):
super().__init__()
for key, value in kwargs.items():
setattr(self, key, value)
def fetch(self, movies, fields=None, from_index=False):
""" hydrates class ids with metadata, return redis pipeline that must be executed """
if fields is None:
fields = Movies.DEFAULT_FIELDS
if from_index:
movies = self.redis.mget(('inverse:index:movie:{}'.format(idx) for idx in movies))
response = []
for movie in movies:
values = self.redis.hmget('movie:{}'.format(movie), fields)
obj = dict(zip(fields, values))
if 'genres' in obj:
obj['genres'] = obj['genres'].split(',')
if 'year' in obj:
obj['year'] = int(obj['year'])
response.append(obj)
return response
def movie_to_index(self, movies):
return self.redis.mget(('index:movie:{}'.format(m) for m in movies)) | en | 0.836875 | redis connection hydrate relevant ids with data hydrates class ids with metadata, return redis pipeline that must be executed | 2.881747 | 3 |
python/library.bzl | robfig/rules_proto | 0 | 298 | <gh_stars>0
load("//python:compile.bzl", "py_proto_compile", "py_grpc_compile")
load("@grpc_py_deps//:requirements.bzl", "all_requirements")
def py_proto_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_proto_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements, # fixme don't need grpc here
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
)
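# Hedged usage sketch (not part of the original file): from a BUILD file one
# might call the macro roughly like this; the load label and target names are
# assumptions based on this file's path, not something pinned down here.
#
#   load("//python:library.bzl", "py_proto_library")
#
#   py_proto_library(
#       name = "example_py_proto",
#       deps = ["//example:example_proto"],
#   )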
def py_grpc_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_grpc_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements,
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
) | load("//python:compile.bzl", "py_proto_compile", "py_grpc_compile")
load("@grpc_py_deps//:requirements.bzl", "all_requirements")
def py_proto_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_proto_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements, # fixme don't need grpc here
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
)
def py_grpc_library(**kwargs):
name = kwargs.get("name")
deps = kwargs.get("deps")
verbose = kwargs.get("verbose")
visibility = kwargs.get("visibility")
name_pb = name + "_pb"
py_grpc_compile(
name = name_pb,
deps = deps,
visibility = visibility,
verbose = verbose,
)
native.py_library(
name = name,
srcs = [name_pb],
deps = all_requirements,
# This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH
imports = [name_pb],
visibility = visibility,
) | en | 0.707361 | # fixme don't need grpc here # This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH # This magically adds REPOSITORY_NAME/PACKAGE_NAME/{name_pb} to PYTHONPATH | 2.103555 | 2 |
src/mem/slicc/ast/TypeDeclAST.py | qianlong4526888/haha | 135 | 299 | # Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
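# Hedged sketch (not part of the original source): the generated-file naming
# convention implemented by files() above, shown standalone; "parent" stands in
# for the enclosing machine ident, e.g. "L1Cache".
def _expected_generated_files(ident, parent=None):
    name = "%s_%s" % (parent, ident) if parent else ident
    return set(("%s.hh" % name, "%s.cc" % name))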
| # Copyright (c) 1999-2008 <NAME> and <NAME>
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols.Type import Type
class TypeDeclAST(DeclAST):
def __init__(self, slicc, type_ast, pairs, field_asts):
super(TypeDeclAST, self).__init__(slicc, pairs)
self.type_ast = type_ast
self.field_asts = field_asts
def __repr__(self):
return "[TypeDecl: %r]" % (self.type_ast)
def files(self, parent=None):
if "external" in self:
return set()
if parent:
ident = "%s_%s" % (parent, self.type_ast.ident)
else:
ident = self.type_ast.ident
return set(("%s.hh" % ident, "%s.cc" % ident))
def generate(self):
ident = str(self.type_ast)
machine = self.symtab.state_machine
# Make the new type
new_type = Type(self.symtab, ident, self.location, self.pairs,
self.state_machine)
if machine:
machine.addType(new_type)
self.symtab.newSymbol(new_type)
self.symtab.pushFrame()
# Add all of the fields of the type to it
for field in self.field_asts:
field.generate(new_type)
self.symtab.popFrame()
| en | 0.72589 | # Copyright (c) 1999-2008 <NAME> and <NAME> # Copyright (c) 2009 The Hewlett-Packard Development Company # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Make the new type # Add all of the fields of the type to it | 1.527756 | 2 |