hexsha
stringlengths
40
40
size
int64
2
1.05M
content
stringlengths
2
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
0912ee724b6b223d4842326e0f107884a4906d2e
3,137
extern crate clap; extern crate sha1; extern crate sha2; use clap::{App, Arg}; use sha2::Digest; use std::fs; use std::fs::read_dir; use std::io::{self, Read}; use std::path::Path; fn calculate_hash(data: &[u8], sha: usize) -> String { let strout: String = match sha { 224 => format!("{:x}", sha2::Sha224::digest(&data)), 256 => format!("{:x}", sha2::Sha256::digest(&data)), 384 => format!("{:x}", sha2::Sha384::digest(&data)), 512 => format!("{:x}", sha2::Sha512::digest(&data)), 512224 => format!("{:x}", sha2::Sha512Trunc224::digest(&data)), 512256 => format!("{:x}", sha2::Sha512Trunc256::digest(&data)), _ => format!("{:x}", sha1::Sha1::digest(&data)), }; strout } fn read_data(path: &Path, sha: usize) -> String { let mut results = Vec::new(); if path.is_dir() { let paths = read_dir(path).unwrap(); for entry in paths { if let Ok(entry) = entry { if entry.path().is_file() { if let Ok(mut file) = fs::File::open(&entry.path()) { let mut data = Vec::new(); file.read_to_end(&mut data).unwrap(); let strout = calculate_hash(&data, sha); results.push(strout + " " + entry.path().to_str().unwrap()) } } } } } else { if let Ok(mut file) = fs::File::open(&path) { let mut data = Vec::new(); file.read_to_end(&mut data).unwrap(); let strout = calculate_hash(&data, sha); results.push(strout + " " + path.to_str().unwrap()) } } return results.join("\n"); } fn main() { let matches = App::new("shasum") .version("0.7.0") .author("Smirnov V. 
<smirnovvad7@gmail.com>") .about("Print SHA checksums from STDIN, input file or directory.") .arg( Arg::new("FILE") .about("With no FILE, or when FILE is -, read standard input.") .takes_value(true) .required(false) .index(1), ) .arg( Arg::new("algorithm") .short('a') .long("algorithm") .value_name("algorithm") .about("Sets the algorithm to use") .takes_value(true) .required(false) .possible_values(&["1", "224", "256", "384", "512", "512224", "512256"]) .default_value("1"), ) .get_matches(); let sha: usize = matches.value_of("algorithm").unwrap().parse().unwrap(); match matches.value_of("FILE") { Some("-") | None => { let mut buffer = String::new(); let stdin = io::stdin(); let mut handle = stdin.lock(); handle.read_to_string(&mut buffer).unwrap(); println!("{}", calculate_hash(&buffer.as_bytes(), sha)); } Some(path) => { let path = Path::new(path); println!("{}", read_data(path, sha)); } } }
33.731183
88
0.48167
5df9d0222b4bb22ca10e9e3f1cbcd343c5ce1b7c
623
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // aux-build:two_macros.rs #[macro_use(macro_one)] #[macro_use(macro_two)] extern crate two_macros; pub fn main() { macro_one!(); macro_two!(); }
29.666667
68
0.725522
c1c4e7a2486fab0f20c4fd8e9c5cd7d6598c47be
3,135
/* * MIT License * * Copyright (c) 2021 Luiz Ferraz * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ //! 
Binary for solving day 18 of Advent of Code 2021 #![feature(box_patterns)] use crate::data::parsing::parse_many; use crate::data::Element; use anyhow::{anyhow, Context}; use aoc2021::nom::parse_all; use aoc2021::InputProvider; use aoc2021::{lazy_input, LazyInputProvider}; use rayon::prelude::*; static INPUT_DIR: LazyInputProvider = lazy_input!(18); mod data; fn challenge_one(input: &[Element]) -> anyhow::Result<i64> { // Get the magnitude of the summation of all the elements input .iter() .cloned() .reduce(std::ops::Add::add) .ok_or_else(|| anyhow!("No elements provided")) .map(|element| element.magnitude()) } fn challenge_two(input: &[Element]) -> anyhow::Result<i64> { // Get the maximum magnitude possible, adding just two of the elements input .into_par_iter() .enumerate() .flat_map(|(left_index, left)| { input .into_par_iter() .enumerate() .filter_map(move |(right_index, right)| { if left_index == right_index { None } else { Some((left.clone() + right.clone()).magnitude()) } }) }) .max() .ok_or_else(|| anyhow!("Could not compute maximum magnitude")) } fn process(name: &str) -> anyhow::Result<()> { let content = INPUT_DIR .get_input(&format!("{}.txt", name)) .context("reading content") .and_then(|content| parse_all(parse_many, &content))?; println!( "Challenge one ({}): {}", name, challenge_one(&content).context("challenge one")? ); println!( "Challenge two ({}): {}", name, challenge_two(&content).context("challenge two")? ); Ok(()) } fn main() -> anyhow::Result<()> { process("sample").context("sample data")?; process("input").context("real data")?; Ok(()) }
31.35
81
0.627432
71af51a31d22145cabe22c01569891fb34b2abdc
4,298
//! Structures used in Tendermint RPC mod block_results; use parity_scale_codec::Decode; use serde::{Deserialize, Serialize}; use crate::{ErrorKind, Result, ResultExt, Transaction}; use chain_core::init::config::InitConfig; use chain_core::tx::data::TxId; use chain_core::tx::fee::LinearFee; use chain_core::tx::{TxAux, TxEnclaveAux, TxPublicAux}; pub use self::block_results::BlockResults; pub use tendermint::rpc::endpoint::{ abci_query::AbciQuery, abci_query::Response as AbciQueryResponse, block::Response as BlockResponse, block_results::Response as BlockResultsResponse, broadcast::tx_sync::Response as BroadcastTxResponse, commit::Response as CommitResponse, status::Response as StatusResponse, validators::Response as ValidatorsResponse, }; pub use tendermint::rpc::endpoint::{broadcast, status}; pub use tendermint::{ abci, abci::transaction::Data, abci::Code, block::Header, block::Height, Block, Genesis as GenericGenesis, Hash, Time, }; /// crypto-com instantiated genesis type pub type Genesis = GenericGenesis<Option<InitConfig>>; /// crypto-com instantiated genesis type #[derive(Clone, Debug, Deserialize, Serialize)] pub struct GenesisResponse { /// Genesis data pub genesis: Genesis, } /// crypto-chain specific methods. 
pub trait BlockExt { /// Returns un-encrypted staking(deposit/unbound) transactions in a block /// (this may also contain invalid transactions) fn staking_transactions(&self) -> Result<Vec<Transaction>>; /// Returns ids of transactions whose main content is only available in enclaves (Transfer, Withdraw) fn enclave_transaction_ids(&self) -> Result<Vec<TxId>>; } impl BlockExt for Block { fn staking_transactions(&self) -> Result<Vec<Transaction>> { self.data .iter() .map(|raw| -> Result<TxAux> { TxAux::decode(&mut raw.clone().into_vec().as_slice()).chain(|| { ( ErrorKind::DeserializationError, "Unable to decode transactions from bytes in a block", ) }) }) .filter_map(|tx_aux_result| match tx_aux_result { Err(e) => Some(Err(e)), Ok(tx_aux) => match tx_aux { TxAux::EnclaveTx(TxEnclaveAux::DepositStakeTx { tx, .. }) => { Some(Ok(Transaction::DepositStakeTransaction(tx))) } TxAux::PublicTx(TxPublicAux::UnbondStakeTx(tx, _)) => { Some(Ok(Transaction::UnbondStakeTransaction(tx))) } _ => None, }, }) .collect::<Result<Vec<Transaction>>>() } fn enclave_transaction_ids(&self) -> Result<Vec<TxId>> { self.data .iter() .map(|raw| -> Result<TxAux> { TxAux::decode(&mut raw.clone().into_vec().as_slice()).chain(|| { ( ErrorKind::DeserializationError, "Unable to decode transactions from bytes in a block", ) }) }) .filter_map(|tx_aux_result| match tx_aux_result { Err(e) => Some(Err(e)), Ok(tx_aux) => match tx_aux { TxAux::EnclaveTx(TxEnclaveAux::WithdrawUnbondedStakeTx { .. }) => { Some(Ok(tx_aux.tx_id())) } TxAux::EnclaveTx(TxEnclaveAux::TransferTx { .. }) => Some(Ok(tx_aux.tx_id())), _ => None, }, }) .collect::<Result<Vec<TxId>>>() } } /// crypto-chain specific methods. pub trait GenesisExt { /// get fee policy fn fee_policy(&self) -> LinearFee; } impl GenesisExt for Genesis { fn fee_policy(&self) -> LinearFee { self.app_state .as_ref() .expect("parsed app state") .network_params .initial_fee_policy } } /// crypto-chain specific methods. 
pub trait AbciQueryExt { /// get query result fn bytes(&self) -> Vec<u8>; } impl AbciQueryExt for AbciQuery { fn bytes(&self) -> Vec<u8> { self.value.clone().unwrap_or_default() } }
34.66129
105
0.577943
ccbe0a5592c8cbf87a57d3fe57ecf8b2b8d215bb
6,159
// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // Polkadot is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. #![cfg_attr(not(feature = "std"), no_std)] //! Core Elexeum types. //! //! These core Elexeum types are used by the relay chain and the Parachains. use parity_scale_codec::{Decode, Encode}; #[cfg(feature = "std")] use parity_util_mem::MallocSizeOf; use scale_info::TypeInfo; use sp_runtime::{ generic, traits::{IdentifyAccount, Verify}, MultiSignature, }; pub use sp_runtime::traits::{BlakeTwo256, Hash as HashT}; /// The block number type used by Elexeum. /// 32-bits will allow for 136 years of blocks assuming 1 block per second. pub type BlockNumber = u32; /// An instant or duration in time. pub type Moment = u64; /// Alias to type for a signature for a transaction on the relay chain. This allows one of several /// kinds of underlying crypto to be used, so isn't a fixed size when encoded. pub type Signature = MultiSignature; /// Alias to the public key used for this chain, actually a `MultiSigner`. Like the signature, this /// also isn't a fixed size when encoded, as different cryptos have different size public keys. pub type AccountPublic = <Signature as Verify>::Signer; /// Alias to the opaque account ID type for this chain, actually a `AccountId32`. This is always /// 32 bytes. 
pub type AccountId = <AccountPublic as IdentifyAccount>::AccountId; /// The type for looking up accounts. We don't expect more than 4 billion of them. pub type AccountIndex = u32; /// Identifier for a chain. 32-bit should be plenty. pub type ChainId = u32; /// A hash of some data used by the relay chain. pub type Hash = sp_core::H256; /// Unit type wrapper around [`Hash`] that represents a candidate hash. /// /// This type is produced by [`CandidateReceipt::hash`]. /// /// This type makes it easy to enforce that a hash is a candidate hash on the type level. #[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, Default, PartialOrd, Ord, TypeInfo)] #[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct CandidateHash(pub Hash); #[cfg(feature = "std")] impl std::fmt::Display for CandidateHash { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl sp_std::fmt::Debug for CandidateHash { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { write!(f, "{:?}", self.0) } } /// Index of a transaction in the relay chain. 32-bit should be plenty. pub type Nonce = u32; /// The balance of an account. /// 128-bits (or 38 significant decimal figures) will allow for 10 m currency (`10^7`) at a resolution /// to all for one second's worth of an annualised 50% reward be paid to a unit holder (`10^11` unit /// denomination), or `10^18` total atomic units, to grow at 50%/year for 51 years (`10^9` multiplier) /// for an eventual total of `10^27` units (27 significant decimal figures). /// We round denomination to `10^12` (12 SDF), and leave the other redundancy at the upper end so /// that 32 bits may be multiplied with a balance in 128 bits without worrying about overflow. pub type Balance = u128; /// Header type. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Block type. pub type Block = generic::Block<Header, UncheckedExtrinsic>; /// Block ID. 
pub type BlockId = generic::BlockId<Block>; /// Opaque, encoded, unchecked extrinsic. pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; /// The information that goes alongside a `transfer_into_parachain` operation. Entirely opaque, it /// will generally be used for identifying the reason for the transfer. Typically it will hold the /// destination account to which the transfer should be credited. If still more information is /// needed, then this should be a hash with the pre-image presented via an off-chain mechanism on /// the parachain. pub type Remark = [u8; 32]; /// A message sent from the relay-chain down to a parachain. /// /// The size of the message is limited by the `config.max_downward_message_size` parameter. pub type DownwardMessage = sp_std::vec::Vec<u8>; /// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when /// the message was sent. #[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)] #[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct InboundDownwardMessage<BlockNumber = crate::BlockNumber> { /// The block number at which these messages were put into the downward message queue. pub sent_at: BlockNumber, /// The actual downward message to processes. pub msg: DownwardMessage, } /// An HRMP message seen from the perspective of a recipient. #[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, TypeInfo)] #[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct InboundHrmpMessage<BlockNumber = crate::BlockNumber> { /// The block number at which this message was sent. /// Specifically, it is the block number at which the candidate that sends this message was /// enacted. pub sent_at: BlockNumber, /// The message payload. pub data: sp_std::vec::Vec<u8>, } /// An HRMP message seen from the perspective of a sender. 
#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, Eq, Hash, TypeInfo)] #[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct OutboundHrmpMessage<Id> { /// The para that will get this message in its downward message queue. pub recipient: Id, /// The message payload. pub data: sp_std::vec::Vec<u8>, } /// `V1` primitives. pub mod v1 { pub use super::*; }
39.480769
104
0.73145
f7bae99b9e20653c91191a4d5bd83da02c3135ff
313
#[test] fn roundtrip_compound_type() { use hdf5::H5Type; #[derive(H5Type)] #[repr(C)] struct Compound { a: u8, b: u8, } let dt = hdf5::Datatype::from_type::<Compound>().unwrap(); let td = dt.to_descriptor().unwrap(); assert_eq!(td, Compound::type_descriptor()); }
20.866667
62
0.571885
c19f8b92949875ad0b8afe8e938212744471171b
4,943
#[doc = "Reader of register IMR2"] pub type R = crate::R<u32, super::IMR2>; #[doc = "Reader of field `WRDY`"] pub type WRDY_R = crate::R<bool, bool>; #[doc = "Reader of field `ENDTX`"] pub type ENDTX_R = crate::R<bool, bool>; #[doc = "Reader of field `TXBUFE`"] pub type TXBUFE_R = crate::R<bool, bool>; #[doc = "Reader of field `UNRE`"] pub type UNRE_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM0`"] pub type CMPM0_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM1`"] pub type CMPM1_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM2`"] pub type CMPM2_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM3`"] pub type CMPM3_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM4`"] pub type CMPM4_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM5`"] pub type CMPM5_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM6`"] pub type CMPM6_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPM7`"] pub type CMPM7_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU0`"] pub type CMPU0_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU1`"] pub type CMPU1_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU2`"] pub type CMPU2_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU3`"] pub type CMPU3_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU4`"] pub type CMPU4_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU5`"] pub type CMPU5_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU6`"] pub type CMPU6_R = crate::R<bool, bool>; #[doc = "Reader of field `CMPU7`"] pub type CMPU7_R = crate::R<bool, bool>; impl R { #[doc = "Bit 0 - Write Ready for Synchronous Channels Update Interrupt Mask"] #[inline(always)] pub fn wrdy(&self) -> WRDY_R { WRDY_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - PDC End of TX Buffer Interrupt Mask"] #[inline(always)] pub fn endtx(&self) -> ENDTX_R { ENDTX_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - PDC TX Buffer Empty Interrupt Mask"] #[inline(always)] pub 
fn txbufe(&self) -> TXBUFE_R { TXBUFE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Synchronous Channels Update Underrun Error Interrupt Mask"] #[inline(always)] pub fn unre(&self) -> UNRE_R { UNRE_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 8 - Comparison 0 Match Interrupt Mask"] #[inline(always)] pub fn cmpm0(&self) -> CMPM0_R { CMPM0_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Comparison 1 Match Interrupt Mask"] #[inline(always)] pub fn cmpm1(&self) -> CMPM1_R { CMPM1_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Comparison 2 Match Interrupt Mask"] #[inline(always)] pub fn cmpm2(&self) -> CMPM2_R { CMPM2_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Comparison 3 Match Interrupt Mask"] #[inline(always)] pub fn cmpm3(&self) -> CMPM3_R { CMPM3_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Comparison 4 Match Interrupt Mask"] #[inline(always)] pub fn cmpm4(&self) -> CMPM4_R { CMPM4_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Comparison 5 Match Interrupt Mask"] #[inline(always)] pub fn cmpm5(&self) -> CMPM5_R { CMPM5_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Comparison 6 Match Interrupt Mask"] #[inline(always)] pub fn cmpm6(&self) -> CMPM6_R { CMPM6_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - Comparison 7 Match Interrupt Mask"] #[inline(always)] pub fn cmpm7(&self) -> CMPM7_R { CMPM7_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 16 - Comparison 0 Update Interrupt Mask"] #[inline(always)] pub fn cmpu0(&self) -> CMPU0_R { CMPU0_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - Comparison 1 Update Interrupt Mask"] #[inline(always)] pub fn cmpu1(&self) -> CMPU1_R { CMPU1_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Comparison 2 Update Interrupt Mask"] #[inline(always)] pub fn cmpu2(&self) -> CMPU2_R { CMPU2_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - Comparison 3 Update Interrupt Mask"] 
#[inline(always)] pub fn cmpu3(&self) -> CMPU3_R { CMPU3_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 20 - Comparison 4 Update Interrupt Mask"] #[inline(always)] pub fn cmpu4(&self) -> CMPU4_R { CMPU4_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 21 - Comparison 5 Update Interrupt Mask"] #[inline(always)] pub fn cmpu5(&self) -> CMPU5_R { CMPU5_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 22 - Comparison 6 Update Interrupt Mask"] #[inline(always)] pub fn cmpu6(&self) -> CMPU6_R { CMPU6_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 23 - Comparison 7 Update Interrupt Mask"] #[inline(always)] pub fn cmpu7(&self) -> CMPU7_R { CMPU7_R::new(((self.bits >> 23) & 0x01) != 0) } }
47.07619
86
0.600445
3a2c535d946940419db6ffc07ed32f03c919d05d
3,029
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::{ smoke_test_environment::new_local_swarm, test_utils::diem_swarm_utils::load_validators_backend_storage, }; use diem_config::config::NodeConfig; use diem_global_constants::CONSENSUS_KEY; use diem_key_manager::{ diem_interface::{DiemInterface, JsonRpcDiemInterface}, KeyManager, }; use diem_secure_storage::{CryptoStorage, Storage}; use diem_time_service::TimeService; use diem_types::chain_id::ChainId; use forge::Node; use std::{convert::TryInto, thread, thread::sleep, time::Duration}; use tokio::runtime::Runtime; #[test] fn test_key_manager_consensus_rotation() { let runtime = Runtime::new().unwrap(); // Create and launch a local validator swarm let swarm = runtime.block_on(new_local_swarm(1)); let validator = swarm.validators().next().unwrap(); // Fetch the first node config in the swarm let node_config_path = validator.config_path(); let node_config = NodeConfig::load(&node_config_path).unwrap(); // Load validator's on disk storage let secure_backend = load_validators_backend_storage(validator); let storage: Storage = (&secure_backend).try_into().unwrap(); // Create a json-rpc connection to the blockchain and verify storage matches the on-chain state. 
let json_rpc_endpoint = validator.json_rpc_endpoint().to_string(); let diem_interface = JsonRpcDiemInterface::new(json_rpc_endpoint.clone()); let account = node_config.validator_network.unwrap().peer_id(); let current_consensus = storage.get_public_key(CONSENSUS_KEY).unwrap().public_key; let validator_info = diem_interface.retrieve_validator_info(account).unwrap(); assert_eq!(&current_consensus, validator_info.consensus_public_key()); // Create the key manager let key_manager_storage: Storage = (&secure_backend).try_into().unwrap(); let mut key_manager = KeyManager::new( JsonRpcDiemInterface::new(json_rpc_endpoint), key_manager_storage, TimeService::real(), 1, 1000, // Large sleep period to force a single rotation 1000, ChainId::test(), ); // Add some time padding to ensure the libra timestamp increases on-chain sleep(Duration::from_secs(10)); // Spawn the key manager and execute a rotation let _key_manager_thread = thread::spawn(move || key_manager.execute()); // Verify the consensus key has been rotated in secure storage and on-chain. for _ in 0..10 { sleep(Duration::from_secs(6)); let rotated_consensus = storage.get_public_key(CONSENSUS_KEY).unwrap().public_key; let validator_info = diem_interface.retrieve_validator_info(account).unwrap(); if current_consensus != rotated_consensus && validator_info.consensus_public_key() == &rotated_consensus { return; // The consensus key was successfully rotated } } panic!("The key manager failed to rotate the consensus key!"); }
39.337662
100
0.72004
ac3deae342f61d5d0c01620f3d38306c9d2d2d87
2,660
#[macro_use] extern crate serde_derive; /// Ethabi mod abi; /// The Jsonrpc Client pub mod client; /// Encryption algorithm library pub mod crypto; /// Error of tool pub mod error; /// Transaction protobuf code pub mod protos; /// Request and Response type pub mod rpctypes; pub use crate::abi::{decode_input, decode_logs, decode_params, encode_input, encode_params}; pub use crate::client::{parse_url, remove_0x, TransactionOptions}; pub use crate::crypto::{ ed25519_sign, Ed25519KeyPair, Ed25519PrivKey, Ed25519PubKey, Ed25519Signature, }; pub use crate::crypto::{ pubkey_to_address, secp256k1_sign, sign, sm2_sign, CreateKey, Encryption, Hashable, KeyPair, Message, PrivateKey, PubKey, Secp256k1KeyPair, Secp256k1PrivKey, Secp256k1PubKey, Signature, Sm2KeyPair, Sm2Privkey, Sm2Pubkey, Sm2Signature, }; pub use crate::error::ToolError; pub use crate::protos::{Crypto, SignedTransaction, Transaction, UnverifiedTransaction}; pub use crate::rpctypes::{JsonRpcParams, JsonRpcResponse, ParamsValue, ResponseValue}; pub use hex::{decode, encode}; pub use protobuf::Message as ProtoMessage; pub use types::{Address, H128, H160, H256, H264, H32, H512, H520, H64}; pub use types::{U256, U512, U64}; /// Format types pub trait LowerHex { /// hex doesn't with 0x fn lower_hex(&self) -> String; /// completed hex doesn't with 0x fn completed_lower_hex(&self) -> String; /// completed with 0x fn completed_lower_hex_with_0x(&self) -> String; /// hex with 0x fn lower_hex_with_0x(&self) -> String; } macro_rules! 
add_funcs { ([$( ($name:ident) ),+ ,]) => { add_funcs!([ $( ($name) ),+ ]); }; ([$( ($name:ident) ),+]) => { $( add_funcs!($name); )+ }; ($name:ident) => { impl LowerHex for $name { #[inline] fn lower_hex(&self) -> String { format!("{:x}", self) } #[inline] fn completed_lower_hex(&self) -> String { let len = stringify!($name)[1..].parse::<usize>().unwrap() / 4; format!("{:0>width$}", self.lower_hex(), width=len) } fn completed_lower_hex_with_0x(&self) -> String { let len = stringify!($name)[1..].parse::<usize>().unwrap() / 4; format!("0x{:0>width$}", self.lower_hex(), width=len) } #[inline] fn lower_hex_with_0x(&self) -> String { format!("{:#x}", self) } } } } add_funcs!([ (H32), (H64), (H128), (H160), (H256), (H264), (H512), (H520), (U64), (U256), (U512), ]);
27.708333
96
0.593609
2668328dc47e19930b4baef881e411eeedece0f7
8,650
// This file is part of linux-support. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-support/master/COPYRIGHT. No part of linux-support, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2020 The developers of linux-support. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/linux-support/master/COPYRIGHT. /// A Q-format `Q16.16` unsigned fixed point number, viz:- /// /// * Integer component is unsigned and 16 bits (`u16`). /// * Fraction component is 16 bits. /// /// See upstream library `https://github.com/PetteriAimonen/libfixmath` for example code to implement trigonometric functions. #[derive(Default, Debug, Copy, Clone)] #[derive(Deserialize, Serialize)] #[repr(C)] pub struct Unsigned1616FixedPoint(u32); impl From<u32> for Unsigned1616FixedPoint { #[inline(always)] fn from(value: u32) -> Self { Self::from_u32(value) } } impl Into<u32> for Unsigned1616FixedPoint { #[inline(always)] fn into(self) -> u32 { self.into_u32() } } impl From<(BigEndianU16, BigEndianU16)> for Unsigned1616FixedPoint { #[inline(always)] fn from(value: (BigEndianU16, BigEndianU16)) -> Self { Self::new(u16::from_be_bytes(value.0), u16::from_be_bytes(value.1)) } } impl Into<(BigEndianU16, BigEndianU16)> for Unsigned1616FixedPoint { #[inline(always)] fn into(self) -> (BigEndianU16, BigEndianU16) { ( self.integer().to_be_bytes(), self.fraction().to_be_bytes(), ) } } impl Into<(u16, u16)> for Unsigned1616FixedPoint { #[inline(always)] fn into(self) -> (u16, u16) { ( self.integer(), self.fraction(), ) } } impl TryFrom<Signed1616FixedPoint> for Unsigned1616FixedPoint { type Error = ParseNumberError; #[inline(always)] fn try_from(value: Signed1616FixedPoint) -> Result<Self, Self::Error> { if 
value.is_negative() { Err(ParseNumberError::TooSmall) } else { Ok(unsafe { transmute(value) }) } } } impl PartialEq for Unsigned1616FixedPoint { #[inline(always)] fn eq(&self, rhs: &Self) -> bool { let left = (*self).into_u32(); let right = (*rhs).into_u32(); left == right } } impl Eq for Unsigned1616FixedPoint { } impl PartialOrd for Unsigned1616FixedPoint { #[inline(always)] fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { Some(self.cmp(rhs)) } } impl Ord for Unsigned1616FixedPoint { #[inline(always)] fn cmp(&self, rhs: &Self) -> Ordering { let left = (*self).into_u32(); let right = (*rhs).into_u32(); left.cmp(&right) } } impl Hash for Unsigned1616FixedPoint { #[inline(always)] fn hash<H: Hasher>(&self, state: &mut H) { let this = (*self).into_u32(); this.hash(state) } } impl Add for Unsigned1616FixedPoint { type Output = Self; #[inline(always)] fn add(self, rhs: Self) -> Self::Output { self.checked_add(rhs).unwrap() } } impl Sub for Unsigned1616FixedPoint { type Output = Self; #[inline(always)] fn sub(self, rhs: Self) -> Self::Output { self.checked_sub(rhs).unwrap() } } impl Rem for Unsigned1616FixedPoint { type Output = Self; #[inline(always)] fn rem(self, rhs: Self) -> Self::Output { let this = self.into_u32(); Self::from_u32(this % (rhs.into_u32())) } } impl AddAssign for Unsigned1616FixedPoint { #[inline(always)] fn add_assign(&mut self, rhs: Self) { *self = (*self).add(rhs) } } impl SubAssign for Unsigned1616FixedPoint { #[inline(always)] fn sub_assign(&mut self, rhs: Self) { *self = (*self).sub(rhs) } } impl RemAssign for Unsigned1616FixedPoint { #[inline(always)] fn rem_assign(&mut self, rhs: Self) { *self = (*self).rem(rhs) } } impl Unsigned1616FixedPoint { const SmallestInteger: u16 = 0x0000; const LargestInteger: u16 = 0xFFFF; const FractionSizeInBits: u64 = 16; const FractionsPerInteger: u64 = 1 << Self::FractionSizeInBits; /// Inclusive minimum. pub const InclusiveMinimum: Self = Self::new(Self::SmallestInteger, 0x0000); /// Inclusive maximum. 
pub const InclusiveMaximum: Self = Self::new(Self::LargestInteger, 0xFFFF); /// Zero, `0`. pub const Zero: Self = Self::new(Self::SmallestInteger, 0x0000); /// One, `1`. pub const One: Self = Self::new(0x0001, 0x0000); /// The constant `e`. pub const E: Self = Self::new(0x0002, 0xB7E1); /// The constant `π`. pub const PI: Self = Self::new(0x0003, 0x243F); /// New instance. #[inline(always)] pub const fn new(integer: u16, fraction: u16) -> Self { Self(((integer as u32) << 16) | (fraction as u32)) } /// Integer. #[inline(always)] pub const fn integer(self) -> u16 { (self.0 >> 16) as u16 } /// Fraction. #[inline(always)] pub const fn fraction(self) -> u16 { self.0 as u16 } /// Constant from u32. #[inline(always)] pub const fn from_u32(value: u32) -> Self { Self(value) } /// Constant into u32. #[inline(always)] pub const fn into_u32(self) -> u32 { self.0 } /// Saturating addition. #[inline(always)] pub fn saturating_add(self, rhs: Self) -> Self { match self.checked_add(rhs) { None => Self::InclusiveMaximum, Some(sum) => sum, } } /// Saturating subtraction. #[inline(always)] pub fn saturating_sub(self, rhs: Self) -> Self { match self.checked_sub(rhs) { None => Self::InclusiveMaximum, Some(difference) => difference, } } /// Checked addition. #[inline(always)] pub fn checked_add(self, rhs: Self) -> Option<Self> { let left_integer = self.integer(); let right_integer = rhs.integer(); if let Some(integer) = left_integer.checked_add(right_integer) { let left_fraction = self.fraction() as u64; let right_fraction = rhs.fraction() as u64; let fraction = left_fraction + right_fraction; if fraction < Self::FractionsPerInteger { Some(Self::new(integer, fraction as u16)) } else { if integer == Self::LargestInteger { None } else { let fraction = fraction - Self::FractionsPerInteger; debug_assert!(fraction < Self::FractionsPerInteger); Some(Self::new(integer + 1, fraction as u16)) } } } else { None } } /// Checked subtraction. 
#[inline(always)] pub fn checked_sub(self, rhs: Self) -> Option<Self> { let left_integer = self.integer(); let right_integer = rhs.integer(); match left_integer.checked_sub(right_integer) { None => None, Some(integer) => { let left_fraction = self.fraction(); let right_fraction = rhs.fraction(); if left_fraction >= right_fraction { let fraction = left_fraction - right_fraction; Some(Self::new(integer, fraction)) } else { if integer == Self::SmallestInteger { None } else { let integer = integer - 1; let fraction = (left_fraction as u64) + Self::FractionsPerInteger - (right_fraction as u64); Some(Self::new(integer, fraction as u16)) } } } } } /// Saturating multiply by a scalar. #[inline(always)] pub fn saturating_mul_by_scalar(self, scalar: u16) -> Self { match self.checked_mul_by_scalar(scalar) { Some(result) => result, None => Self::InclusiveMaximum, } } /// Checked multiply by a scalar. #[inline(always)] pub fn checked_mul_by_scalar(self, scalar: u16) -> Option<Self> { let original_integer = self.integer() as u64; let original_fraction = self.fraction() as u64; let scalar = scalar as u64; let total_fraction = original_fraction * scalar; let extra_integer = total_fraction / Self::FractionsPerInteger; let fraction = (total_fraction % Self::FractionsPerInteger) as u32; match original_integer.checked_mul(scalar) { Some(multiplied) => match multiplied.checked_add(extra_integer) { Some(integer) => return Some(Self::new(integer as u16, fraction as u16)), None => None, }, None => None, } } /// Checked divide by a scalar. 
#[inline(always)] pub fn checked_div_by_scalar(self, scalar: u16) -> Option<Self> { if scalar == 0 { None } else { let original_integer = self.integer() as u64; let original_fraction = self.fraction() as u64; let scalar = scalar as u64; let integer = original_integer / scalar; let carry = original_integer - integer * scalar; let extra_fraction = carry * Self::FractionsPerInteger / scalar; let fraction = original_fraction / scalar + extra_fraction; Some(Self::new(integer as u16, fraction as u16)) } } }
20.94431
394
0.664393
4b7509b9cac7c1fc5122df0601e17502fc21dff3
390
mod pyrocord;

use crate::pyrocord::client::client;
use pyo3::prelude::*;

/// Python sub-module that will hold the model classes.
///
/// Currently registers nothing; its body is a placeholder for future model
/// registrations.
#[pymodule]
fn init_models(_py: Python, _m: &PyModule) -> PyResult<()> {
    Ok(())
}

/// Entry point of the `pyrocord` extension module.
///
/// Exposes the `Client` class at the top level and attaches the `models`
/// sub-module (populated by [`init_models`]).
#[pymodule]
fn pyrocord(py: Python, m: &PyModule) -> PyResult<()> {
    m.add_class::<client::Client>()?;

    let models = PyModule::new(py, "models")?;
    init_models(py, models)?;
    m.add_submodule(models)?;

    Ok(())
}
18.571429
60
0.607692
e4c8ff97c1b05ce1953aab2ff7bea091ef10c5d1
17,983
// Copyright 2019 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Events.

use crate::kurbo::{Rect, Shape, Size, Vec2};
use druid_shell::{Clipboard, KeyEvent, TimerToken};

use crate::mouse::MouseEvent;
use crate::{Command, Notification, WidgetId};

/// An event, propagated downwards during event flow.
///
/// With two exceptions ([`Event::Command`] and [`Event::Notification`], which
/// have special considerations outlined in their own docs) each event
/// corresponds to some user action or other message received from the platform.
///
/// Events are things that happen that can change the state of widgets.
/// An important category is events plumbed from the platform windowing
/// system, which includes mouse and keyboard events, but also (in the
/// future) status changes such as window focus changes.
///
/// Events can also be higher level concepts indicating state changes
/// within the widget hierarchy, for example when a widget gains or loses
/// focus or "hot" (also known as hover) status.
///
/// Events are a key part of what is called "event flow", which is
/// basically the propagation of an event through the widget hierarchy
/// through the [`event`] widget method. A container widget will
/// generally pass the event to its children, mediated through the
/// [`WidgetPod`] container, which is where most of the event flow logic
/// is applied (especially the decision whether or not to propagate).
///
/// This enum is expected to grow considerably, as there are many, many
/// different kinds of events that are relevant in a GUI.
///
/// [`event`]: trait.Widget.html#tymethod.event
/// [`WidgetPod`]: struct.WidgetPod.html
#[derive(Debug, Clone)]
pub enum Event {
    /// Sent to all widgets in a given window when that window is first instantiated.
    ///
    /// This should always be the first `Event` received, although widgets will
    /// receive [`LifeCycle::WidgetAdded`] first.
    ///
    /// Widgets should handle this event if they need to do some additional setup
    /// when a window is first created.
    ///
    /// [`LifeCycle::WidgetAdded`]: enum.LifeCycle.html#variant.WidgetAdded
    WindowConnected,
    /// Called on the root widget when the window size changes.
    ///
    /// Discussion: it's not obvious this should be propagated to user
    /// widgets. It *is* propagated through the RootWidget and handled
    /// in the WindowPod, but after that it might be considered better
    /// to just handle it in `layout`.
    WindowSize(Size),
    /// Called when a mouse button is pressed.
    MouseDown(MouseEvent),
    /// Called when a mouse button is released.
    MouseUp(MouseEvent),
    /// Called when the mouse is moved.
    ///
    /// The `MouseMove` event is propagated to the active widget, if
    /// there is one, otherwise to hot widgets (see `HotChanged`).
    /// If a widget loses its hot status due to `MouseMove` then that specific
    /// `MouseMove` event is also still sent to that widget.
    ///
    /// The `MouseMove` event is also the primary mechanism for widgets
    /// to set a cursor, for example to an I-bar inside a text widget. A
    /// simple tactic is for the widget to unconditionally call
    /// [`set_cursor`] in the MouseMove handler, as `MouseMove` is only
    /// propagated to active or hot widgets.
    ///
    /// [`set_cursor`]: struct.EventCtx.html#method.set_cursor
    MouseMove(MouseEvent),
    /// Called when the mouse wheel or trackpad is scrolled.
    Wheel(MouseEvent),
    /// Called when a key is pressed.
    KeyDown(KeyEvent),
    /// Called when a key is released.
    ///
    /// Because of repeat, there may be a number of `KeyDown` events before
    /// a corresponding `KeyUp` is sent.
    KeyUp(KeyEvent),
    /// Called when a paste command is received.
    Paste(Clipboard),
    /// Called when the trackpad is pinched.
    ///
    /// The value is a delta.
    Zoom(f64),
    /// Called on a timer event.
    ///
    /// Request a timer event through [`EventCtx::request_timer()`]. That will
    /// cause a timer event later.
    ///
    /// Note that timer events from other widgets may be delivered as well. Use
    /// the token returned from the `request_timer()` call to filter events more
    /// precisely.
    ///
    /// [`EventCtx::request_timer()`]: struct.EventCtx.html#method.request_timer
    Timer(TimerToken),
    /// Called at the beginning of a new animation frame.
    ///
    /// On the first frame when transitioning from idle to animating, `interval`
    /// will be 0. (This logic is presently per-window but might change to
    /// per-widget to make it more consistent). Otherwise it is in nanoseconds.
    ///
    /// The `paint` method will be called shortly after this event is finished.
    /// As a result, you should try to avoid doing anything computationally
    /// intensive in response to an `AnimFrame` event: it might make Druid miss
    /// the monitor's refresh, causing lag or jerky animation.
    AnimFrame(u64),
    /// An event containing a [`Command`] to be handled by the widget.
    ///
    /// [`Command`]s are messages, optionally with attached data, that
    /// may be generated from a number of sources:
    ///
    /// - If your application uses menus (either window or context menus)
    /// then the [`MenuItem`]s in the menu will each correspond to a `Command`.
    /// When the menu item is selected, that [`Command`] will be delivered to
    /// the root widget of the appropriate window.
    /// - If you are doing work in another thread (using an [`ExtEventSink`])
    /// then [`Command`]s are the mechanism by which you communicate back to
    /// the main thread.
    /// - Widgets and other Druid components can send custom [`Command`]s at
    /// runtime, via methods such as [`EventCtx::submit_command`].
    ///
    /// [`Command`]: struct.Command.html
    /// [`Widget`]: trait.Widget.html
    /// [`EventCtx::submit_command`]: struct.EventCtx.html#method.submit_command
    /// [`ExtEventSink`]: crate::ExtEventSink
    /// [`MenuItem`]: crate::MenuItem
    Command(Command),
    /// A [`Notification`] from one of this widget's descendants.
    ///
    /// While handling events, widgets can submit notifications to be
    /// delivered to their ancestors immediately after they return.
    ///
    /// If you handle a [`Notification`], you should call [`EventCtx::set_handled`]
    /// to stop the notification from being delivered to further ancestors.
    ///
    /// ## Special considerations
    ///
    /// Notifications are slightly different from other events; they originate
    /// inside Druid, and they are delivered as part of the handling of another
    /// event. In this sense, they can sort of be thought of as an augmentation
    /// of an event; they are a way for multiple widgets to coordinate the
    /// handling of an event.
    ///
    /// [`EventCtx::set_handled`]: crate::EventCtx::set_handled
    Notification(Notification),
    /// Internal druid event.
    ///
    /// This should always be passed down to descendant [`WidgetPod`]s.
    ///
    /// [`WidgetPod`]: struct.WidgetPod.html
    Internal(InternalEvent),
}

/// Internal events used by druid inside [`WidgetPod`].
///
/// These events are translated into regular [`Event`]s
/// and should not be used directly.
///
/// [`WidgetPod`]: struct.WidgetPod.html
/// [`Event`]: enum.Event.html
#[derive(Debug, Clone)]
pub enum InternalEvent {
    /// Sent in some cases when the mouse has left the window.
    ///
    /// This is used in cases when the platform no longer sends mouse events,
    /// but we know that we've stopped receiving the mouse events.
    MouseLeave,
    /// A command still in the process of being dispatched.
    TargetedCommand(Command),
    /// Used for routing timer events.
    RouteTimer(TimerToken, WidgetId),
}

/// Application life cycle events.
///
/// Unlike [`Event`]s, [`LifeCycle`] events are generated by Druid, and
/// may occur at different times during a given pass of the event loop. The
/// [`LifeCycle::WidgetAdded`] event, for instance, may occur when the app
/// first launches (during the handling of [`Event::WindowConnected`]) or it
/// may occur during [`update`] cycle, if some widget has been added there.
///
/// Similarly the [`LifeCycle::Size`] event occurs during [`layout`], and
/// [`LifeCycle::HotChanged`] can occur both during [`event`] (if the mouse
/// moves over a widget) or during [`layout`], if a widget is resized and
/// that moves it under the mouse.
///
/// [`event`]: crate::Widget::event
/// [`update`]: crate::Widget::update
/// [`layout`]: crate::Widget::layout
#[derive(Debug, Clone)]
pub enum LifeCycle {
    /// Sent to a `Widget` when it is added to the widget tree. This should be
    /// the first message that each widget receives.
    ///
    /// Widgets should handle this event in order to do any initial setup.
    ///
    /// In addition to setup, this event is also used by the framework to
    /// track certain types of important widget state.
    ///
    /// ## Registering children
    ///
    /// Container widgets (widgets which use [`WidgetPod`] to manage children)
    /// must ensure that this event is forwarded to those children. The [`WidgetPod`]
    /// itself will handle registering those children with the system; this is
    /// required for things like correct routing of events.
    ///
    /// ## Participating in focus
    ///
    /// Widgets which wish to participate in automatic focus (using tab to change
    /// focus) must handle this event and call [`LifeCycleCtx::register_for_focus`].
    ///
    /// [`LifeCycleCtx::register_child`]: struct.LifeCycleCtx.html#method.register_child
    /// [`WidgetPod`]: struct.WidgetPod.html
    /// [`LifeCycleCtx::register_for_focus`]: struct.LifeCycleCtx.html#method.register_for_focus
    WidgetAdded,
    /// Called when the [`Size`] of the widget changes.
    ///
    /// This will be called after [`Widget::layout`], if the [`Size`] returned
    /// by the widget differs from its previous size.
    ///
    /// [`Size`]: struct.Size.html
    /// [`Widget::layout`]: trait.Widget.html#tymethod.layout
    Size(Size),
    /// Called when the "hot" status changes.
    ///
    /// This will always be called _before_ the event that triggered it; that is,
    /// when the mouse moves over a widget, that widget will receive
    /// `LifeCycle::HotChanged` before it receives `Event::MouseMove`.
    ///
    /// See [`is_hot`](struct.EventCtx.html#method.is_hot) for
    /// discussion about the hot status.
    HotChanged(bool),
    /// Called when the focus status changes.
    ///
    /// This will always be called immediately after a new widget gains focus.
    /// The newly focused widget will receive this with `true` and the widget
    /// that lost focus will receive this with `false`.
    ///
    /// See [`EventCtx::is_focused`] for more information about focus.
    ///
    /// [`EventCtx::is_focused`]: struct.EventCtx.html#method.is_focused
    FocusChanged(bool),
    /// Internal druid lifecycle event.
    ///
    /// This should always be passed down to descendant [`WidgetPod`]s.
    ///
    /// [`WidgetPod`]: struct.WidgetPod.html
    Internal(InternalLifeCycle),
}

/// Internal lifecycle events used by druid inside [`WidgetPod`].
///
/// These events are translated into regular [`LifeCycle`] events
/// and should not be used directly.
///
/// [`WidgetPod`]: struct.WidgetPod.html
/// [`LifeCycle`]: enum.LifeCycle.html
#[derive(Debug, Clone)]
pub enum InternalLifeCycle {
    /// Used to route the `WidgetAdded` event to the required widgets.
    RouteWidgetAdded,
    /// Used to route the `FocusChanged` event.
    RouteFocusChanged {
        /// the widget that is losing focus, if any
        old: Option<WidgetId>,
        /// the widget that is gaining focus, if any
        new: Option<WidgetId>,
    },
    /// Testing only: request the `WidgetState` of a specific widget.
    ///
    /// During testing, you may wish to verify that the state of a widget
    /// somewhere in the tree is as expected. In that case you can dispatch
    /// this event, specifying the widget in question, and that widget will
    /// set its state in the provided `Cell`, if it exists.
    #[cfg(test)]
    DebugRequestState {
        widget: WidgetId,
        state_cell: StateCell,
    },
    #[cfg(test)]
    DebugInspectState(StateCheckFn),
}

impl Event {
    /// Transform the event for the contents of a scrolling container.
    ///
    /// The `force` flag is used to ensure an event is delivered even
    /// if the cursor is out of the viewport, such as if the contents are active
    /// or hot.
    //
    // Mouse-like events are translated by `offset` and dropped (None) when the
    // position falls outside `viewport` (unless `force`); all other events pass
    // through unchanged.
    pub fn transform_scroll(&self, offset: Vec2, viewport: Rect, force: bool) -> Option<Event> {
        match self {
            Event::MouseDown(mouse_event) => {
                if force || viewport.winding(mouse_event.pos) != 0 {
                    let mut mouse_event = mouse_event.clone();
                    mouse_event.pos += offset;
                    Some(Event::MouseDown(mouse_event))
                } else {
                    None
                }
            }
            Event::MouseUp(mouse_event) => {
                if force || viewport.winding(mouse_event.pos) != 0 {
                    let mut mouse_event = mouse_event.clone();
                    mouse_event.pos += offset;
                    Some(Event::MouseUp(mouse_event))
                } else {
                    None
                }
            }
            Event::MouseMove(mouse_event) => {
                if force || viewport.winding(mouse_event.pos) != 0 {
                    let mut mouse_event = mouse_event.clone();
                    mouse_event.pos += offset;
                    Some(Event::MouseMove(mouse_event))
                } else {
                    None
                }
            }
            Event::Wheel(mouse_event) => {
                if force || viewport.winding(mouse_event.pos) != 0 {
                    let mut mouse_event = mouse_event.clone();
                    mouse_event.pos += offset;
                    Some(Event::Wheel(mouse_event))
                } else {
                    None
                }
            }
            _ => Some(self.clone()),
        }
    }

    /// Whether this event should be sent to widgets which are currently not visible
    /// (for example the hidden tabs in a tabs widget).
    pub fn should_propagate_to_hidden(&self) -> bool {
        match self {
            Event::WindowConnected
            | Event::WindowSize(_)
            | Event::Timer(_)
            | Event::AnimFrame(_)
            | Event::Command(_)
            | Event::Notification(_)
            | Event::Internal(_) => true,
            Event::MouseDown(_)
            | Event::MouseUp(_)
            | Event::MouseMove(_)
            | Event::Wheel(_)
            | Event::KeyDown(_)
            | Event::KeyUp(_)
            | Event::Paste(_)
            | Event::Zoom(_) => false,
        }
    }
}

impl LifeCycle {
    /// Whether this event should be sent to widgets which are currently not visible
    /// (for example the hidden tabs in a tabs widget).
    pub fn should_propagate_to_hidden(&self) -> bool {
        match self {
            LifeCycle::WidgetAdded | LifeCycle::Internal(_) => true,
            LifeCycle::Size(_) | LifeCycle::HotChanged(_) | LifeCycle::FocusChanged(_) => false,
        }
    }
}

#[cfg(test)]
pub(crate) use state_cell::{StateCell, StateCheckFn};

#[cfg(test)]
mod state_cell {
    use crate::core::WidgetState;
    use crate::WidgetId;
    use std::{cell::RefCell, rc::Rc};

    /// An interior-mutable struct for fetching WidgetState.
    #[derive(Clone, Default)]
    pub struct StateCell(Rc<RefCell<Option<WidgetState>>>);

    #[derive(Clone)]
    pub struct StateCheckFn(Rc<dyn Fn(&WidgetState)>);

    /// a hacky way of printing the widget id if we panic
    struct WidgetDrop(bool, WidgetId);

    impl Drop for WidgetDrop {
        fn drop(&mut self) {
            if self.0 {
                eprintln!("panic in {:?}", self.1);
            }
        }
    }

    impl StateCell {
        /// Set the state. This will panic if it is called twice.
        pub(crate) fn set(&self, state: WidgetState) {
            assert!(
                self.0.borrow_mut().replace(state).is_none(),
                "StateCell already set"
            )
        }

        #[allow(dead_code)]
        pub(crate) fn take(&self) -> Option<WidgetState> {
            self.0.borrow_mut().take()
        }
    }

    impl StateCheckFn {
        #[cfg(not(target_arch = "wasm32"))]
        pub(crate) fn new(f: impl Fn(&WidgetState) + 'static) -> Self {
            StateCheckFn(Rc::new(f))
        }

        // Runs the check; `WidgetDrop` reports the widget id if `f` panics.
        pub(crate) fn call(&self, state: &WidgetState) {
            let mut panic_reporter = WidgetDrop(true, state.id);
            (self.0)(&state);
            panic_reporter.0 = false;
        }
    }

    impl std::fmt::Debug for StateCell {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            let inner = if self.0.borrow().is_some() {
                "Some"
            } else {
                "None"
            };
            write!(f, "StateCell({})", inner)
        }
    }

    impl std::fmt::Debug for StateCheckFn {
        fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
            write!(f, "StateCheckFn")
        }
    }
}
39.350109
96
0.626203
080fee4d47f807e19d7f8304f3f1cfba8874c5f8
6,874
use crate::{from_lsp, state::LanguageServerSnapshot, to_lsp, FilePosition};
use lsp_types::{CompletionContext, CompletionItem, DocumentSymbol};
use mun_syntax::{AstNode, TextSize};

/// Computes the document symbols for a specific document. Converts the LSP types to internal
/// formats and calls [`LanguageServerSnapshot::file_structure`] to fetch the symbols in the
/// requested document. Once completed, returns the result converted back to LSP types.
pub(crate) fn handle_document_symbol(
    snapshot: LanguageServerSnapshot,
    params: lsp_types::DocumentSymbolParams,
) -> anyhow::Result<Option<lsp_types::DocumentSymbolResponse>> {
    let file_id = from_lsp::file_id(&snapshot, &params.text_document.uri)?;
    let line_index = snapshot.analysis.file_line_index(file_id)?;

    // Flat list of (symbol, parent index); turned into a tree at the end.
    let mut parents: Vec<(DocumentSymbol, Option<usize>)> = Vec::new();

    for symbol in snapshot.analysis.file_structure(file_id)? {
        // `deprecated` is a deprecated LSP field but must still be populated.
        #[allow(deprecated)]
        let doc_symbol = DocumentSymbol {
            name: symbol.label,
            detail: symbol.detail,
            kind: to_lsp::symbol_kind(symbol.kind),
            tags: None,
            deprecated: None,
            range: to_lsp::range(symbol.node_range, &line_index),
            selection_range: to_lsp::range(symbol.navigation_range, &line_index),
            children: None,
        };
        parents.push((doc_symbol, symbol.parent));
    }
    Ok(Some(build_hierarchy_from_flat_list(parents).into()))
}

/// Computes completion items that should be presented to the user when the cursor is at a specific
/// location.
pub(crate) fn handle_completion(
    snapshot: LanguageServerSnapshot,
    params: lsp_types::CompletionParams,
) -> anyhow::Result<Option<lsp_types::CompletionResponse>> {
    let position = from_lsp::file_position(&snapshot, params.text_document_position)?;

    // If the completion was triggered after a single colon there is nothing to do. We only want
    // completion after a *double* colon (::) or after a dot (.).
    if is_position_at_single_colon(&snapshot, position, params.context)? {
        return Ok(None);
    }

    // Get all completions from the analysis database
    let items = match snapshot.analysis.completions(position)? {
        None => return Ok(None),
        Some(items) => items,
    };

    // Convert all the items to the LSP protocol type
    let items: Vec<CompletionItem> = items.into_iter().map(to_lsp::completion_item).collect();

    return Ok(Some(items.into()));

    /// Helper function to check if the given position is preceded by a single colon.
    fn is_position_at_single_colon(
        snapshot: &LanguageServerSnapshot,
        position: FilePosition,
        context: Option<CompletionContext>,
    ) -> anyhow::Result<bool> {
        if let Some(ctx) = context {
            if ctx.trigger_character.unwrap_or_default() == ":" {
                let source_file = snapshot.analysis.parse(position.file_id)?;
                let syntax = source_file.syntax();
                let text = syntax.text();
                if let Some(next_char) = text.char_at(position.offset) {
                    // Step back over the char at the cursor plus the triggering ':' to find
                    // the character before the colon; a second ':' there means "::".
                    // NOTE(review): `position.offset - diff` would underflow (and panic) if
                    // the offset is smaller than `diff` — presumably a ':' trigger can never
                    // occur that close to the start of the file; confirm.
                    let diff = TextSize::of(next_char) + TextSize::of(':');
                    let prev_char = position.offset - diff;
                    if text.char_at(prev_char) != Some(':') {
                        return Ok(true);
                    }
                }
            }
        }
        Ok(false)
    }
}

/// Constructs a hierarchy of DocumentSymbols for a list of symbols that specify which index is the
/// parent of a symbol. The parent index must always be smaller than the current index.
fn build_hierarchy_from_flat_list(
    mut symbols_and_parent: Vec<(DocumentSymbol, Option<usize>)>,
) -> Vec<DocumentSymbol> {
    let mut result = Vec::new();

    // Iterate over all elements in the list from back to front.
    while let Some((mut node, parent_index)) = symbols_and_parent.pop() {
        // If this node has children (added by the code below), they are in the reverse order. This
        // is because we iterate the input from back to front.
        if let Some(children) = &mut node.children {
            children.reverse();
        }

        // Get the parent of the current node.
        let parent = match parent_index {
            // If the node doesn't have a parent, directly use the result vector (it's a root).
            None => &mut result,
            // If there is a parent, get a reference to the children vector of that parent.
            Some(i) => symbols_and_parent[i]
                .0
                .children
                .get_or_insert_with(Vec::new),
        };

        parent.push(node);
    }

    // The items were pushed in the reverse order, so reverse it right back
    result.reverse();

    result
}

#[cfg(test)]
mod tests {
    use crate::handlers::build_hierarchy_from_flat_list;
    use lsp_types::{DocumentSymbol, SymbolKind};

    #[test]
    fn test_build_hierarchy_from_flat_list() {
        #[allow(deprecated)]
        let default_symbol = DocumentSymbol {
            name: "".to_string(),
            detail: None,
            kind: SymbolKind::File,
            tags: None,
            deprecated: None,
            range: Default::default(),
            selection_range: Default::default(),
            children: None,
        };

        // Flat input: a is a root; b and c are children of a; d is a child of b.
        let mut list = Vec::new();
        list.push((
            DocumentSymbol {
                name: "a".to_string(),
                ..default_symbol.clone()
            },
            None,
        ));
        list.push((
            DocumentSymbol {
                name: "b".to_string(),
                ..default_symbol.clone()
            },
            Some(0),
        ));
        list.push((
            DocumentSymbol {
                name: "c".to_string(),
                ..default_symbol.clone()
            },
            Some(0),
        ));
        list.push((
            DocumentSymbol {
                name: "d".to_string(),
                ..default_symbol.clone()
            },
            Some(1),
        ));

        assert_eq!(
            build_hierarchy_from_flat_list(list),
            vec![DocumentSymbol {
                name: "a".to_string(),
                children: Some(vec![
                    DocumentSymbol {
                        name: "b".to_string(),
                        children: Some(vec![DocumentSymbol {
                            name: "d".to_string(),
                            ..default_symbol.clone()
                        }]),
                        ..default_symbol.clone()
                    },
                    DocumentSymbol {
                        name: "c".to_string(),
                        ..default_symbol.clone()
                    }
                ]),
                ..default_symbol.clone()
            }]
        )
    }
}
35.071429
99
0.570119
eb4aea5eaacf0c1cd53d72bd920c103bbde19903
15,813
use clap::{App, Arg, ArgGroup, ArgMatches}; use cluster_test::{ aws::Aws, cluster::Cluster, deployment::{DeploymentManager, SOURCE_TAG, TESTED_TAG}, effects::{Effect, Reboot}, experiments::{Experiment, RebootRandomValidators}, health::{DebugPortLogThread, HealthCheckRunner, LogTail}, log_prune::LogPruner, slack::SlackClient, suite::ExperimentSuite, }; use failure::{ self, prelude::{bail, format_err}, }; use std::{ collections::HashSet, env, sync::mpsc::{self, TryRecvError}, thread, time::{Duration, Instant}, }; use termion::{color, style}; const HEALTH_POLL_INTERVAL: Duration = Duration::from_secs(5); pub fn main() { let matches = arg_matches(); if matches.is_present(ARG_PRUNE) { let util = ClusterUtil::setup(&matches); util.prune_logs(); return; } let mut runner = ClusterTestRunner::setup(&matches); if matches.is_present(ARG_RUN) { runner.run_suite_in_loop(); } else if matches.is_present(ARG_RUN_ONCE) { let experiment = RebootRandomValidators::new(3, &runner.cluster); runner.run_single_experiment(Box::new(experiment)).unwrap(); } else if matches.is_present(ARG_TAIL_LOGS) { runner.tail_logs(); } else if matches.is_present(ARG_HEALTH_CHECK) { runner.run_health_check(); } else if matches.is_present(ARG_WIPE_ALL_DB) { runner.wipe_all_db(); } else if matches.is_present(ARG_REBOOT) { runner.reboot(matches.values_of_lossy(ARG_REBOOT).unwrap()); } } struct ClusterUtil { cluster: Cluster, aws: Aws, } struct ClusterTestRunner { logs: LogTail, cluster: Cluster, health_check_runner: HealthCheckRunner, deployment_manager: DeploymentManager, experiment_interval: Duration, slack: Option<SlackClient>, } impl ClusterUtil { pub fn setup(matches: &ArgMatches) -> Self { let workplace = matches .value_of(ARG_WORKPLACE) .expect("workplace should be set"); let aws = Aws::new(workplace.into()); let peers = matches.values_of_lossy(ARG_PEERS); let cluster = Cluster::discover(&aws).expect("Failed to discover cluster"); let cluster = match peers { None => cluster, Some(peers) => 
cluster.sub_cluster(peers), }; println!("Discovered {} peers", cluster.instances().len()); Self { cluster, aws } } pub fn prune_logs(&self) { let log_prune = LogPruner::new(self.aws.clone()); log_prune.prune_logs(); } } impl ClusterTestRunner { /// Discovers cluster, setup log, etc pub fn setup(matches: &ArgMatches) -> Self { let util = ClusterUtil::setup(matches); let cluster = util.cluster; let aws = util.aws; let log_tail_started = Instant::now(); let logs = DebugPortLogThread::spawn_new(&cluster); let log_tail_startup_time = Instant::now() - log_tail_started; println!( "Log tail thread started in {} ms", log_tail_startup_time.as_millis() ); let health_check_runner = HealthCheckRunner::new_all(cluster.clone()); let experiment_interval_sec = match env::var("EXPERIMENT_INTERVAL") { Ok(s) => s.parse().expect("EXPERIMENT_INTERVAL env is not a number"), Err(..) => 15, }; let experiment_interval = Duration::from_secs(experiment_interval_sec); let deployment_manager = DeploymentManager::new(aws.clone(), cluster.clone()); let slack = SlackClient::try_new_from_environment(); Self { logs, cluster, health_check_runner, deployment_manager, experiment_interval, slack, } } pub fn run_suite_in_loop(&mut self) { let mut hash_to_tag = None; loop { if let Some(hash) = self.deployment_manager.latest_hash_changed() { println!( "New version of `{}` tag is available: `{}`", SOURCE_TAG, hash ); match self.redeploy(hash.clone()) { Err(e) => { self.report_failure(format!("Failed to deploy `{}`: {}", hash, e)); return; } Ok(true) => { self.slack_message(format!( "Deployed new version `{}`, running test suite", hash )); hash_to_tag = Some(hash); } Ok(false) => {} } } let suite = ExperimentSuite::new_pre_release(&self.cluster); if let Err(e) = self.run_suite(suite) { self.report_failure(format!("{}", e)); return; } if let Some(hash_to_tag) = hash_to_tag.take() { println!("Test suite succeed first time for `{}`", hash_to_tag); if let Err(e) = self .deployment_manager 
.tag_tested_image(hash_to_tag.clone()) { self.report_failure(format!("Failed to tag tested image: {}", e)); return; } self.slack_message(format!( "Test suite passed. Tagged `{}` as `{}`", hash_to_tag, TESTED_TAG )); } thread::sleep(self.experiment_interval); } } fn report_failure(&self, msg: String) { self.slack_message(msg); } fn redeploy(&mut self, hash: String) -> failure::Result<bool> { if !self.deployment_manager.redeploy(hash)? { return Ok(false); } println!("Waiting for 60 seconds to allow ECS to restart tasks..."); thread::sleep(Duration::from_secs(60)); println!("Waiting until all validators healthy after deployment"); self.wait_until_all_healthy()?; Ok(true) } fn run_suite(&mut self, suite: ExperimentSuite) -> failure::Result<()> { println!("Starting suite"); let suite_started = Instant::now(); for experiment in suite.experiments { let experiment_name = format!("{}", experiment); self.run_single_experiment(experiment).map_err(move |e| { format_err!("Experiment `{}` failed: `{}`", experiment_name, e) })?; thread::sleep(self.experiment_interval); } println!( "Suite completed in {:?}", Instant::now().duration_since(suite_started) ); Ok(()) } pub fn run_single_experiment( &mut self, experiment: Box<dyn Experiment>, ) -> failure::Result<()> { let events = self.logs.recv_all(); if !self.health_check_runner.run(&events).is_empty() { bail!("Some validators are unhealthy before experiment started"); } println!( "{}Starting experiment {}{}{}{}", style::Bold, color::Fg(color::Blue), experiment, color::Fg(color::Reset), style::Reset ); let affected_validators = experiment.affected_validators(); let (exp_result_sender, exp_result_recv) = mpsc::channel(); thread::spawn(move || { let result = experiment.run(); exp_result_sender .send(result) .expect("Failed to send experiment result"); }); // We expect experiments completes and cluster go into healthy state within timeout let experiment_deadline = Instant::now() + Duration::from_secs(10 * 60); loop { if Instant::now() > 
experiment_deadline { bail!("Experiment did not complete in time"); } let deadline = Instant::now() + HEALTH_POLL_INTERVAL; // Receive all events that arrived to aws log tail within next 1 second // This assumes so far that event propagation time is << 1s, this need to be refined // in future to account for actual event propagation delay let events = self.logs.recv_all_until_deadline(deadline); let failed_validators = self.health_check_runner.run(&events); for failed in failed_validators { if !affected_validators.contains(&failed) { bail!( "Validator {} failed, not expected for this experiment", failed ); } } match exp_result_recv.try_recv() { Ok(result) => { result.expect("Failed to run experiment"); break; } Err(TryRecvError::Empty) => { // Experiment in progress, continue monitoring health } Err(TryRecvError::Disconnected) => { panic!("Experiment thread exited without returning result"); } } } println!( "{}Experiment finished, waiting until all affected validators recover{}", style::Bold, style::Reset ); for validator in affected_validators.iter() { self.health_check_runner.invalidate(validator); } loop { if Instant::now() > experiment_deadline { bail!("Cluster did not become healthy in time"); } let deadline = Instant::now() + HEALTH_POLL_INTERVAL; // Receive all events that arrived to aws log tail within next 1 second // This assumes so far that event propagation time is << 1s, this need to be refined // in future to account for actual event propagation delay let events = self.logs.recv_all_until_deadline(deadline); let failed_validators = self.health_check_runner.run(&events); let mut still_affected_validator = HashSet::new(); for failed in failed_validators { if !affected_validators.contains(&failed) { bail!( "Validator {} failed, not expected for this experiment", failed ); } still_affected_validator.insert(failed); } if still_affected_validator.is_empty() { break; } } println!("Experiment completed"); Ok(()) } fn run_health_check(&mut self) { loop { let 
deadline = Instant::now() + Duration::from_secs(1); // Receive all events that arrived to aws log tail within next 1 second // This assumes so far that event propagation time is << 1s, this need to be refined // in future to account for actual event propagation delay let events = self.logs.recv_all_until_deadline(deadline); self.health_check_runner.run(&events); } } fn wait_until_all_healthy(&mut self) -> failure::Result<()> { let wait_deadline = Instant::now() + Duration::from_secs(10 * 60); for instance in self.cluster.instances() { self.health_check_runner.invalidate(instance.short_hash()); } loop { let now = Instant::now(); if now > wait_deadline { bail!("Validators did not become healthy after deployment"); } let deadline = now + HEALTH_POLL_INTERVAL; let events = self.logs.recv_all_until_deadline(deadline); if self.health_check_runner.run(&events).is_empty() { break; } } Ok(()) } fn tail_logs(self) { for log in self.logs.event_receiver { println!("{:?}", log); } } fn slack_message(&self, msg: String) { println!("{}", msg); if let Some(ref slack) = self.slack { if let Err(e) = slack.send_message(&msg) { println!("Failed to send slack message: {}", e); } } } fn wipe_all_db(self) { println!("Going to wipe db on all validators in cluster!"); println!("Waiting 10 seconds before proceed"); thread::sleep(Duration::from_secs(10)); println!("Starting..."); for instance in self.cluster.instances() { if let Err(e) = instance.run_cmd_tee_err(vec!["sudo", "rm", "-rf", "/data/libra/"]) { println!("Failed to wipe {}: {:?}", instance, e); } } println!("Done"); } fn reboot(self, validators: Vec<String>) { let mut reboots = vec![]; for validator in validators { match self.cluster.get_instance(&validator) { None => println!("{} not found", validator), Some(instance) => { println!("Rebooting {}", validator); let reboot = Reboot::new(instance.clone()); if let Err(err) = reboot.apply() { println!("Failed to reboot {}: {:?}", validator, err); } else { reboots.push(reboot); } } } } 
println!("Waiting to complete"); while reboots.iter().any(|r| !r.is_complete()) { thread::sleep(Duration::from_secs(5)); } println!("Completed"); } } const ARG_WORKPLACE: &str = "workplace"; const ARG_PEERS: &str = "peers"; // Actions: const ARG_TAIL_LOGS: &str = "tail-logs"; const ARG_HEALTH_CHECK: &str = "health-check"; const ARG_RUN: &str = "run"; const ARG_RUN_ONCE: &str = "run-once"; const ARG_WIPE_ALL_DB: &str = "wipe-all-db"; const ARG_REBOOT: &str = "reboot"; const ARG_PRUNE: &str = "prune_logs"; fn arg_matches() -> ArgMatches<'static> { // Parameters let workplace = Arg::with_name(ARG_WORKPLACE) .long("--workplace") .short("-w") .takes_value(true) .required(true); let peers = Arg::with_name(ARG_PEERS) .long("--peers") .short("-p") .takes_value(true) .use_delimiter(true) .conflicts_with(ARG_PRUNE); // Actions let wipe_all_db = Arg::with_name(ARG_WIPE_ALL_DB).long("--wipe-all-db"); let run = Arg::with_name(ARG_RUN).long("--run"); let run_once = Arg::with_name(ARG_RUN_ONCE).long("--run-once"); let tail_logs = Arg::with_name(ARG_TAIL_LOGS).long("--tail-logs"); let health_check = Arg::with_name(ARG_HEALTH_CHECK).long("--health-check"); let prune_logs = Arg::with_name(ARG_PRUNE).long("--prune-logs"); let reboot = Arg::with_name(ARG_REBOOT) .long("--reboot") .takes_value(true) .use_delimiter(true); // This grouping requires one and only one action (tail logs, run test, etc) let action_group = ArgGroup::with_name("action") .args(&[ ARG_TAIL_LOGS, ARG_RUN, ARG_RUN_ONCE, ARG_HEALTH_CHECK, ARG_WIPE_ALL_DB, ARG_REBOOT, ARG_PRUNE, ]) .required(true); App::new("cluster_test") .author("Libra Association <opensource@libra.org>") .group(action_group) .args(&[ // parameters workplace, peers, // actions run, run_once, tail_logs, health_check, wipe_all_db, reboot, prune_logs, ]) .get_matches() }
35.375839
97
0.546386
3ae9c668a829f8bb07c14280e5990bcef3ba1663
999
use language::operations::{make_param_doc, Operation, ParamInfo}; pub struct ReplaceScenePropsOp; const DOC : &str = "Replaces all instances of specified scene prop type with another scene prop type. Commonly used to replace damaged walls with their intact versions during normal visits to castle scenes. Can only be called in ti_before_mission_start trigger in module_mission_templates.py."; pub const OP_CODE: u32 = 1890; pub const IDENT: &str = "replace_scene_props"; impl Operation for ReplaceScenePropsOp { fn op_code(&self) -> u32 { OP_CODE } fn documentation(&self) -> &'static str { DOC } fn identifier(&self) -> &'static str { IDENT } fn param_info(&self) -> ParamInfo { ParamInfo { num_required: 2, num_optional: 0, param_docs: vec![ make_param_doc("<old_scene_prop_id>", ""), make_param_doc("<new_scene_prop_id>", ""), ], } } }
28.542857
294
0.634635
dd45b6ed278e8cca74fb7c03f08b0e89a08e1868
941
// Tests that array sizes that depend on const-params are checked using `ConstEvaluatable`. // revisions: full min #![cfg_attr(full, feature(const_generics))] #![cfg_attr(full, allow(incomplete_features))] #![cfg_attr(min, feature(min_const_generics))] #[allow(dead_code)] struct ArithArrayLen<const N: usize>([u32; 0 + N]); //[full]~^ ERROR constant expression depends on a generic parameter //[min]~^^ ERROR generic parameters must not be used inside of non-trivial constant values #[derive(PartialEq, Eq)] struct Config { arr_size: usize, } struct B<const CFG: Config> { //[min]~^ ERROR `Config` is forbidden arr: [u8; CFG.arr_size], //[full]~^ ERROR constant expression depends on a generic parameter //[min]~^^ ERROR generic parameters must not be used inside of non-trivial } const C: Config = Config { arr_size: 5 }; fn main() { let b = B::<C> { arr: [1, 2, 3, 4, 5] }; assert_eq!(b.arr.len(), 5); }
30.354839
91
0.683316
dd3a3dcf9e7362e7894168797174860ff573d3fc
5,290
use crate::utils::{ConfigurationResult, ServerError}; use clingo::{parse_term, Part, Symbol, TruthValue}; use serde_json::Value; pub fn json_to_configuration_result(val: &Value) -> Result<ConfigurationResult, ServerError> { match val { Value::String(s) => Ok(ConfigurationResult::Value(s.clone())), Value::Null => Err(ServerError::InternalError( "Could not parse configuration data".to_string(), )), Value::Bool(_) => Err(ServerError::InternalError( "Could not parse configuration data".to_string(), )), Value::Number(_) => Err(ServerError::InternalError( "Could not parse configuration data".to_string(), )), Value::Array(a) => { let mut arr = Vec::with_capacity(a.len()); for val in a { let x = json_to_configuration_result(val)?; arr.push(x) } Ok(ConfigurationResult::Array(arr)) } Value::Object(m) => { let mut arr = Vec::with_capacity(m.len()); for (e, val) in m { let x = json_to_configuration_result(val)?; arr.push((e.clone(), x)) } Ok(ConfigurationResult::Map(arr)) } } } pub fn json_to_symbol(val: &Value) -> Result<Symbol, ServerError> { match val { Value::String(s) => { let sym = clingo::parse_term(s)?; Ok(sym) } _ => Err(ServerError::InternalError( "Could not parse symbol data".to_string(), )), } } fn json_to_symbol_array(val: &Value) -> Result<Vec<Symbol>, ServerError> { match val { Value::Array(a) => { let mut arr = Vec::with_capacity(a.len()); for val in a { let x = json_to_symbol(val)?; arr.push(x) } Ok(arr) } _ => Err(ServerError::InternalError( "Could not parse parts data".to_string(), )), } } pub fn json_to_parts(val: &Value) -> Result<Vec<Part>, ServerError> { match val { Value::Object(m) => { let mut parts = Vec::with_capacity(m.len()); for (e, val) in m { let x = json_to_symbol_array(val)?; let part = Part::new(e, x)?; parts.push(part) } Ok(parts) } _ => Err(ServerError::InternalError( "Could not parse parts data".to_string(), )), } } pub fn json_to_assignment(val: &Value) -> Result<(Symbol, TruthValue), ServerError> { let parse_error = || 
ServerError::InternalError("Could not parse assignment data".to_string()); match val { Value::Object(m) => { let val = m.get("literal").ok_or_else(parse_error)?; let symbol = match val { Value::String(e) => parse_term(e)?, _ => { return Err(ServerError::InternalError( "Could not parse assignment data".to_string(), )) } }; let val = m.get("truth_value").ok_or_else(parse_error)?; let truth_value = match val { Value::String(e) => match e.as_str() { "True" => Ok(TruthValue::True), "False" => Ok(TruthValue::False), "Free" => Ok(TruthValue::Free), _ => Err(parse_error()), }, _ => Err(parse_error()), }?; Ok((symbol, truth_value)) } _ => Err(parse_error()), } } pub fn json_to_assumptions(val: &Value) -> Result<Vec<(clingo::Symbol, bool)>, ServerError> { match val { Value::Array(a) => { let mut arr = Vec::with_capacity(a.len()); for val in a { let val = match val { Value::Array(a) => { let name = match a.get(0) { Some(Value::String(s)) => s, _ => { return Err(ServerError::InternalError( "Could not parse assumptions data".to_string(), )) } }; let sym = clingo::parse_term(&name)?; let sign = match a.get(1) { Some(Value::Bool(b)) => *b, _ => { return Err(ServerError::InternalError( "Could not parse assumptions data".to_string(), )) } }; (sym, sign) } _ => { return Err(ServerError::InternalError( "Could not parse assumptions data".to_string(), )) } }; arr.push(val) } Ok(arr) } _ => Err(ServerError::InternalError( "Could not parse assumptions data".to_string(), )), } }
35.266667
99
0.44707
7a834657f0f836a2a1691a578651d058d56ad604
7,249
use std::{ fs::OpenOptions, io::{stdin, stdout, BufWriter, Read, Write}, path::Path, }; use clap::{arg_enum, App, AppSettings, Arg}; #[cfg(not(feature = "wasm"))] use grass::{from_path, from_string, Options, OutputStyle}; // TODO remove this arg_enum! { #[derive(PartialEq, Debug)] pub enum Style { Expanded, Compressed, } } arg_enum! { #[derive(PartialEq, Debug)] pub enum SourceMapUrls { Relative, Absolute, } } #[cfg(feature = "wasm")] fn main() {} #[cfg(not(feature = "wasm"))] #[cfg_attr(feature = "profiling", inline(never))] fn main() -> std::io::Result<()> { let matches = App::new("grass") .setting(AppSettings::ColoredHelp) .version(env!("CARGO_PKG_VERSION")) .about("A near-feature-complete Sass compiler written purely in Rust") .version_short("v") .arg( Arg::with_name("STDIN") .long("stdin") .help("Read the stylesheet from stdin"), ) .arg( Arg::with_name("INDENTED") .long("indented") .hidden(true) .help("Use the indented syntax for input from stdin"), ) .arg( Arg::with_name("LOAD_PATH") .short("I") .long("load-path") .help("A path to use when resolving imports. 
May be passed multiple times.") .multiple(true) .takes_value(true) .number_of_values(1) ) .arg( Arg::with_name("STYLE") // this is required for compatibility with ruby sass .short("t") // FIXME change this to short_alias later .short("s") .long("style") .help("Minified or expanded output") .default_value("expanded") .case_insensitive(true) .possible_values(&Style::variants()) .takes_value(true), ) .arg( Arg::with_name("NO_CHARSET") .long("no-charset") .help("Don't emit a @charset or BOM for CSS with non-ASCII characters."), ) .arg( Arg::with_name("UPDATE") .long("update") .hidden(true) .help("Only compile out-of-date stylesheets."), ) .arg( Arg::with_name("NO_ERROR_CSS") .long("no-error-css") .hidden(true) .help("When an error occurs, don't emit a stylesheet describing it."), ) // Source maps .arg( Arg::with_name("NO_SOURCE_MAP") .long("no-source-map") .hidden(true) .help("Whether to generate source maps."), ) .arg( Arg::with_name("SOURCE_MAP_URLS") .long("source-map-urls") .hidden(true) .help("How to link from source maps to source files.") .default_value("relative") .case_insensitive(true) .possible_values(&SourceMapUrls::variants()) .takes_value(true), ) .arg( Arg::with_name("EMBED_SOURCES") .long("embed-sources") .hidden(true) .help("Embed source file contents in source maps."), ) .arg( Arg::with_name("EMBED_SOURCE_MAP") .long("embed-source-map") .hidden(true) .help("Embed source map contents in CSS."), ) // Other .arg( Arg::with_name("WATCH") .long("watch") .hidden(true) .help("Watch stylesheets and recompile when they change."), ) .arg( Arg::with_name("POLL") .long("poll") .hidden(true) .help("Manually check for changes rather than using a native watcher. 
Only valid with --watch.") .requires("WATCH"), ) .arg( Arg::with_name("NO_STOP_ON_ERROR") .long("no-stop-on-error") .hidden(true) .help("Continue to compile more files after error is encountered.") ) .arg( Arg::with_name("INTERACTIVE") .short("i") .long("interactive") .hidden(true) .help("Run an interactive SassScript shell.") ) .arg( Arg::with_name("NO_COLOR") .short("c") .long("no-color") .hidden(true) .help("Whether to use terminal colors for messages.") ) .arg( Arg::with_name("NO_UNICODE") .long("no-unicode") .help("Whether to use Unicode characters for messages.") ) .arg( Arg::with_name("QUIET") .short("q") .long("quiet") .help("Don't print warnings."), ) .arg( Arg::with_name("INPUT") .required_unless("STDIN") .help("SCSS files"), ) .arg( Arg::with_name("OUTPUT") .help("Output SCSS file") ) // Hidden, legacy arguments .arg( Arg::with_name("PRECISION") .long("precision") .hidden(true) .takes_value(true) ) .get_matches(); let load_paths = matches .values_of("LOAD_PATH") .map_or_else(Vec::new, |vals| vals.map(Path::new).collect()); let style = match matches.value_of("STYLE").unwrap() { "expanded" => OutputStyle::Expanded, "compressed" => OutputStyle::Compressed, _ => unreachable!(), }; let options = &Options::default() .load_paths(&load_paths) .style(style) .quiet(matches.is_present("QUIET")) .unicode_error_messages(!matches.is_present("NO_UNICODE")) .allows_charset(!matches.is_present("NO_CHARSET")); let (mut stdout_write, mut file_write); let buf_out: &mut dyn Write = if let Some(path) = matches.value_of("OUTPUT") { file_write = BufWriter::new( OpenOptions::new() .create(true) .write(true) .truncate(true) .open(path)?, ); &mut file_write } else { stdout_write = BufWriter::new(stdout()); &mut stdout_write }; buf_out.write_all( if let Some(name) = matches.value_of("INPUT") { from_path(name, options) } else if matches.is_present("STDIN") { from_string( { let mut buffer = String::new(); stdin().read_to_string(&mut buffer)?; buffer }, options, ) } else { 
unreachable!() } .unwrap_or_else(|e| { eprintln!("{}", e); std::process::exit(1) }) .as_bytes(), )?; Ok(()) }
30.586498
112
0.472755
1e04c851827252f6ce0f3d27a9ed31e01cc3092a
2,310
use boostvoronoi::builder as VB; use boostvoronoi::BvError; type I = i32; type F = f64; fn almost_equal(x1: F, x2: F, y1: F, y2: F) -> bool { let delta = 0.0001; assert!(F::abs(x1 - x2) < delta, "{} != {}", x1, x2); assert!(F::abs(y1 - y2) < delta, "{} != {}", y1, y2); (F::abs(x1 - x2) < delta) && (F::abs(y1 - y2) < delta) } //#[ignore] #[test] /// four segments in a loop + one more fn two_segments_9() -> Result<(), BvError> { let output = { let points: [[I; 2]; 0] = []; let segments: [[I; 4]; 5] = [ [200, 200, 200, 400], [200, 400, 400, 400], [400, 400, 400, 200], [400, 200, 200, 200], [529, 242, 367, 107], ]; //let s = segments.iter().map(|x|x.into()).collect(); let _v = VB::to_points::<I, I>(&points); let _s = VB::to_segments::<I, I>(&segments); let mut vb = VB::Builder::<I, F>::default(); vb.with_vertices(_v.iter()).expect("two_segments_9"); vb.with_segments(_s.iter()).expect("two_segments_9"); vb.build().expect("two_segments_9") }; let v = output.vertices()[0].get(); assert!(almost_equal(v.x(), 200.0000000, v.y(), 200.0000000)); let v = output.vertices()[1].get(); assert!(almost_equal(v.x(), 200.0000000, v.y(), 400.0000000)); let v = output.vertices()[2].get(); assert!(almost_equal(v.x(), 333.3293560, v.y(), 147.4047728)); let v = output.vertices()[3].get(); assert!(almost_equal(v.x(), 200.0000000, v.y(), 3.5591398)); let v = output.vertices()[4].get(); assert!(almost_equal(v.x(), 400.0000000, v.y(), 200.0000000)); let v = output.vertices()[5].get(); assert!(almost_equal(v.x(), 300.0000000, v.y(), 300.0000000)); let v = output.vertices()[6].get(); assert!(almost_equal(v.x(), 400.0000000, v.y(), 400.0000000)); let v = output.vertices()[7].get(); assert!(almost_equal(v.x(), 400.0000000, v.y(), 171.5428751)); let v = output.vertices()[8].get(); assert!(almost_equal(v.x(), 430.6785590, v.y(), 200.0000000)); let v = output.vertices()[9].get(); assert!(almost_equal(v.x(), 478.6496933, v.y(), 302.4203680)); let v = output.vertices()[10].get(); 
assert!(almost_equal(v.x(), 561.2596899, v.y(), 400.0000000)); Ok(()) }
37.258065
66
0.546753
f7c27cfb275f411d71ae7effbf1649848a3c809d
725
//! //! Default windows for easy setup and event handling. //! Currently [glutin](https://crates.io/crates/glutin/main.rs) for cross-platform desktop //! and canvas using [wasm-bindgen](https://rustwasm.github.io/wasm-bindgen/) for web. //! #[doc(hidden)] pub mod frame_input; #[doc(inline)] pub use frame_input::*; #[doc(hidden)] #[cfg(all(feature = "glutin-window", not(target_arch = "wasm32")))] pub mod glutin_window; #[doc(inline)] #[cfg(all(feature = "glutin-window", not(target_arch = "wasm32")))] pub use crate::glutin_window::*; #[doc(hidden)] #[cfg(all(feature = "canvas", target_arch = "wasm32"))] pub mod canvas; #[doc(inline)] #[cfg(all(feature = "canvas", target_arch = "wasm32"))] pub use crate::canvas::*;
29
90
0.684138
71ebec40d80d2bfc01e213a06ff4205534fe991a
279
use pulldown_cmark::{html, Parser}; #[test] fn test_parse_markdown() { let markdown = include_str!("demo.md"); let parser = Parser::new(markdown); let mut html_output = String::new(); html::push_html(&mut html_output, parser); println!("{}", html_output); }
25.363636
46
0.655914
5dedc72265baa664dd2768e2dbcd2bcdd2804af6
530
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::contract_event::{ContractEvent, EventWithProof}; use lcs::test_helpers::assert_canonical_encode_decode; use proptest::prelude::*; proptest! { #[test] fn event_lcs_roundtrip(event in any::<ContractEvent>()) { assert_canonical_encode_decode(event); } #[test] fn event_with_proof_lcs_roundtrip(event_with_proof in any::<EventWithProof>()) { assert_canonical_encode_decode(event_with_proof); } }
27.894737
84
0.728302
e63e5d7038285835bcf16a78daf40af58c05e98b
4,108
use super::super::{Language, TimeUnit}; #[derive(Default)] pub struct Russian; impl Russian { fn accusative(&self, tu: TimeUnit) -> &'static str { use TimeUnit::*; match tu { Nanoseconds => "наносекунду", Microseconds => "микросекунду", Milliseconds => "миллисекунду", Seconds => "секунду", Minutes => "минуту", Hours => "час", Days => "день", Weeks => "неделю", Months => "месяц", Years => "год", } } fn genitive(&self, tu: TimeUnit) -> &'static str { use TimeUnit::*; match tu { Nanoseconds => "наносекунды", Microseconds => "микросекунды", Milliseconds => "миллисекунды", Seconds => "секунды", Minutes => "минуты", Hours => "часа", Days => "дня", Weeks => "недели", Months => "месяца", Years => "года", } } fn genitive_plural(&self, tu: TimeUnit) -> &'static str { use TimeUnit::*; match tu { Nanoseconds => "наносекунд", Microseconds => "микросекунд", Milliseconds => "миллисекунд", Seconds => "секунд", Minutes => "минут", Hours => "часов", Days => "дней", Weeks => "недель", Months => "месяцев", Years => "лет", } } } impl Language for Russian { fn too_low (&self) -> &'static str { "сейчас" } fn too_high(&self) -> &'static str { "давно" } fn ago(&self) -> &'static str { "назад" } fn get_word(&self, tu: TimeUnit, x: u64) -> &'static str { //if (11..=20).contains(x) { if (x % 100) >= 11 && (x % 100) <= 20 { self.genitive_plural(tu) } else if x % 10 == 1 { self.accusative(tu) } else if x % 10 >= 2 && x % 10 <= 4 { self.genitive(tu) } else if x % 10 >= 5 || x % 10 == 0 { self.genitive_plural(tu) } else { unreachable!() } } } #[test] fn test() { use super::super::Formatter; use std::time::Duration; let f = Formatter::with_language(Russian); assert_eq!(f.convert(Duration::from_secs(60)), "1 минуту назад"); assert_eq!(f.convert(Duration::from_secs(2)), "2 секунды назад"); assert_eq!(f.convert(Duration::from_secs(5)), "5 секунд назад"); assert_eq!(f.convert(Duration::from_secs(12)), "12 секунд назад"); assert_eq!(f.convert(Duration::from_secs(1*3600*12*366)), "6 месяцев назад"); 
assert_eq!(f.convert(Duration::from_secs(1*3600*24*366)), "1 год назад"); assert_eq!(f.convert(Duration::from_secs(2*3600*24*366)), "2 года назад"); assert_eq!(f.convert(Duration::from_secs(4*3600*24*366)), "4 года назад"); assert_eq!(f.convert(Duration::from_secs(5*3600*24*366)), "5 лет назад"); assert_eq!(f.convert(Duration::from_secs(10*3600*24*366)), "10 лет назад"); assert_eq!(f.convert(Duration::from_secs(11*3600*24*366)), "11 лет назад"); assert_eq!(f.convert(Duration::from_secs(14*3600*24*366)), "14 лет назад"); assert_eq!(f.convert(Duration::from_secs(15*3600*24*366)), "15 лет назад"); assert_eq!(f.convert(Duration::from_secs(19*3600*24*366)), "19 лет назад"); assert_eq!(f.convert(Duration::from_secs(20*3600*24*366)), "20 лет назад"); assert_eq!(f.convert(Duration::from_secs(21*3600*24*366)), "21 год назад"); assert_eq!(f.convert(Duration::from_secs(32*3600*24*366)), "32 года назад"); assert_eq!(f.convert(Duration::from_secs(99*3600*24*366)), "99 лет назад"); assert_eq!(f.convert(Duration::from_secs(100*3600*24*366)), "100 лет назад"); assert_eq!(f.convert(Duration::from_secs(101*3600*24*366)), "101 год назад"); assert_eq!(f.convert(Duration::from_secs(111*3600*24*366)), "111 лет назад"); }
41.494949
81
0.527264
756914345ab6d991e559ee754dab54e92eb3891d
9,480
use brotli; use bzip2::read::BzDecoder; use std::io::{self, ErrorKind}; use std::vec::Vec; use std::{ convert::TryInto, io::{Cursor, Read, Seek}, }; use binread::{BinRead, BinResult, ReadOptions}; #[derive(Debug, Eq, PartialEq)] pub enum CompressorType { Bz2, Brotli, } const fn as_u32_be(array: &[u8; 4]) -> u32 { ((array[0] as u32) << 24) | ((array[1] as u32) << 16) | ((array[2] as u32) << 8) | ((array[3] as u32) << 0) } const fn as_u32_le(array: &[u8; 4]) -> u32 { ((array[3] as u32) << 24) | ((array[2] as u32) << 16) | ((array[1] as u32) << 8) | ((array[0] as u32) << 0) } const fn as_u64_le(arr: &[u8; 8]) -> u64 { return (as_u32_le(&[arr[0], arr[1], arr[2], arr[3]]) as u64) | (as_u32_le(&[arr[4], arr[5], arr[6], arr[7]]) as u64) << 32; } const fn as_u64_be(arr: &[u8; 8]) -> u64 { return (as_u32_be(&[arr[0], arr[1], arr[2], arr[3]]) as u64) << 32 | as_u32_be(&[arr[4], arr[5], arr[6], arr[7]]) as u64; } const LEGACY_BSDIFF_MAGIC: u64 = as_u64_be(b"BSDIFF40"); const BSDIFF2_MAGIC: u64 = as_u64_be(b"BSDF2\x00\x00\x00"); const BSDIFF3_MAGIC: u64 = as_u64_be(b"BDF3\x00\x00\x00\x00"); fn is_valid_compressor_type(compressor_type: u8) -> bool { return compressor_type == 1 || compressor_type == 2; } fn to_compressor_type(compressor_type: u8) -> CompressorType { return match compressor_type { 1 => CompressorType::Bz2, 2 => CompressorType::Brotli, o => panic!("Invalid compressor type: {}", o), }; } fn is_valid_bsdiff_magic(magic: u64) -> bool { let bytes = magic.to_be_bytes(); return (magic & BSDIFF2_MAGIC == BSDIFF2_MAGIC && is_valid_compressor_type(bytes[5]) && is_valid_compressor_type(bytes[6]) && is_valid_compressor_type(bytes[7])) || ((magic & BSDIFF3_MAGIC == BSDIFF3_MAGIC) // && is_valid_compressor_type(bytes[4]) && is_valid_compressor_type(bytes[5]) && is_valid_compressor_type(bytes[6]) && is_valid_compressor_type(bytes[7])); } #[derive(BinRead)] #[br(assert(magic == LEGACY_BSDIFF_MAGIC ||is_valid_bsdiff_magic(magic)), little)] #[derive(Debug, Clone, Copy)] pub struct 
BsdiffFormat { #[br(big)] pub magic: u64, pub compressed_ctrl_size: u64, pub compressed_diff_size: u64, pub new_file_size: u64, } impl BsdiffFormat { fn is_legacy_bsdiff_format(&self) -> bool { return self.magic == LEGACY_BSDIFF_MAGIC; } fn is_bsdiff3_format(&self) -> bool { return self.magic & BSDIFF3_MAGIC == BSDIFF3_MAGIC; } fn get_ctrl_compressor(&self) -> CompressorType { return if self.is_legacy_bsdiff_format() { CompressorType::Bz2 } else { to_compressor_type(self.magic.to_be_bytes()[5]) }; } fn get_diff_compressor(&self) -> CompressorType { return if self.is_legacy_bsdiff_format() { CompressorType::Bz2 } else { to_compressor_type(self.magic.to_be_bytes()[6]) }; } fn get_extra_compressor(&self) -> CompressorType { return if self.is_legacy_bsdiff_format() { CompressorType::Bz2 } else { to_compressor_type(self.magic.to_be_bytes()[7]) }; } } fn read_bsdiff_int<R: Read + Seek>(reader: &mut R, ro: &ReadOptions, _: ()) -> BinResult<i64> { // BSPatch uses a non-standard encoding of integers. // Highest bit of that integer is used as a sign bit, 1 = negative // and 0 = positive. // Therefore, if the highest bit is set, flip it, then do 2's complement // to get the integer in standard form let raw = u64::read_options(reader, ro, ())?; if raw & (1 << 63) == 0 { return Ok(raw.try_into().unwrap()); } else { let parsed: i64 = (raw & ((1 << 63) - 1)) as i64; return Ok(-parsed); } } #[derive(BinRead)] #[br(little)] #[derive(Debug)] pub struct ControlEntry { // The number of bytes to copy from the source and diff stream. pub diff_size: u64, // The number of bytes to copy from the extra stream. pub extra_size: u64, // The value to add to the source pointer after patching from the diff stream. #[br(parse_with=read_bsdiff_int)] offset_increment: i64, } // Control entry has 3 u64 fields, so 24 bytes in total. 
const CONTROL_ENTRY_SIZE: usize = 24; pub trait BinreadReader: Read + Seek {} pub struct BsdiffReader<'a> { data: &'a [u8], decompressed_ctrl_stream: Vec<u8>, pub header: BsdiffFormat, } pub struct ControlEntryIter<'a> { control_entry_reader: Cursor<&'a Vec<u8>>, control_entry_stream_len: usize, } impl<'a> Iterator for ControlEntryIter<'a> { type Item = ControlEntry; fn next(&mut self) -> Option<Self::Item> { if self.control_entry_reader.stream_position().unwrap() >= self.control_entry_stream_len as u64 { return None; } return Some(ControlEntry::read(&mut self.control_entry_reader).unwrap()); } } impl<'a> ControlEntryIter<'a> { fn new( mut control_entry_reader: Cursor<&Vec<u8>>, control_entry_stream_len: usize, ) -> ControlEntryIter { control_entry_reader .seek(std::io::SeekFrom::Start(0)) .expect("Failed to seek to beginning of control stream"); return ControlEntryIter { control_entry_reader, control_entry_stream_len, }; } } impl<'a> BsdiffReader<'a> { fn decompress(data: &[u8], compressor_type: CompressorType) -> Result<Vec<u8>, std::io::Error> { let mut buf = Vec::new(); match compressor_type { CompressorType::Brotli => { let mut reader = brotli::Decompressor::new(data, 4096 /* buffer size */); reader.read_to_end(&mut buf)?; } CompressorType::Bz2 => { let mut reader = BzDecoder::new(data); reader.read_to_end(&mut buf)?; } }; return Ok(buf); } pub fn new(data: &'a [u8]) -> Result<BsdiffReader<'a>, binread::Error> { let mut reader = Cursor::new(data); let header = BsdiffFormat::read(&mut reader)?; if header.is_bsdiff3_format() { let mut buf = [0 as u8; 8]; reader.read_exact(&mut buf).unwrap(); let compressed_mask_size = as_u64_le(&buf); let compressed_diff_size = header.compressed_diff_size; let compressed_diff_data = &data[32 + 8 + header.compressed_ctrl_size as usize..] 
[..header.compressed_diff_size as usize]; let decompressed_diff_size = Self::decompress(compressed_diff_data, header.get_ctrl_compressor()) .unwrap() .len(); let compressed_mask_data = &data[data.len() - compressed_mask_size as usize..]; let decompressed_mask_size = Self::decompress(compressed_mask_data, CompressorType::Brotli) .unwrap() .len(); println!( "Mask data: {}/{} = {}, diff data: {}/{} = {}", compressed_mask_size, decompressed_mask_size, compressed_mask_size as f32 / decompressed_mask_size as f32, compressed_diff_size, decompressed_diff_size, compressed_diff_size as f32 / decompressed_diff_size as f32, ); return Err(binread::Error::Io(std::io::Error::new( ErrorKind::InvalidData, "unsupported bsdiff3 format", ))); } // header takes up 32 bytes, so control stream start at offset 32. let decompressed_ctrl_stream = Self::decompress(&data[32..], header.get_ctrl_compressor())?; if decompressed_ctrl_stream.len() % CONTROL_ENTRY_SIZE != 0 { return Err(binread::Error::Io(std::io::Error::new( ErrorKind::InvalidData, format!( "Decompressed ctrl stream has length {}, which is not a multiple of {}", decompressed_ctrl_stream.len(), CONTROL_ENTRY_SIZE ), ))); } let compressed_diff_stream = &data[32 + header.compressed_ctrl_size as usize..] 
[..header.compressed_diff_size as usize]; let decompressed_diff_stream = Self::decompress(compressed_diff_stream, header.get_diff_compressor())?; let diff_stream_size = decompressed_diff_stream.len(); let diff_stream_zero_count = decompressed_diff_stream .into_iter() .map(|x| (x == 0) as u32) .sum::<u32>(); println!( "Diff stream has {}/{} = {}% zeros", diff_stream_zero_count, diff_stream_size, (diff_stream_zero_count as f64) / diff_stream_size as f64 * 100.0 ); return Ok(BsdiffReader { data, decompressed_ctrl_stream, header, }); } pub fn control_entries(&self) -> ControlEntryIter { let control_entry_reader = Cursor::new(&self.decompressed_ctrl_stream); return ControlEntryIter::new(control_entry_reader, self.decompressed_ctrl_stream.len()); } pub fn get_new_file_size(&self) -> u64 { return self.header.new_file_size; } }
34.347826
100
0.589768
3a457f3e04b24d6da76400e9220d90a9bc9330c1
2,078
/* * EVE Swagger Interface * * An OpenAPI for EVE Online * * OpenAPI spec version: 1.3.8 * * Generated by: https://github.com/swagger-api/swagger-codegen.git */ /// GetStatusOk : 200 ok object #[allow(unused_imports)] use serde_json::Value; #[derive(Debug, Serialize, Deserialize)] pub struct GetStatusOk { /// Current online player count #[serde(rename = "players")] players: i32, /// Running version as string #[serde(rename = "server_version")] server_version: String, /// Server start timestamp #[serde(rename = "start_time")] start_time: String, /// If the server is in VIP mode #[serde(rename = "vip")] vip: Option<bool> } impl GetStatusOk { /// 200 ok object pub fn new(players: i32, server_version: String, start_time: String) -> GetStatusOk { GetStatusOk { players: players, server_version: server_version, start_time: start_time, vip: None } } pub fn set_players(&mut self, players: i32) { self.players = players; } pub fn with_players(mut self, players: i32) -> GetStatusOk { self.players = players; self } pub fn players(&self) -> &i32 { &self.players } pub fn set_server_version(&mut self, server_version: String) { self.server_version = server_version; } pub fn with_server_version(mut self, server_version: String) -> GetStatusOk { self.server_version = server_version; self } pub fn server_version(&self) -> &String { &self.server_version } pub fn set_start_time(&mut self, start_time: String) { self.start_time = start_time; } pub fn with_start_time(mut self, start_time: String) -> GetStatusOk { self.start_time = start_time; self } pub fn start_time(&self) -> &String { &self.start_time } pub fn set_vip(&mut self, vip: bool) { self.vip = Some(vip); } pub fn with_vip(mut self, vip: bool) -> GetStatusOk { self.vip = Some(vip); self } pub fn vip(&self) -> Option<&bool> { self.vip.as_ref() } pub fn reset_vip(&mut self) { self.vip = None; } }
19.603774
87
0.646776
147347a75abe8264f04a1f140240ae809fb7189d
21,829
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Code for type-checking closure expressions. use super::{check_fn, Expectation, FnCtxt, GeneratorTypes}; use astconv::AstConv; use rustc::hir::def_id::DefId; use rustc::infer::{InferOk, InferResult}; use rustc::infer::LateBoundRegionConversionTime; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::ty::{self, ToPolyTraitRef, Ty}; use rustc::ty::subst::Substs; use rustc::ty::TypeFoldable; use std::cmp; use std::iter; use syntax::abi::Abi; use rustc::hir; struct ClosureSignatures<'tcx> { bound_sig: ty::PolyFnSig<'tcx>, liberated_sig: ty::FnSig<'tcx>, } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn check_expr_closure( &self, expr: &hir::Expr, _capture: hir::CaptureClause, decl: &'gcx hir::FnDecl, body_id: hir::BodyId, expected: Expectation<'tcx>, ) -> Ty<'tcx> { debug!( "check_expr_closure(expr={:?},expected={:?})", expr, expected ); // It's always helpful for inference if we know the kind of // closure sooner rather than later, so first examine the expected // type, and see if can glean a closure kind from there. 
let (expected_sig, expected_kind) = match expected.to_option(self) { Some(ty) => self.deduce_expectations_from_expected_type(ty), None => (None, None), }; let body = self.tcx.hir.body(body_id); self.check_closure(expr, expected_kind, decl, body, expected_sig) } fn check_closure( &self, expr: &hir::Expr, opt_kind: Option<ty::ClosureKind>, decl: &'gcx hir::FnDecl, body: &'gcx hir::Body, expected_sig: Option<ty::FnSig<'tcx>>, ) -> Ty<'tcx> { debug!( "check_closure(opt_kind={:?}, expected_sig={:?})", opt_kind, expected_sig ); let expr_def_id = self.tcx.hir.local_def_id(expr.id); let ClosureSignatures { bound_sig, liberated_sig, } = self.sig_of_closure(expr_def_id, decl, body, expected_sig); debug!("check_closure: ty_of_closure returns {:?}", liberated_sig); let generator_types = check_fn( self, self.param_env, liberated_sig, decl, expr.id, body, true, ).1; // Create type variables (for now) to represent the transformed // types of upvars. These will be unified during the upvar // inference phase (`upvar.rs`). let base_substs = Substs::identity_for_item(self.tcx, self.tcx.closure_base_def_id(expr_def_id)); let substs = base_substs.extend_to( self.tcx, expr_def_id, |_, _| span_bug!(expr.span, "closure has region param"), |_, _| { self.infcx .next_ty_var(TypeVariableOrigin::ClosureSynthetic(expr.span)) }, ); let substs = ty::ClosureSubsts { substs }; let closure_type = self.tcx.mk_closure(expr_def_id, substs); if let Some(GeneratorTypes { yield_ty, interior }) = generator_types { self.demand_eqtype(expr.span, yield_ty, substs.generator_yield_ty(expr_def_id, self.tcx)); self.demand_eqtype(expr.span, liberated_sig.output(), substs.generator_return_ty(expr_def_id, self.tcx)); return self.tcx.mk_generator(expr_def_id, substs, interior); } debug!( "check_closure: expr.id={:?} closure_type={:?}", expr.id, closure_type ); // Tuple up the arguments and insert the resulting function type into // the `closures` table. 
let sig = bound_sig.map_bound(|sig| { self.tcx.mk_fn_sig( iter::once(self.tcx.intern_tup(sig.inputs(), false)), sig.output(), sig.variadic, sig.unsafety, sig.abi, ) }); debug!( "check_closure: expr_def_id={:?}, sig={:?}, opt_kind={:?}", expr_def_id, sig, opt_kind ); let sig_fn_ptr_ty = self.tcx.mk_fn_ptr(sig); self.demand_eqtype(expr.span, sig_fn_ptr_ty, substs.closure_sig_ty(expr_def_id, self.tcx)); if let Some(kind) = opt_kind { self.demand_eqtype(expr.span, kind.to_ty(self.tcx), substs.closure_kind_ty(expr_def_id, self.tcx)); } closure_type } fn deduce_expectations_from_expected_type( &self, expected_ty: Ty<'tcx>, ) -> (Option<ty::FnSig<'tcx>>, Option<ty::ClosureKind>) { debug!( "deduce_expectations_from_expected_type(expected_ty={:?})", expected_ty ); match expected_ty.sty { ty::TyDynamic(ref object_type, ..) => { let sig = object_type .projection_bounds() .filter_map(|pb| { let pb = pb.with_self_ty(self.tcx, self.tcx.types.err); self.deduce_sig_from_projection(&pb) }) .next(); let kind = object_type .principal() .and_then(|p| self.tcx.lang_items().fn_trait_kind(p.def_id())); (sig, kind) } ty::TyInfer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), ty::TyFnPtr(sig) => (Some(sig.skip_binder().clone()), Some(ty::ClosureKind::Fn)), _ => (None, None), } } fn deduce_expectations_from_obligations( &self, expected_vid: ty::TyVid, ) -> (Option<ty::FnSig<'tcx>>, Option<ty::ClosureKind>) { let fulfillment_cx = self.fulfillment_cx.borrow(); // Here `expected_ty` is known to be a type inference variable. let expected_sig = fulfillment_cx .pending_obligations() .iter() .map(|obligation| &obligation.obligation) .filter_map(|obligation| { debug!( "deduce_expectations_from_obligations: obligation.predicate={:?}", obligation.predicate ); match obligation.predicate { // Given a Projection predicate, we can potentially infer // the complete signature. 
ty::Predicate::Projection(ref proj_predicate) => { let trait_ref = proj_predicate.to_poly_trait_ref(self.tcx); self.self_type_matches_expected_vid(trait_ref, expected_vid) .and_then(|_| self.deduce_sig_from_projection(proj_predicate)) } _ => None, } }) .next(); // Even if we can't infer the full signature, we may be able to // infer the kind. This can occur if there is a trait-reference // like `F : Fn<A>`. Note that due to subtyping we could encounter // many viable options, so pick the most restrictive. let expected_kind = fulfillment_cx .pending_obligations() .iter() .map(|obligation| &obligation.obligation) .filter_map(|obligation| { let opt_trait_ref = match obligation.predicate { ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref(self.tcx)), ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()), ty::Predicate::Equate(..) => None, ty::Predicate::Subtype(..) => None, ty::Predicate::RegionOutlives(..) => None, ty::Predicate::TypeOutlives(..) => None, ty::Predicate::WellFormed(..) => None, ty::Predicate::ObjectSafe(..) => None, ty::Predicate::ConstEvaluatable(..) => None, // NB: This predicate is created by breaking down a // `ClosureType: FnFoo()` predicate, where // `ClosureType` represents some `TyClosure`. It can't // possibly be referring to the current closure, // because we haven't produced the `TyClosure` for // this closure yet; this is exactly why the other // code is looking for a self type of a unresolved // inference variable. ty::Predicate::ClosureKind(..) => None, }; opt_trait_ref .and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid)) .and_then(|tr| self.tcx.lang_items().fn_trait_kind(tr.def_id())) }) .fold(None, |best, cur| { Some(best.map_or(cur, |best| cmp::min(best, cur))) }); (expected_sig, expected_kind) } /// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce /// everything we need to know about a closure. 
fn deduce_sig_from_projection( &self, projection: &ty::PolyProjectionPredicate<'tcx>, ) -> Option<ty::FnSig<'tcx>> { let tcx = self.tcx; debug!("deduce_sig_from_projection({:?})", projection); let trait_ref = projection.to_poly_trait_ref(tcx); if tcx.lang_items().fn_trait_kind(trait_ref.def_id()).is_none() { return None; } let arg_param_ty = trait_ref.substs().type_at(1); let arg_param_ty = self.resolve_type_vars_if_possible(&arg_param_ty); debug!( "deduce_sig_from_projection: arg_param_ty {:?}", arg_param_ty ); let input_tys = match arg_param_ty.sty { ty::TyTuple(tys, _) => tys.into_iter(), _ => { return None; } }; let ret_param_ty = projection.0.ty; let ret_param_ty = self.resolve_type_vars_if_possible(&ret_param_ty); debug!( "deduce_sig_from_projection: ret_param_ty {:?}", ret_param_ty ); let fn_sig = self.tcx.mk_fn_sig( input_tys.cloned(), ret_param_ty, false, hir::Unsafety::Normal, Abi::Rust, ); debug!("deduce_sig_from_projection: fn_sig {:?}", fn_sig); Some(fn_sig) } fn self_type_matches_expected_vid( &self, trait_ref: ty::PolyTraitRef<'tcx>, expected_vid: ty::TyVid, ) -> Option<ty::PolyTraitRef<'tcx>> { let self_ty = self.shallow_resolve(trait_ref.self_ty()); debug!( "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})", trait_ref, self_ty ); match self_ty.sty { ty::TyInfer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref), _ => None, } } fn sig_of_closure( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, expected_sig: Option<ty::FnSig<'tcx>>, ) -> ClosureSignatures<'tcx> { if let Some(e) = expected_sig { self.sig_of_closure_with_expectation(expr_def_id, decl, body, e) } else { self.sig_of_closure_no_expectation(expr_def_id, decl, body) } } /// If there is no expected signature, then we will convert the /// types that the user gave into a signature. 
fn sig_of_closure_no_expectation( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, ) -> ClosureSignatures<'tcx> { debug!("sig_of_closure_no_expectation()"); let bound_sig = self.supplied_sig_of_closure(decl); self.closure_sigs(expr_def_id, body, bound_sig) } /// Invoked to compute the signature of a closure expression. This /// combines any user-provided type annotations (e.g., `|x: u32| /// -> u32 { .. }`) with the expected signature. /// /// The approach is as follows: /// /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations. /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any. /// - If we have no expectation `E`, then the signature of the closure is `S`. /// - Otherwise, the signature of the closure is E. Moreover: /// - Skolemize the late-bound regions in `E`, yielding `E'`. /// - Instantiate all the late-bound regions bound in the closure within `S` /// with fresh (existential) variables, yielding `S'` /// - Require that `E' = S'` /// - We could use some kind of subtyping relationship here, /// I imagine, but equality is easier and works fine for /// our purposes. /// /// The key intuition here is that the user's types must be valid /// from "the inside" of the closure, but the expectation /// ultimately drives the overall signature. /// /// # Examples /// /// ``` /// fn with_closure<F>(_: F) /// where F: Fn(&u32) -> &u32 { .. } /// /// with_closure(|x: &u32| { ... }) /// ``` /// /// Here: /// - E would be `fn(&u32) -> &u32`. /// - S would be `fn(&u32) -> /// - E' is `&'!0 u32 -> &'!0 u32` /// - S' is `&'?0 u32 -> ?T` /// /// S' can be unified with E' with `['?0 = '!0, ?T = &'!10 u32]`. /// /// # Arguments /// /// - `expr_def_id`: the def-id of the closure expression /// - `decl`: the HIR declaration of the closure /// - `body`: the body of the closure /// - `expected_sig`: the expected signature (if any). 
Note that /// this is missing a binder: that is, there may be late-bound /// regions with depth 1, which are bound then by the closure. fn sig_of_closure_with_expectation( &self, expr_def_id: DefId, decl: &hir::FnDecl, body: &hir::Body, expected_sig: ty::FnSig<'tcx>, ) -> ClosureSignatures<'tcx> { debug!( "sig_of_closure_with_expectation(expected_sig={:?})", expected_sig ); // Watch out for some surprises and just ignore the // expectation if things don't see to match up with what we // expect. if expected_sig.variadic != decl.variadic { return self.sig_of_closure_no_expectation(expr_def_id, decl, body); } else if expected_sig.inputs_and_output.len() != decl.inputs.len() + 1 { // we could probably handle this case more gracefully return self.sig_of_closure_no_expectation(expr_def_id, decl, body); } // Create a `PolyFnSig`. Note the oddity that late bound // regions appearing free in `expected_sig` are now bound up // in this binder we are creating. assert!(!expected_sig.has_regions_escaping_depth(1)); let bound_sig = ty::Binder(self.tcx.mk_fn_sig( expected_sig.inputs().iter().cloned(), expected_sig.output(), decl.variadic, hir::Unsafety::Normal, Abi::RustCall, )); // `deduce_expectations_from_expected_type` introduces // late-bound lifetimes defined elsewhere, which we now // anonymize away, so as not to confuse the user. let bound_sig = self.tcx.anonymize_late_bound_regions(&bound_sig); let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig); // Up till this point, we have ignored the annotations that the user // gave. This function will check that they unify successfully. // Along the way, it also writes out entries for types that the user // wrote into our tables, which are then later used by the privacy // check. 
match self.check_supplied_sig_against_expectation(decl, &closure_sigs) { Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok), Err(_) => return self.sig_of_closure_no_expectation(expr_def_id, decl, body), } closure_sigs } /// Enforce the user's types against the expectation. See /// `sig_of_closure_with_expectation` for details on the overall /// strategy. fn check_supplied_sig_against_expectation( &self, decl: &hir::FnDecl, expected_sigs: &ClosureSignatures<'tcx>, ) -> InferResult<'tcx, ()> { // Get the signature S that the user gave. // // (See comment on `sig_of_closure_with_expectation` for the // meaning of these letters.) let supplied_sig = self.supplied_sig_of_closure(decl); debug!( "check_supplied_sig_against_expectation: supplied_sig={:?}", supplied_sig ); // FIXME(#45727): As discussed in [this comment][c1], naively // forcing equality here actually results in suboptimal error // messages in some cases. For now, if there would have been // an obvious error, we fallback to declaring the type of the // closure to be the one the user gave, which allows other // error message code to trigger. // // However, I think [there is potential to do even better // here][c2], since in *this* code we have the precise span of // the type parameter in question in hand when we report the // error. // // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706 // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796 self.infcx.commit_if_ok(|_| { let mut all_obligations = vec![]; // The liberated version of this signature should be be a subtype // of the liberated form of the expectation. for ((hir_ty, &supplied_ty), expected_ty) in decl.inputs.iter() .zip(*supplied_sig.inputs().skip_binder()) // binder moved to (*) below .zip(expected_sigs.liberated_sig.inputs()) // `liberated_sig` is E'. { // Instantiate (this part of..) S to S', i.e., with fresh variables. 
let (supplied_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( hir_ty.span, LateBoundRegionConversionTime::FnCall, &ty::Binder(supplied_ty), ); // recreated from (*) above // Check that E' = S'. let cause = &self.misc(hir_ty.span); let InferOk { value: (), obligations, } = self.at(cause, self.param_env) .eq(*expected_ty, supplied_ty)?; all_obligations.extend(obligations); } let (supplied_output_ty, _) = self.infcx.replace_late_bound_regions_with_fresh_var( decl.output.span(), LateBoundRegionConversionTime::FnCall, &supplied_sig.output(), ); let cause = &self.misc(decl.output.span()); let InferOk { value: (), obligations, } = self.at(cause, self.param_env) .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?; all_obligations.extend(obligations); Ok(InferOk { value: (), obligations: all_obligations, }) }) } /// If there is no expected signature, then we will convert the /// types that the user gave into a signature. fn supplied_sig_of_closure(&self, decl: &hir::FnDecl) -> ty::PolyFnSig<'tcx> { let astconv: &AstConv = self; // First, convert the types that the user supplied (if any). let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a)); let supplied_return = match decl.output { hir::Return(ref output) => astconv.ast_ty_to_ty(&output), hir::DefaultReturn(_) => astconv.ty_infer(decl.output.span()), }; let result = ty::Binder(self.tcx.mk_fn_sig( supplied_arguments, supplied_return, decl.variadic, hir::Unsafety::Normal, Abi::RustCall, )); debug!("supplied_sig_of_closure: result={:?}", result); result } fn closure_sigs( &self, expr_def_id: DefId, body: &hir::Body, bound_sig: ty::PolyFnSig<'tcx>, ) -> ClosureSignatures<'tcx> { let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, &bound_sig); let liberated_sig = self.inh.normalize_associated_types_in( body.value.span, body.value.id, self.param_env, &liberated_sig, ); ClosureSignatures { bound_sig, liberated_sig, } } }
37.766436
98
0.558569
e67c2b62a5d01816330be907f06cdcb806d02cc7
3,742
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::model::{ actions::{Action, ActionKey, ActionSet, DiscoverAction}, component::{ Component, ComponentInstance, InstanceState, ResolvedInstanceState, WeakComponentInstance, }, error::ModelError, hooks::{Event, EventError, EventErrorPayload, EventPayload}, resolver::Resolver, }, async_trait::async_trait, std::convert::TryFrom, std::sync::Arc, }; /// Resolves a component instance's declaration and initializes its state. pub struct ResolveAction {} impl ResolveAction { pub fn new() -> Self { Self {} } } #[async_trait] impl Action for ResolveAction { type Output = Result<Component, ModelError>; async fn handle(&self, component: &Arc<ComponentInstance>) -> Self::Output { do_resolve(component).await } fn key(&self) -> ActionKey { ActionKey::Resolve } } async fn do_resolve(component: &Arc<ComponentInstance>) -> Result<Component, ModelError> { // Ensure `Resolved` is dispatched after `Discovered`. 
ActionSet::register(component.clone(), DiscoverAction::new()).await?; let result = async move { let first_resolve = { let state = component.lock_state().await; match *state { InstanceState::New => { panic!("Component should be at least discovered") } InstanceState::Discovered => true, InstanceState::Resolved(_) => false, InstanceState::Destroyed => { return Err(ModelError::instance_not_found(component.abs_moniker.clone())); } } }; let component_info = component.environment.resolve(&component.component_url).await.map_err(|err| { ModelError::ResolverError { url: component.component_url.clone(), err } })?; let component_info = Component::try_from(component_info)?; if first_resolve { { let mut state = component.lock_state().await; match *state { InstanceState::Resolved(_) => { panic!("Component was marked Resolved during Resolve action?"); } InstanceState::Destroyed => { return Err(ModelError::instance_not_found(component.abs_moniker.clone())); } InstanceState::New | InstanceState::Discovered => {} } state.set(InstanceState::Resolved( ResolvedInstanceState::new(component, component_info.decl.clone()).await?, )); } } Ok((component_info, first_resolve)) } .await; match result { Ok((component_info, false)) => Ok(component_info), Ok((component_info, true)) => { let event = Event::new( component, Ok(EventPayload::Resolved { component: WeakComponentInstance::from(component), resolved_url: component_info.resolved_url.clone(), decl: component_info.decl.clone(), }), ); component.hooks.dispatch(&event).await?; Ok(component_info) } Err(e) => { let event = Event::new(component, Err(EventError::new(&e, EventErrorPayload::Resolved))); component.hooks.dispatch(&event).await?; Err(e) } } }
35.301887
98
0.556654
e87c33d1b09dd42d6e688b0e079a7760817b4a36
18,487
use crate::utils::{implements_trait, is_entrypoint_fn, is_type_diagnostic_item, return_ty, span_lint}; use if_chain::if_chain; use itertools::Itertools; use rustc_ast::ast::{AttrKind, Attribute}; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_lint::{LateContext, LateLintPass}; use rustc_middle::lint::in_external_macro; use rustc_middle::ty; use rustc_session::{declare_tool_lint, impl_lint_pass}; use rustc_span::source_map::{BytePos, MultiSpan, Span}; use rustc_span::Pos; use std::ops::Range; use url::Url; declare_clippy_lint! { /// **What it does:** Checks for the presence of `_`, `::` or camel-case words /// outside ticks in documentation. /// /// **Why is this bad?** *Rustdoc* supports markdown formatting, `_`, `::` and /// camel-case probably indicates some code which should be included between /// ticks. `_` can also be used for emphasis in markdown, this lint tries to /// consider that. /// /// **Known problems:** Lots of bad docs won’t be fixed, what the lint checks /// for is limited, and there are still false positives. /// /// **Examples:** /// ```rust /// /// Do something with the foo_bar parameter. See also /// /// that::other::module::foo. /// // ^ `foo_bar` and `that::other::module::foo` should be ticked. /// fn doit(foo_bar: usize) {} /// ``` pub DOC_MARKDOWN, pedantic, "presence of `_`, `::` or camel-case outside backticks in documentation" } declare_clippy_lint! { /// **What it does:** Checks for the doc comments of publicly visible /// unsafe functions and warns if there is no `# Safety` section. /// /// **Why is this bad?** Unsafe functions should document their safety /// preconditions, so that users can be sure they are using them safely. /// /// **Known problems:** None. 
/// /// **Examples:** /// ```rust ///# type Universe = (); /// /// This function should really be documented /// pub unsafe fn start_apocalypse(u: &mut Universe) { /// unimplemented!(); /// } /// ``` /// /// At least write a line about safety: /// /// ```rust ///# type Universe = (); /// /// # Safety /// /// /// /// This function should not be called before the horsemen are ready. /// pub unsafe fn start_apocalypse(u: &mut Universe) { /// unimplemented!(); /// } /// ``` pub MISSING_SAFETY_DOC, style, "`pub unsafe fn` without `# Safety` docs" } declare_clippy_lint! { /// **What it does:** Checks the doc comments of publicly visible functions that /// return a `Result` type and warns if there is no `# Errors` section. /// /// **Why is this bad?** Documenting the type of errors that can be returned from a /// function can help callers write code to handle the errors appropriately. /// /// **Known problems:** None. /// /// **Examples:** /// /// Since the following function returns a `Result` it has an `# Errors` section in /// its doc comment: /// /// ```rust ///# use std::io; /// /// # Errors /// /// /// /// Will return `Err` if `filename` does not exist or the user does not have /// /// permission to read it. /// pub fn read(filename: String) -> io::Result<String> { /// unimplemented!(); /// } /// ``` pub MISSING_ERRORS_DOC, pedantic, "`pub fn` returns `Result` without `# Errors` in doc comment" } declare_clippy_lint! { /// **What it does:** Checks for `fn main() { .. }` in doctests /// /// **Why is this bad?** The test can be shorter (and likely more readable) /// if the `fn main()` is left implicit. /// /// **Known problems:** None. 
/// /// **Examples:** /// ``````rust /// /// An example of a doctest with a `main()` function /// /// /// /// # Examples /// /// /// /// ``` /// /// fn main() { /// /// // this needs not be in an `fn` /// /// } /// /// ``` /// fn needless_main() { /// unimplemented!(); /// } /// `````` pub NEEDLESS_DOCTEST_MAIN, style, "presence of `fn main() {` in code examples" } #[allow(clippy::module_name_repetitions)] #[derive(Clone)] pub struct DocMarkdown { valid_idents: FxHashSet<String>, in_trait_impl: bool, } impl DocMarkdown { pub fn new(valid_idents: FxHashSet<String>) -> Self { Self { valid_idents, in_trait_impl: false, } } } impl_lint_pass!(DocMarkdown => [DOC_MARKDOWN, MISSING_SAFETY_DOC, MISSING_ERRORS_DOC, NEEDLESS_DOCTEST_MAIN]); impl<'tcx> LateLintPass<'tcx> for DocMarkdown { fn check_crate(&mut self, cx: &LateContext<'tcx>, krate: &'tcx hir::Crate<'_>) { check_attrs(cx, &self.valid_idents, &krate.item.attrs); } fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) { let headers = check_attrs(cx, &self.valid_idents, &item.attrs); match item.kind { hir::ItemKind::Fn(ref sig, _, body_id) => { if !(is_entrypoint_fn(cx, cx.tcx.hir().local_def_id(item.hir_id).to_def_id()) || in_external_macro(cx.tcx.sess, item.span)) { lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers, Some(body_id)); } }, hir::ItemKind::Impl { of_trait: ref trait_ref, .. } => { self.in_trait_impl = trait_ref.is_some(); }, _ => {}, } } fn check_item_post(&mut self, _cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) { if let hir::ItemKind::Impl { .. } = item.kind { self.in_trait_impl = false; } } fn check_trait_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) { let headers = check_attrs(cx, &self.valid_idents, &item.attrs); if let hir::TraitItemKind::Fn(ref sig, ..) 
= item.kind { if !in_external_macro(cx.tcx.sess, item.span) { lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers, None); } } } fn check_impl_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::ImplItem<'_>) { let headers = check_attrs(cx, &self.valid_idents, &item.attrs); if self.in_trait_impl || in_external_macro(cx.tcx.sess, item.span) { return; } if let hir::ImplItemKind::Fn(ref sig, body_id) = item.kind { lint_for_missing_headers(cx, item.hir_id, item.span, sig, headers, Some(body_id)); } } } fn lint_for_missing_headers<'tcx>( cx: &LateContext<'tcx>, hir_id: hir::HirId, span: impl Into<MultiSpan> + Copy, sig: &hir::FnSig<'_>, headers: DocHeaders, body_id: Option<hir::BodyId>, ) { if !cx.access_levels.is_exported(hir_id) { return; // Private functions do not require doc comments } if !headers.safety && sig.header.unsafety == hir::Unsafety::Unsafe { span_lint( cx, MISSING_SAFETY_DOC, span, "unsafe function's docs miss `# Safety` section", ); } if !headers.errors { if is_type_diagnostic_item(cx, return_ty(cx, hir_id), sym!(result_type)) { span_lint( cx, MISSING_ERRORS_DOC, span, "docs for function returning `Result` missing `# Errors` section", ); } else { if_chain! { if let Some(body_id) = body_id; if let Some(future) = cx.tcx.lang_items().future_trait(); let def_id = cx.tcx.hir().body_owner_def_id(body_id); let mir = cx.tcx.optimized_mir(def_id.to_def_id()); let ret_ty = mir.return_ty(); if implements_trait(cx, ret_ty, future, &[]); if let ty::Opaque(_, subs) = ret_ty.kind; if let Some(gen) = subs.types().next(); if let ty::Generator(_, subs, _) = gen.kind; if is_type_diagnostic_item(cx, subs.as_generator().return_ty(), sym!(result_type)); then { span_lint( cx, MISSING_ERRORS_DOC, span, "docs for function returning `Result` missing `# Errors` section", ); } } } } } /// Cleanup documentation decoration (`///` and such). 
/// /// We can't use `rustc_ast::attr::AttributeMethods::with_desugared_doc` or /// `rustc_ast::parse::lexer::comments::strip_doc_comment_decoration` because we /// need to keep track of /// the spans but this function is inspired from the later. #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn strip_doc_comment_decoration(comment: &str, span: Span) -> (String, Vec<(usize, Span)>) { // one-line comments lose their prefix const ONELINERS: &[&str] = &["///!", "///", "//!", "//"]; for prefix in ONELINERS { if comment.starts_with(*prefix) { let doc = &comment[prefix.len()..]; let mut doc = doc.to_owned(); doc.push('\n'); return ( doc.to_owned(), vec![(doc.len(), span.with_lo(span.lo() + BytePos(prefix.len() as u32)))], ); } } if comment.starts_with("/*") { let doc = &comment[3..comment.len() - 2]; let mut sizes = vec![]; let mut contains_initial_stars = false; for line in doc.lines() { let offset = line.as_ptr() as usize - comment.as_ptr() as usize; debug_assert_eq!(offset as u32 as usize, offset); contains_initial_stars |= line.trim_start().starts_with('*'); // +1 for the newline sizes.push((line.len() + 1, span.with_lo(span.lo() + BytePos(offset as u32)))); } if !contains_initial_stars { return (doc.to_string(), sizes); } // remove the initial '*'s if any let mut no_stars = String::with_capacity(doc.len()); for line in doc.lines() { let mut chars = line.chars(); while let Some(c) = chars.next() { if c.is_whitespace() { no_stars.push(c); } else { no_stars.push(if c == '*' { ' ' } else { c }); break; } } no_stars.push_str(chars.as_str()); no_stars.push('\n'); } return (no_stars, sizes); } panic!("not a doc-comment: {}", comment); } #[derive(Copy, Clone)] struct DocHeaders { safety: bool, errors: bool, } fn check_attrs<'a>(cx: &LateContext<'_>, valid_idents: &FxHashSet<String>, attrs: &'a [Attribute]) -> DocHeaders { let mut doc = String::new(); let mut spans = vec![]; for attr in attrs { if let AttrKind::DocComment(ref comment) = attr.kind { let comment = 
comment.to_string(); let (comment, current_spans) = strip_doc_comment_decoration(&comment, attr.span); spans.extend_from_slice(&current_spans); doc.push_str(&comment); } else if attr.has_name(sym!(doc)) { // ignore mix of sugared and non-sugared doc // don't trigger the safety or errors check return DocHeaders { safety: true, errors: true, }; } } let mut current = 0; for &mut (ref mut offset, _) in &mut spans { let offset_copy = *offset; *offset = current; current += offset_copy; } if doc.is_empty() { return DocHeaders { safety: false, errors: false, }; } let parser = pulldown_cmark::Parser::new(&doc).into_offset_iter(); // Iterate over all `Events` and combine consecutive events into one let events = parser.coalesce(|previous, current| { use pulldown_cmark::Event::Text; let previous_range = previous.1; let current_range = current.1; match (previous.0, current.0) { (Text(previous), Text(current)) => { let mut previous = previous.to_string(); previous.push_str(&current); Ok((Text(previous.into()), previous_range)) }, (previous, current) => Err(((previous, previous_range), (current, current_range))), } }); check_doc(cx, valid_idents, events, &spans) } const RUST_CODE: &[&str] = &["rust", "no_run", "should_panic", "compile_fail", "edition2018"]; fn check_doc<'a, Events: Iterator<Item = (pulldown_cmark::Event<'a>, Range<usize>)>>( cx: &LateContext<'_>, valid_idents: &FxHashSet<String>, events: Events, spans: &[(usize, Span)], ) -> DocHeaders { // true if a safety header was found use pulldown_cmark::CodeBlockKind; use pulldown_cmark::Event::{ Code, End, FootnoteReference, HardBreak, Html, Rule, SoftBreak, Start, TaskListMarker, Text, }; use pulldown_cmark::Tag::{CodeBlock, Heading, Link}; let mut headers = DocHeaders { safety: false, errors: false, }; let mut in_code = false; let mut in_link = None; let mut in_heading = false; let mut is_rust = false; for (event, range) in events { match event { Start(CodeBlock(ref kind)) => { in_code = true; if let 
CodeBlockKind::Fenced(lang) = kind { is_rust = lang.is_empty() || !lang.contains("ignore") && lang.split(',').any(|i| RUST_CODE.contains(&i)); } }, End(CodeBlock(_)) => { in_code = false; is_rust = false; }, Start(Link(_, url, _)) => in_link = Some(url), End(Link(..)) => in_link = None, Start(Heading(_)) => in_heading = true, End(Heading(_)) => in_heading = false, Start(_tag) | End(_tag) => (), // We don't care about other tags Html(_html) => (), // HTML is weird, just ignore it SoftBreak | HardBreak | TaskListMarker(_) | Code(_) | Rule => (), FootnoteReference(text) | Text(text) => { if Some(&text) == in_link.as_ref() { // Probably a link of the form `<http://example.com>` // Which are represented as a link to "http://example.com" with // text "http://example.com" by pulldown-cmark continue; } headers.safety |= in_heading && text.trim() == "Safety"; headers.errors |= in_heading && text.trim() == "Errors"; let index = match spans.binary_search_by(|c| c.0.cmp(&range.start)) { Ok(o) => o, Err(e) => e - 1, }; let (begin, span) = spans[index]; if in_code { if is_rust { check_code(cx, &text, span); } } else { // Adjust for the beginning of the current `Event` let span = span.with_lo(span.lo() + BytePos::from_usize(range.start - begin)); check_text(cx, valid_idents, &text, span); } }, } } headers } static LEAVE_MAIN_PATTERNS: &[&str] = &["static", "fn main() {}", "extern crate", "async fn main() {"]; fn check_code(cx: &LateContext<'_>, text: &str, span: Span) { if text.contains("fn main() {") && !LEAVE_MAIN_PATTERNS.iter().any(|p| text.contains(p)) { span_lint(cx, NEEDLESS_DOCTEST_MAIN, span, "needless `fn main` in doctest"); } } fn check_text(cx: &LateContext<'_>, valid_idents: &FxHashSet<String>, text: &str, span: Span) { for word in text.split(|c: char| c.is_whitespace() || c == '\'') { // Trim punctuation as in `some comment (see foo::bar).` // ^^ // Or even as in `_foo bar_` which is emphasized. 
let word = word.trim_matches(|c: char| !c.is_alphanumeric()); if valid_idents.contains(word) { continue; } // Adjust for the current word let offset = word.as_ptr() as usize - text.as_ptr() as usize; let span = Span::new( span.lo() + BytePos::from_usize(offset), span.lo() + BytePos::from_usize(offset + word.len()), span.ctxt(), ); check_word(cx, word, span); } } fn check_word(cx: &LateContext<'_>, word: &str, span: Span) { /// Checks if a string is camel-case, i.e., contains at least two uppercase /// letters (`Clippy` is ok) and one lower-case letter (`NASA` is ok). /// Plurals are also excluded (`IDs` is ok). fn is_camel_case(s: &str) -> bool { if s.starts_with(|c: char| c.is_digit(10)) { return false; } let s = if s.ends_with('s') { &s[..s.len() - 1] } else { s }; s.chars().all(char::is_alphanumeric) && s.chars().filter(|&c| c.is_uppercase()).take(2).count() > 1 && s.chars().filter(|&c| c.is_lowercase()).take(1).count() > 0 } fn has_underscore(s: &str) -> bool { s != "_" && !s.contains("\\_") && s.contains('_') } fn has_hyphen(s: &str) -> bool { s != "-" && s.contains('-') } if let Ok(url) = Url::parse(word) { // try to get around the fact that `foo::bar` parses as a valid URL if !url.cannot_be_a_base() { span_lint( cx, DOC_MARKDOWN, span, "you should put bare URLs between `<`/`>` or make a proper Markdown link", ); return; } } // We assume that mixed-case words are not meant to be put inside bacticks. (Issue #2343) if has_underscore(word) && has_hyphen(word) { return; } if has_underscore(word) || word.contains("::") || is_camel_case(word) { span_lint( cx, DOC_MARKDOWN, span, &format!("you should put `{}` between ticks in the documentation", word), ); } }
35.146388
119
0.538487
6a1114d92a90bc70c0cb39e3771bbec1487bc66f
6,776
//! The `bank_forks` module implments BankForks a DAG of checkpointed Banks use hashbrown::{HashMap, HashSet}; use solana_metrics::counter::Counter; use solana_runtime::bank::Bank; use solana_sdk::timing; use std::ops::Index; use std::sync::Arc; use std::time::Instant; pub struct BankForks { banks: HashMap<u64, Arc<Bank>>, working_bank: Arc<Bank>, } impl Index<u64> for BankForks { type Output = Arc<Bank>; fn index(&self, bank_slot: u64) -> &Arc<Bank> { &self.banks[&bank_slot] } } impl BankForks { pub fn new(bank_slot: u64, bank: Bank) -> Self { let mut banks = HashMap::new(); let working_bank = Arc::new(bank); banks.insert(bank_slot, working_bank.clone()); Self { banks, working_bank, } } /// Create a map of bank slot id to the set of ancestors for the bank slot. pub fn ancestors(&self) -> HashMap<u64, HashSet<u64>> { let mut ancestors = HashMap::new(); for bank in self.banks.values() { let mut set: HashSet<u64> = bank.ancestors.keys().cloned().collect(); set.remove(&bank.slot()); ancestors.insert(bank.slot(), set); } ancestors } /// Create a map of bank slot id to the set of all of its descendants pub fn descendants(&self) -> HashMap<u64, HashSet<u64>> { let mut descendants = HashMap::new(); for bank in self.banks.values() { let _ = descendants.entry(bank.slot()).or_insert(HashSet::new()); let mut set: HashSet<u64> = bank.ancestors.keys().cloned().collect(); set.remove(&bank.slot()); for parent in set { descendants .entry(parent) .or_insert(HashSet::new()) .insert(bank.slot()); } } descendants } pub fn frozen_banks(&self) -> HashMap<u64, Arc<Bank>> { self.banks .iter() .filter(|(_, b)| b.is_frozen()) .map(|(k, b)| (*k, b.clone())) .collect() } pub fn active_banks(&self) -> Vec<u64> { self.banks .iter() .filter(|(_, v)| !v.is_frozen()) .map(|(k, _v)| *k) .collect() } pub fn get(&self, bank_slot: u64) -> Option<&Arc<Bank>> { self.banks.get(&bank_slot) } pub fn new_from_banks(initial_banks: &[Arc<Bank>]) -> Self { let mut banks = HashMap::new(); let working_bank = 
initial_banks[0].clone(); for bank in initial_banks { banks.insert(bank.slot(), bank.clone()); } Self { banks, working_bank, } } pub fn insert(&mut self, bank: Bank) { let bank = Arc::new(bank); let prev = self.banks.insert(bank.slot(), bank.clone()); assert!(prev.is_none()); self.working_bank = bank.clone(); } // TODO: really want to kill this... pub fn working_bank(&self) -> Arc<Bank> { self.working_bank.clone() } pub fn set_root(&mut self, root: u64) { let set_root_start = Instant::now(); let root_bank = self .banks .get(&root) .expect("root bank didn't exist in bank_forks"); root_bank.squash(); self.prune_non_root(root); inc_new_counter_info!( "bank-forks_set_root_ms", timing::duration_as_ms(&set_root_start.elapsed()) as usize ); } fn prune_non_root(&mut self, root: u64) { let descendants = self.descendants(); self.banks .retain(|slot, _| descendants[&root].contains(slot)) } } #[cfg(test)] mod tests { use super::*; use solana_sdk::genesis_block::GenesisBlock; use solana_sdk::hash::Hash; use solana_sdk::pubkey::Pubkey; #[test] fn test_bank_forks() { let (genesis_block, _) = GenesisBlock::new(10_000); let bank = Bank::new(&genesis_block); let mut bank_forks = BankForks::new(0, bank); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); child_bank.register_tick(&Hash::default()); bank_forks.insert(child_bank); assert_eq!(bank_forks[1u64].tick_height(), 1); assert_eq!(bank_forks.working_bank().tick_height(), 1); } #[test] fn test_bank_forks_descendants() { let (genesis_block, _) = GenesisBlock::new(10_000); let bank = Bank::new(&genesis_block); let mut bank_forks = BankForks::new(0, bank); let bank0 = bank_forks[0].clone(); let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.insert(bank); let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2); bank_forks.insert(bank); let descendants = bank_forks.descendants(); let children: Vec<u64> = descendants[&0].iter().cloned().collect(); assert_eq!(children, 
vec![1, 2]); assert!(descendants[&1].is_empty()); assert!(descendants[&2].is_empty()); } #[test] fn test_bank_forks_ancestors() { let (genesis_block, _) = GenesisBlock::new(10_000); let bank = Bank::new(&genesis_block); let mut bank_forks = BankForks::new(0, bank); let bank0 = bank_forks[0].clone(); let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.insert(bank); let bank = Bank::new_from_parent(&bank0, &Pubkey::default(), 2); bank_forks.insert(bank); let ancestors = bank_forks.ancestors(); assert!(ancestors[&0].is_empty()); let parents: Vec<u64> = ancestors[&1].iter().cloned().collect(); assert_eq!(parents, vec![0]); let parents: Vec<u64> = ancestors[&2].iter().cloned().collect(); assert_eq!(parents, vec![0]); } #[test] fn test_bank_forks_frozen_banks() { let (genesis_block, _) = GenesisBlock::new(10_000); let bank = Bank::new(&genesis_block); let mut bank_forks = BankForks::new(0, bank); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); bank_forks.insert(child_bank); assert!(bank_forks.frozen_banks().get(&0).is_some()); assert!(bank_forks.frozen_banks().get(&1).is_none()); } #[test] fn test_bank_forks_active_banks() { let (genesis_block, _) = GenesisBlock::new(10_000); let bank = Bank::new(&genesis_block); let mut bank_forks = BankForks::new(0, bank); let child_bank = Bank::new_from_parent(&bank_forks[0u64], &Pubkey::default(), 1); bank_forks.insert(child_bank); assert_eq!(bank_forks.active_banks(), vec![1]); } }
33.215686
89
0.574233
90c4ac87ee0cd2b2243349286d8e39fd4ea75b21
60,179
// This file was generated by gir (https://github.com/gtk-rs/gir @ 58cffd4) // from gir-files (https://github.com/gtk-rs/gir-files @ ???) // DO NOT EDIT extern crate gstreamer_sys; extern crate shell_words; extern crate tempdir; use std::env; use std::error::Error; use std::path::Path; use std::mem::{align_of, size_of}; use std::process::Command; use std::str; use gstreamer_sys::*; static PACKAGES: &[&str] = &["gstreamer-1.0"]; #[derive(Clone, Debug)] struct Compiler { pub args: Vec<String>, } impl Compiler { pub fn new() -> Result<Compiler, Box<Error>> { let mut args = get_var("CC", "cc")?; args.push("-Wno-deprecated-declarations".to_owned()); // For %z support in printf when using MinGW. args.push("-D__USE_MINGW_ANSI_STDIO".to_owned()); args.extend(get_var("CFLAGS", "")?); args.extend(get_var("CPPFLAGS", "")?); args.extend(pkg_config_cflags(PACKAGES)?); Ok(Compiler { args }) } pub fn define<'a, V: Into<Option<&'a str>>>(&mut self, var: &str, val: V) { let arg = match val.into() { None => format!("-D{}", var), Some(val) => format!("-D{}={}", var, val), }; self.args.push(arg); } pub fn compile(&self, src: &Path, out: &Path) -> Result<(), Box<Error>> { let mut cmd = self.to_command(); cmd.arg(src); cmd.arg("-o"); cmd.arg(out); let status = cmd.spawn()?.wait()?; if !status.success() { return Err(format!("compilation command {:?} failed, {}", &cmd, status).into()); } Ok(()) } fn to_command(&self) -> Command { let mut cmd = Command::new(&self.args[0]); cmd.args(&self.args[1..]); cmd } } fn get_var(name: &str, default: &str) -> Result<Vec<String>, Box<Error>> { match env::var(name) { Ok(value) => Ok(shell_words::split(&value)?), Err(env::VarError::NotPresent) => Ok(shell_words::split(default)?), Err(err) => Err(format!("{} {}", name, err).into()), } } fn pkg_config_cflags(packages: &[&str]) -> Result<Vec<String>, Box<Error>> { if packages.is_empty() { return Ok(Vec::new()); } let mut cmd = Command::new("pkg-config"); cmd.arg("--cflags"); cmd.args(packages); let out = 
cmd.output()?; if !out.status.success() { return Err(format!("command {:?} returned {}", &cmd, out.status).into()); } let stdout = str::from_utf8(&out.stdout)?; Ok(shell_words::split(stdout.trim())?) } #[derive(Copy, Clone, Debug, Eq, PartialEq)] struct Layout { size: usize, alignment: usize, } #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] struct Results { /// Number of successfully completed tests. passed: usize, /// Total number of failed tests (including those that failed to compile). failed: usize, /// Number of tests that failed to compile. failed_to_compile: usize, } impl Results { fn record_passed(&mut self) { self.passed += 1; } fn record_failed(&mut self) { self.failed += 1; } fn record_failed_to_compile(&mut self) { self.failed += 1; self.failed_to_compile += 1; } fn summary(&self) -> String { format!( "{} passed; {} failed (compilation errors: {})", self.passed, self.failed, self.failed_to_compile) } fn expect_total_success(&self) { if self.failed == 0 { println!("OK: {}", self.summary()); } else { panic!("FAILED: {}", self.summary()); }; } } #[test] fn cross_validate_constants_with_c() { let tmpdir = tempdir::TempDir::new("abi").expect("temporary directory"); let cc = Compiler::new().expect("configured compiler"); assert_eq!("1", get_c_value(tmpdir.path(), &cc, "1").expect("C constant"), "failed to obtain correct constant value for 1"); let mut results : Results = Default::default(); for (i, &(name, rust_value)) in RUST_CONSTANTS.iter().enumerate() { match get_c_value(tmpdir.path(), &cc, name) { Err(e) => { results.record_failed_to_compile(); eprintln!("{}", e); }, Ok(ref c_value) => { if rust_value == c_value { results.record_passed(); } else { results.record_failed(); eprintln!("Constant value mismatch for {}\nRust: {:?}\nC: {:?}", name, rust_value, c_value); } } }; if (i + 1) % 25 == 0 { println!("constants ... 
{}", results.summary()); } } results.expect_total_success(); } #[test] fn cross_validate_layout_with_c() { let tmpdir = tempdir::TempDir::new("abi").expect("temporary directory"); let cc = Compiler::new().expect("configured compiler"); assert_eq!(Layout {size: 1, alignment: 1}, get_c_layout(tmpdir.path(), &cc, "char").expect("C layout"), "failed to obtain correct layout for char type"); let mut results : Results = Default::default(); for (i, &(name, rust_layout)) in RUST_LAYOUTS.iter().enumerate() { match get_c_layout(tmpdir.path(), &cc, name) { Err(e) => { results.record_failed_to_compile(); eprintln!("{}", e); }, Ok(c_layout) => { if rust_layout == c_layout { results.record_passed(); } else { results.record_failed(); eprintln!("Layout mismatch for {}\nRust: {:?}\nC: {:?}", name, rust_layout, &c_layout); } } }; if (i + 1) % 25 == 0 { println!("layout ... {}", results.summary()); } } results.expect_total_success(); } fn get_c_layout(dir: &Path, cc: &Compiler, name: &str) -> Result<Layout, Box<Error>> { let exe = dir.join("layout"); let mut cc = cc.clone(); cc.define("ABI_TYPE_NAME", name); cc.compile(Path::new("tests/layout.c"), &exe)?; let mut abi_cmd = Command::new(exe); let output = abi_cmd.output()?; if !output.status.success() { return Err(format!("command {:?} failed, {:?}", &abi_cmd, &output).into()); } let stdout = str::from_utf8(&output.stdout)?; let mut words = stdout.trim().split_whitespace(); let size = words.next().unwrap().parse().unwrap(); let alignment = words.next().unwrap().parse().unwrap(); Ok(Layout {size, alignment}) } fn get_c_value(dir: &Path, cc: &Compiler, name: &str) -> Result<String, Box<Error>> { let exe = dir.join("constant"); let mut cc = cc.clone(); cc.define("ABI_CONSTANT_NAME", name); cc.compile(Path::new("tests/constant.c"), &exe)?; let mut abi_cmd = Command::new(exe); let output = abi_cmd.output()?; if !output.status.success() { return Err(format!("command {:?} failed, {:?}", &abi_cmd, &output).into()); } let output = 
str::from_utf8(&output.stdout)?.trim(); if !output.starts_with("###gir test###") || !output.ends_with("###gir test###") { return Err(format!("command {:?} return invalid output, {:?}", &abi_cmd, &output).into()); } Ok(String::from(&output[14..(output.len() - 14)])) } const RUST_LAYOUTS: &[(&str, Layout)] = &[ ("GstAllocationParams", Layout {size: size_of::<GstAllocationParams>(), alignment: align_of::<GstAllocationParams>()}), ("GstAllocator", Layout {size: size_of::<GstAllocator>(), alignment: align_of::<GstAllocator>()}), ("GstAllocatorClass", Layout {size: size_of::<GstAllocatorClass>(), alignment: align_of::<GstAllocatorClass>()}), ("GstAllocatorFlags", Layout {size: size_of::<GstAllocatorFlags>(), alignment: align_of::<GstAllocatorFlags>()}), ("GstBin", Layout {size: size_of::<GstBin>(), alignment: align_of::<GstBin>()}), ("GstBinClass", Layout {size: size_of::<GstBinClass>(), alignment: align_of::<GstBinClass>()}), ("GstBinFlags", Layout {size: size_of::<GstBinFlags>(), alignment: align_of::<GstBinFlags>()}), ("GstBuffer", Layout {size: size_of::<GstBuffer>(), alignment: align_of::<GstBuffer>()}), ("GstBufferCopyFlags", Layout {size: size_of::<GstBufferCopyFlags>(), alignment: align_of::<GstBufferCopyFlags>()}), ("GstBufferFlags", Layout {size: size_of::<GstBufferFlags>(), alignment: align_of::<GstBufferFlags>()}), ("GstBufferPool", Layout {size: size_of::<GstBufferPool>(), alignment: align_of::<GstBufferPool>()}), ("GstBufferPoolAcquireFlags", Layout {size: size_of::<GstBufferPoolAcquireFlags>(), alignment: align_of::<GstBufferPoolAcquireFlags>()}), ("GstBufferPoolAcquireParams", Layout {size: size_of::<GstBufferPoolAcquireParams>(), alignment: align_of::<GstBufferPoolAcquireParams>()}), ("GstBufferPoolClass", Layout {size: size_of::<GstBufferPoolClass>(), alignment: align_of::<GstBufferPoolClass>()}), ("GstBufferingMode", Layout {size: size_of::<GstBufferingMode>(), alignment: align_of::<GstBufferingMode>()}), ("GstBus", Layout {size: size_of::<GstBus>(), 
alignment: align_of::<GstBus>()}), ("GstBusClass", Layout {size: size_of::<GstBusClass>(), alignment: align_of::<GstBusClass>()}), ("GstBusFlags", Layout {size: size_of::<GstBusFlags>(), alignment: align_of::<GstBusFlags>()}), ("GstBusSyncReply", Layout {size: size_of::<GstBusSyncReply>(), alignment: align_of::<GstBusSyncReply>()}), ("GstCaps", Layout {size: size_of::<GstCaps>(), alignment: align_of::<GstCaps>()}), ("GstCapsFlags", Layout {size: size_of::<GstCapsFlags>(), alignment: align_of::<GstCapsFlags>()}), ("GstCapsIntersectMode", Layout {size: size_of::<GstCapsIntersectMode>(), alignment: align_of::<GstCapsIntersectMode>()}), ("GstChildProxyInterface", Layout {size: size_of::<GstChildProxyInterface>(), alignment: align_of::<GstChildProxyInterface>()}), ("GstClock", Layout {size: size_of::<GstClock>(), alignment: align_of::<GstClock>()}), ("GstClockClass", Layout {size: size_of::<GstClockClass>(), alignment: align_of::<GstClockClass>()}), ("GstClockEntry", Layout {size: size_of::<GstClockEntry>(), alignment: align_of::<GstClockEntry>()}), ("GstClockEntryType", Layout {size: size_of::<GstClockEntryType>(), alignment: align_of::<GstClockEntryType>()}), ("GstClockFlags", Layout {size: size_of::<GstClockFlags>(), alignment: align_of::<GstClockFlags>()}), ("GstClockID", Layout {size: size_of::<GstClockID>(), alignment: align_of::<GstClockID>()}), ("GstClockReturn", Layout {size: size_of::<GstClockReturn>(), alignment: align_of::<GstClockReturn>()}), ("GstClockTime", Layout {size: size_of::<GstClockTime>(), alignment: align_of::<GstClockTime>()}), ("GstClockTimeDiff", Layout {size: size_of::<GstClockTimeDiff>(), alignment: align_of::<GstClockTimeDiff>()}), ("GstClockType", Layout {size: size_of::<GstClockType>(), alignment: align_of::<GstClockType>()}), ("GstControlBinding", Layout {size: size_of::<GstControlBinding>(), alignment: align_of::<GstControlBinding>()}), ("GstControlBindingClass", Layout {size: size_of::<GstControlBindingClass>(), alignment: 
align_of::<GstControlBindingClass>()}), ("GstControlSource", Layout {size: size_of::<GstControlSource>(), alignment: align_of::<GstControlSource>()}), ("GstControlSourceClass", Layout {size: size_of::<GstControlSourceClass>(), alignment: align_of::<GstControlSourceClass>()}), ("GstCoreError", Layout {size: size_of::<GstCoreError>(), alignment: align_of::<GstCoreError>()}), ("GstDebugCategory", Layout {size: size_of::<GstDebugCategory>(), alignment: align_of::<GstDebugCategory>()}), ("GstDebugColorFlags", Layout {size: size_of::<GstDebugColorFlags>(), alignment: align_of::<GstDebugColorFlags>()}), ("GstDebugColorMode", Layout {size: size_of::<GstDebugColorMode>(), alignment: align_of::<GstDebugColorMode>()}), ("GstDebugGraphDetails", Layout {size: size_of::<GstDebugGraphDetails>(), alignment: align_of::<GstDebugGraphDetails>()}), ("GstDebugLevel", Layout {size: size_of::<GstDebugLevel>(), alignment: align_of::<GstDebugLevel>()}), ("GstDevice", Layout {size: size_of::<GstDevice>(), alignment: align_of::<GstDevice>()}), ("GstDeviceClass", Layout {size: size_of::<GstDeviceClass>(), alignment: align_of::<GstDeviceClass>()}), ("GstDeviceMonitor", Layout {size: size_of::<GstDeviceMonitor>(), alignment: align_of::<GstDeviceMonitor>()}), ("GstDeviceMonitorClass", Layout {size: size_of::<GstDeviceMonitorClass>(), alignment: align_of::<GstDeviceMonitorClass>()}), ("GstDeviceProvider", Layout {size: size_of::<GstDeviceProvider>(), alignment: align_of::<GstDeviceProvider>()}), ("GstDeviceProviderClass", Layout {size: size_of::<GstDeviceProviderClass>(), alignment: align_of::<GstDeviceProviderClass>()}), ("GstElement", Layout {size: size_of::<GstElement>(), alignment: align_of::<GstElement>()}), ("GstElementClass", Layout {size: size_of::<GstElementClass>(), alignment: align_of::<GstElementClass>()}), ("GstElementFactoryListType", Layout {size: size_of::<GstElementFactoryListType>(), alignment: align_of::<GstElementFactoryListType>()}), ("GstElementFlags", Layout {size: 
size_of::<GstElementFlags>(), alignment: align_of::<GstElementFlags>()}), ("GstEvent", Layout {size: size_of::<GstEvent>(), alignment: align_of::<GstEvent>()}), ("GstEventType", Layout {size: size_of::<GstEventType>(), alignment: align_of::<GstEventType>()}), ("GstEventTypeFlags", Layout {size: size_of::<GstEventTypeFlags>(), alignment: align_of::<GstEventTypeFlags>()}), ("GstFlowReturn", Layout {size: size_of::<GstFlowReturn>(), alignment: align_of::<GstFlowReturn>()}), ("GstFormat", Layout {size: size_of::<GstFormat>(), alignment: align_of::<GstFormat>()}), ("GstFormatDefinition", Layout {size: size_of::<GstFormatDefinition>(), alignment: align_of::<GstFormatDefinition>()}), ("GstGhostPad", Layout {size: size_of::<GstGhostPad>(), alignment: align_of::<GstGhostPad>()}), ("GstGhostPadClass", Layout {size: size_of::<GstGhostPadClass>(), alignment: align_of::<GstGhostPadClass>()}), ("GstIterator", Layout {size: size_of::<GstIterator>(), alignment: align_of::<GstIterator>()}), ("GstIteratorItem", Layout {size: size_of::<GstIteratorItem>(), alignment: align_of::<GstIteratorItem>()}), ("GstIteratorResult", Layout {size: size_of::<GstIteratorResult>(), alignment: align_of::<GstIteratorResult>()}), ("GstLibraryError", Layout {size: size_of::<GstLibraryError>(), alignment: align_of::<GstLibraryError>()}), ("GstLockFlags", Layout {size: size_of::<GstLockFlags>(), alignment: align_of::<GstLockFlags>()}), ("GstMapFlags", Layout {size: size_of::<GstMapFlags>(), alignment: align_of::<GstMapFlags>()}), ("GstMapInfo", Layout {size: size_of::<GstMapInfo>(), alignment: align_of::<GstMapInfo>()}), ("GstMemory", Layout {size: size_of::<GstMemory>(), alignment: align_of::<GstMemory>()}), ("GstMemoryFlags", Layout {size: size_of::<GstMemoryFlags>(), alignment: align_of::<GstMemoryFlags>()}), ("GstMessage", Layout {size: size_of::<GstMessage>(), alignment: align_of::<GstMessage>()}), ("GstMessageType", Layout {size: size_of::<GstMessageType>(), alignment: align_of::<GstMessageType>()}), 
("GstMeta", Layout {size: size_of::<GstMeta>(), alignment: align_of::<GstMeta>()}), ("GstMetaFlags", Layout {size: size_of::<GstMetaFlags>(), alignment: align_of::<GstMetaFlags>()}), ("GstMetaInfo", Layout {size: size_of::<GstMetaInfo>(), alignment: align_of::<GstMetaInfo>()}), ("GstMetaTransformCopy", Layout {size: size_of::<GstMetaTransformCopy>(), alignment: align_of::<GstMetaTransformCopy>()}), ("GstMiniObject", Layout {size: size_of::<GstMiniObject>(), alignment: align_of::<GstMiniObject>()}), ("GstMiniObjectFlags", Layout {size: size_of::<GstMiniObjectFlags>(), alignment: align_of::<GstMiniObjectFlags>()}), ("GstObject", Layout {size: size_of::<GstObject>(), alignment: align_of::<GstObject>()}), ("GstObjectClass", Layout {size: size_of::<GstObjectClass>(), alignment: align_of::<GstObjectClass>()}), ("GstObjectFlags", Layout {size: size_of::<GstObjectFlags>(), alignment: align_of::<GstObjectFlags>()}), ("GstPad", Layout {size: size_of::<GstPad>(), alignment: align_of::<GstPad>()}), ("GstPadClass", Layout {size: size_of::<GstPadClass>(), alignment: align_of::<GstPadClass>()}), ("GstPadDirection", Layout {size: size_of::<GstPadDirection>(), alignment: align_of::<GstPadDirection>()}), ("GstPadFlags", Layout {size: size_of::<GstPadFlags>(), alignment: align_of::<GstPadFlags>()}), ("GstPadLinkCheck", Layout {size: size_of::<GstPadLinkCheck>(), alignment: align_of::<GstPadLinkCheck>()}), ("GstPadLinkReturn", Layout {size: size_of::<GstPadLinkReturn>(), alignment: align_of::<GstPadLinkReturn>()}), ("GstPadMode", Layout {size: size_of::<GstPadMode>(), alignment: align_of::<GstPadMode>()}), ("GstPadPresence", Layout {size: size_of::<GstPadPresence>(), alignment: align_of::<GstPadPresence>()}), ("GstPadProbeInfo", Layout {size: size_of::<GstPadProbeInfo>(), alignment: align_of::<GstPadProbeInfo>()}), ("GstPadProbeReturn", Layout {size: size_of::<GstPadProbeReturn>(), alignment: align_of::<GstPadProbeReturn>()}), ("GstPadProbeType", Layout {size: 
size_of::<GstPadProbeType>(), alignment: align_of::<GstPadProbeType>()}), ("GstPadTemplate", Layout {size: size_of::<GstPadTemplate>(), alignment: align_of::<GstPadTemplate>()}), ("GstPadTemplateClass", Layout {size: size_of::<GstPadTemplateClass>(), alignment: align_of::<GstPadTemplateClass>()}), ("GstPadTemplateFlags", Layout {size: size_of::<GstPadTemplateFlags>(), alignment: align_of::<GstPadTemplateFlags>()}), ("GstParamSpecArray", Layout {size: size_of::<GstParamSpecArray>(), alignment: align_of::<GstParamSpecArray>()}), ("GstParamSpecFraction", Layout {size: size_of::<GstParamSpecFraction>(), alignment: align_of::<GstParamSpecFraction>()}), ("GstParentBufferMeta", Layout {size: size_of::<GstParentBufferMeta>(), alignment: align_of::<GstParentBufferMeta>()}), ("GstParseError", Layout {size: size_of::<GstParseError>(), alignment: align_of::<GstParseError>()}), ("GstParseFlags", Layout {size: size_of::<GstParseFlags>(), alignment: align_of::<GstParseFlags>()}), ("GstPipeline", Layout {size: size_of::<GstPipeline>(), alignment: align_of::<GstPipeline>()}), ("GstPipelineClass", Layout {size: size_of::<GstPipelineClass>(), alignment: align_of::<GstPipelineClass>()}), ("GstPipelineFlags", Layout {size: size_of::<GstPipelineFlags>(), alignment: align_of::<GstPipelineFlags>()}), ("GstPluginDependencyFlags", Layout {size: size_of::<GstPluginDependencyFlags>(), alignment: align_of::<GstPluginDependencyFlags>()}), ("GstPluginDesc", Layout {size: size_of::<GstPluginDesc>(), alignment: align_of::<GstPluginDesc>()}), ("GstPluginError", Layout {size: size_of::<GstPluginError>(), alignment: align_of::<GstPluginError>()}), ("GstPluginFlags", Layout {size: size_of::<GstPluginFlags>(), alignment: align_of::<GstPluginFlags>()}), ("GstPollFD", Layout {size: size_of::<GstPollFD>(), alignment: align_of::<GstPollFD>()}), ("GstPresetInterface", Layout {size: size_of::<GstPresetInterface>(), alignment: align_of::<GstPresetInterface>()}), ("GstProgressType", Layout {size: 
size_of::<GstProgressType>(), alignment: align_of::<GstProgressType>()}), ("GstPromise", Layout {size: size_of::<GstPromise>(), alignment: align_of::<GstPromise>()}), ("GstPromiseResult", Layout {size: size_of::<GstPromiseResult>(), alignment: align_of::<GstPromiseResult>()}), ("GstProtectionMeta", Layout {size: size_of::<GstProtectionMeta>(), alignment: align_of::<GstProtectionMeta>()}), ("GstProxyPad", Layout {size: size_of::<GstProxyPad>(), alignment: align_of::<GstProxyPad>()}), ("GstProxyPadClass", Layout {size: size_of::<GstProxyPadClass>(), alignment: align_of::<GstProxyPadClass>()}), ("GstQOSType", Layout {size: size_of::<GstQOSType>(), alignment: align_of::<GstQOSType>()}), ("GstQuery", Layout {size: size_of::<GstQuery>(), alignment: align_of::<GstQuery>()}), ("GstQueryType", Layout {size: size_of::<GstQueryType>(), alignment: align_of::<GstQueryType>()}), ("GstQueryTypeFlags", Layout {size: size_of::<GstQueryTypeFlags>(), alignment: align_of::<GstQueryTypeFlags>()}), ("GstRank", Layout {size: size_of::<GstRank>(), alignment: align_of::<GstRank>()}), ("GstReferenceTimestampMeta", Layout {size: size_of::<GstReferenceTimestampMeta>(), alignment: align_of::<GstReferenceTimestampMeta>()}), ("GstRegistry", Layout {size: size_of::<GstRegistry>(), alignment: align_of::<GstRegistry>()}), ("GstRegistryClass", Layout {size: size_of::<GstRegistryClass>(), alignment: align_of::<GstRegistryClass>()}), ("GstResourceError", Layout {size: size_of::<GstResourceError>(), alignment: align_of::<GstResourceError>()}), ("GstSchedulingFlags", Layout {size: size_of::<GstSchedulingFlags>(), alignment: align_of::<GstSchedulingFlags>()}), ("GstSearchMode", Layout {size: size_of::<GstSearchMode>(), alignment: align_of::<GstSearchMode>()}), ("GstSeekFlags", Layout {size: size_of::<GstSeekFlags>(), alignment: align_of::<GstSeekFlags>()}), ("GstSeekType", Layout {size: size_of::<GstSeekType>(), alignment: align_of::<GstSeekType>()}), ("GstSegment", Layout {size: size_of::<GstSegment>(), 
alignment: align_of::<GstSegment>()}), ("GstSegmentFlags", Layout {size: size_of::<GstSegmentFlags>(), alignment: align_of::<GstSegmentFlags>()}), ("GstStackTraceFlags", Layout {size: size_of::<GstStackTraceFlags>(), alignment: align_of::<GstStackTraceFlags>()}), ("GstState", Layout {size: size_of::<GstState>(), alignment: align_of::<GstState>()}), ("GstStateChange", Layout {size: size_of::<GstStateChange>(), alignment: align_of::<GstStateChange>()}), ("GstStateChangeReturn", Layout {size: size_of::<GstStateChangeReturn>(), alignment: align_of::<GstStateChangeReturn>()}), ("GstStaticCaps", Layout {size: size_of::<GstStaticCaps>(), alignment: align_of::<GstStaticCaps>()}), ("GstStaticPadTemplate", Layout {size: size_of::<GstStaticPadTemplate>(), alignment: align_of::<GstStaticPadTemplate>()}), ("GstStream", Layout {size: size_of::<GstStream>(), alignment: align_of::<GstStream>()}), ("GstStreamClass", Layout {size: size_of::<GstStreamClass>(), alignment: align_of::<GstStreamClass>()}), ("GstStreamCollection", Layout {size: size_of::<GstStreamCollection>(), alignment: align_of::<GstStreamCollection>()}), ("GstStreamCollectionClass", Layout {size: size_of::<GstStreamCollectionClass>(), alignment: align_of::<GstStreamCollectionClass>()}), ("GstStreamError", Layout {size: size_of::<GstStreamError>(), alignment: align_of::<GstStreamError>()}), ("GstStreamFlags", Layout {size: size_of::<GstStreamFlags>(), alignment: align_of::<GstStreamFlags>()}), ("GstStreamStatusType", Layout {size: size_of::<GstStreamStatusType>(), alignment: align_of::<GstStreamStatusType>()}), ("GstStreamType", Layout {size: size_of::<GstStreamType>(), alignment: align_of::<GstStreamType>()}), ("GstStructure", Layout {size: size_of::<GstStructure>(), alignment: align_of::<GstStructure>()}), ("GstStructureChangeType", Layout {size: size_of::<GstStructureChangeType>(), alignment: align_of::<GstStructureChangeType>()}), ("GstSystemClock", Layout {size: size_of::<GstSystemClock>(), alignment: 
align_of::<GstSystemClock>()}), ("GstSystemClockClass", Layout {size: size_of::<GstSystemClockClass>(), alignment: align_of::<GstSystemClockClass>()}), ("GstTagFlag", Layout {size: size_of::<GstTagFlag>(), alignment: align_of::<GstTagFlag>()}), ("GstTagList", Layout {size: size_of::<GstTagList>(), alignment: align_of::<GstTagList>()}), ("GstTagMergeMode", Layout {size: size_of::<GstTagMergeMode>(), alignment: align_of::<GstTagMergeMode>()}), ("GstTagScope", Layout {size: size_of::<GstTagScope>(), alignment: align_of::<GstTagScope>()}), ("GstTagSetterInterface", Layout {size: size_of::<GstTagSetterInterface>(), alignment: align_of::<GstTagSetterInterface>()}), ("GstTask", Layout {size: size_of::<GstTask>(), alignment: align_of::<GstTask>()}), ("GstTaskClass", Layout {size: size_of::<GstTaskClass>(), alignment: align_of::<GstTaskClass>()}), ("GstTaskPool", Layout {size: size_of::<GstTaskPool>(), alignment: align_of::<GstTaskPool>()}), ("GstTaskPoolClass", Layout {size: size_of::<GstTaskPoolClass>(), alignment: align_of::<GstTaskPoolClass>()}), ("GstTaskState", Layout {size: size_of::<GstTaskState>(), alignment: align_of::<GstTaskState>()}), ("GstTimedValue", Layout {size: size_of::<GstTimedValue>(), alignment: align_of::<GstTimedValue>()}), ("GstTocEntryType", Layout {size: size_of::<GstTocEntryType>(), alignment: align_of::<GstTocEntryType>()}), ("GstTocLoopType", Layout {size: size_of::<GstTocLoopType>(), alignment: align_of::<GstTocLoopType>()}), ("GstTocScope", Layout {size: size_of::<GstTocScope>(), alignment: align_of::<GstTocScope>()}), ("GstTocSetterInterface", Layout {size: size_of::<GstTocSetterInterface>(), alignment: align_of::<GstTocSetterInterface>()}), ("GstTracer", Layout {size: size_of::<GstTracer>(), alignment: align_of::<GstTracer>()}), ("GstTracerClass", Layout {size: size_of::<GstTracerClass>(), alignment: align_of::<GstTracerClass>()}), ("GstTracerValueFlags", Layout {size: size_of::<GstTracerValueFlags>(), alignment: 
align_of::<GstTracerValueFlags>()}), ("GstTracerValueScope", Layout {size: size_of::<GstTracerValueScope>(), alignment: align_of::<GstTracerValueScope>()}), ("GstTypeFind", Layout {size: size_of::<GstTypeFind>(), alignment: align_of::<GstTypeFind>()}), ("GstTypeFindProbability", Layout {size: size_of::<GstTypeFindProbability>(), alignment: align_of::<GstTypeFindProbability>()}), ("GstURIError", Layout {size: size_of::<GstURIError>(), alignment: align_of::<GstURIError>()}), ("GstURIHandlerInterface", Layout {size: size_of::<GstURIHandlerInterface>(), alignment: align_of::<GstURIHandlerInterface>()}), ("GstURIType", Layout {size: size_of::<GstURIType>(), alignment: align_of::<GstURIType>()}), ("GstValueTable", Layout {size: size_of::<GstValueTable>(), alignment: align_of::<GstValueTable>()}), ]; const RUST_CONSTANTS: &[(&str, &str)] = &[ ("(guint) GST_ALLOCATOR_FLAG_CUSTOM_ALLOC", "16"), ("(guint) GST_ALLOCATOR_FLAG_LAST", "1048576"), ("GST_ALLOCATOR_SYSMEM", "SystemMemory"), ("(guint) GST_BIN_FLAG_LAST", "524288"), ("(guint) GST_BIN_FLAG_NO_RESYNC", "16384"), ("(guint) GST_BIN_FLAG_STREAMS_AWARE", "32768"), ("(gint) GST_BUFFERING_DOWNLOAD", "1"), ("(gint) GST_BUFFERING_LIVE", "3"), ("(gint) GST_BUFFERING_STREAM", "0"), ("(gint) GST_BUFFERING_TIMESHIFT", "2"), ("GST_BUFFER_COPY_ALL", "15"), ("(guint) GST_BUFFER_COPY_DEEP", "32"), ("(guint) GST_BUFFER_COPY_FLAGS", "1"), ("(guint) GST_BUFFER_COPY_MEMORY", "8"), ("(guint) GST_BUFFER_COPY_MERGE", "16"), ("(guint) GST_BUFFER_COPY_META", "4"), ("GST_BUFFER_COPY_METADATA", "7"), ("(guint) GST_BUFFER_COPY_NONE", "0"), ("(guint) GST_BUFFER_COPY_TIMESTAMPS", "2"), ("(guint) GST_BUFFER_FLAG_CORRUPTED", "256"), ("(guint) GST_BUFFER_FLAG_DECODE_ONLY", "32"), ("(guint) GST_BUFFER_FLAG_DELTA_UNIT", "8192"), ("(guint) GST_BUFFER_FLAG_DISCONT", "64"), ("(guint) GST_BUFFER_FLAG_DROPPABLE", "4096"), ("(guint) GST_BUFFER_FLAG_GAP", "2048"), ("(guint) GST_BUFFER_FLAG_HEADER", "1024"), ("(guint) GST_BUFFER_FLAG_LAST", "1048576"), 
("(guint) GST_BUFFER_FLAG_LIVE", "16"), ("(guint) GST_BUFFER_FLAG_MARKER", "512"), ("(guint) GST_BUFFER_FLAG_NON_DROPPABLE", "65536"), ("(guint) GST_BUFFER_FLAG_RESYNC", "128"), ("(guint) GST_BUFFER_FLAG_SYNC_AFTER", "32768"), ("(guint) GST_BUFFER_FLAG_TAG_MEMORY", "16384"), ("GST_BUFFER_OFFSET_NONE", "18446744073709551615"), ("(guint) GST_BUFFER_POOL_ACQUIRE_FLAG_DISCONT", "4"), ("(guint) GST_BUFFER_POOL_ACQUIRE_FLAG_DONTWAIT", "2"), ("(guint) GST_BUFFER_POOL_ACQUIRE_FLAG_KEY_UNIT", "1"), ("(guint) GST_BUFFER_POOL_ACQUIRE_FLAG_LAST", "65536"), ("(guint) GST_BUFFER_POOL_ACQUIRE_FLAG_NONE", "0"), ("(gint) GST_BUS_ASYNC", "2"), ("(gint) GST_BUS_DROP", "0"), ("(guint) GST_BUS_FLAG_LAST", "32"), ("(guint) GST_BUS_FLUSHING", "16"), ("(gint) GST_BUS_PASS", "1"), ("GST_CAN_INLINE", "1"), ("GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY", "memory:SystemMemory"), ("(guint) GST_CAPS_FLAG_ANY", "16"), ("(gint) GST_CAPS_INTERSECT_FIRST", "1"), ("(gint) GST_CAPS_INTERSECT_ZIG_ZAG", "0"), ("(gint) GST_CLOCK_BADTIME", "4"), ("(gint) GST_CLOCK_BUSY", "3"), ("(gint) GST_CLOCK_DONE", "7"), ("(gint) GST_CLOCK_EARLY", "1"), ("(gint) GST_CLOCK_ENTRY_PERIODIC", "1"), ("(gint) GST_CLOCK_ENTRY_SINGLE", "0"), ("(gint) GST_CLOCK_ERROR", "5"), ("(guint) GST_CLOCK_FLAG_CAN_DO_PERIODIC_ASYNC", "128"), ("(guint) GST_CLOCK_FLAG_CAN_DO_PERIODIC_SYNC", "64"), ("(guint) GST_CLOCK_FLAG_CAN_DO_SINGLE_ASYNC", "32"), ("(guint) GST_CLOCK_FLAG_CAN_DO_SINGLE_SYNC", "16"), ("(guint) GST_CLOCK_FLAG_CAN_SET_MASTER", "512"), ("(guint) GST_CLOCK_FLAG_CAN_SET_RESOLUTION", "256"), ("(guint) GST_CLOCK_FLAG_LAST", "4096"), ("(guint) GST_CLOCK_FLAG_NEEDS_STARTUP_SYNC", "1024"), ("(gint) GST_CLOCK_OK", "0"), ("GST_CLOCK_TIME_NONE", "18446744073709551615"), ("(gint) GST_CLOCK_TYPE_MONOTONIC", "1"), ("(gint) GST_CLOCK_TYPE_OTHER", "2"), ("(gint) GST_CLOCK_TYPE_REALTIME", "0"), ("(gint) GST_CLOCK_UNSCHEDULED", "2"), ("(gint) GST_CLOCK_UNSUPPORTED", "6"), ("(gint) GST_CORE_ERROR_CAPS", "10"), ("(gint) GST_CORE_ERROR_CLOCK", 
"13"), ("(gint) GST_CORE_ERROR_DISABLED", "14"), ("(gint) GST_CORE_ERROR_EVENT", "8"), ("(gint) GST_CORE_ERROR_FAILED", "1"), ("(gint) GST_CORE_ERROR_MISSING_PLUGIN", "12"), ("(gint) GST_CORE_ERROR_NEGOTIATION", "7"), ("(gint) GST_CORE_ERROR_NOT_IMPLEMENTED", "3"), ("(gint) GST_CORE_ERROR_NUM_ERRORS", "15"), ("(gint) GST_CORE_ERROR_PAD", "5"), ("(gint) GST_CORE_ERROR_SEEK", "9"), ("(gint) GST_CORE_ERROR_STATE_CHANGE", "4"), ("(gint) GST_CORE_ERROR_TAG", "11"), ("(gint) GST_CORE_ERROR_THREAD", "6"), ("(gint) GST_CORE_ERROR_TOO_LAZY", "2"), ("(guint) GST_DEBUG_BG_BLACK", "0"), ("(guint) GST_DEBUG_BG_BLUE", "64"), ("(guint) GST_DEBUG_BG_CYAN", "96"), ("(guint) GST_DEBUG_BG_GREEN", "32"), ("(guint) GST_DEBUG_BG_MAGENTA", "80"), ("GST_DEBUG_BG_MASK", "240"), ("(guint) GST_DEBUG_BG_RED", "16"), ("(guint) GST_DEBUG_BG_WHITE", "112"), ("(guint) GST_DEBUG_BG_YELLOW", "48"), ("(guint) GST_DEBUG_BOLD", "256"), ("(gint) GST_DEBUG_COLOR_MODE_OFF", "0"), ("(gint) GST_DEBUG_COLOR_MODE_ON", "1"), ("(gint) GST_DEBUG_COLOR_MODE_UNIX", "2"), ("(guint) GST_DEBUG_FG_BLACK", "0"), ("(guint) GST_DEBUG_FG_BLUE", "4"), ("(guint) GST_DEBUG_FG_CYAN", "6"), ("(guint) GST_DEBUG_FG_GREEN", "2"), ("(guint) GST_DEBUG_FG_MAGENTA", "5"), ("GST_DEBUG_FG_MASK", "15"), ("(guint) GST_DEBUG_FG_RED", "1"), ("(guint) GST_DEBUG_FG_WHITE", "7"), ("(guint) GST_DEBUG_FG_YELLOW", "3"), ("GST_DEBUG_FORMAT_MASK", "65280"), ("(guint) GST_DEBUG_GRAPH_SHOW_ALL", "15"), ("(guint) GST_DEBUG_GRAPH_SHOW_CAPS_DETAILS", "2"), ("(guint) GST_DEBUG_GRAPH_SHOW_FULL_PARAMS", "16"), ("(guint) GST_DEBUG_GRAPH_SHOW_MEDIA_TYPE", "1"), ("(guint) GST_DEBUG_GRAPH_SHOW_NON_DEFAULT_PARAMS", "4"), ("(guint) GST_DEBUG_GRAPH_SHOW_STATES", "8"), ("(guint) GST_DEBUG_GRAPH_SHOW_VERBOSE", "4294967295"), ("(guint) GST_DEBUG_UNDERLINE", "512"), ("GST_ELEMENT_FACTORY_KLASS_DECODER", "Decoder"), ("GST_ELEMENT_FACTORY_KLASS_DECRYPTOR", "Decryptor"), ("GST_ELEMENT_FACTORY_KLASS_DEMUXER", "Demuxer"), ("GST_ELEMENT_FACTORY_KLASS_DEPAYLOADER", 
"Depayloader"), ("GST_ELEMENT_FACTORY_KLASS_ENCODER", "Encoder"), ("GST_ELEMENT_FACTORY_KLASS_ENCRYPTOR", "Encryptor"), ("GST_ELEMENT_FACTORY_KLASS_FORMATTER", "Formatter"), ("GST_ELEMENT_FACTORY_KLASS_HARDWARE", "Hardware"), ("GST_ELEMENT_FACTORY_KLASS_MEDIA_AUDIO", "Audio"), ("GST_ELEMENT_FACTORY_KLASS_MEDIA_IMAGE", "Image"), ("GST_ELEMENT_FACTORY_KLASS_MEDIA_METADATA", "Metadata"), ("GST_ELEMENT_FACTORY_KLASS_MEDIA_SUBTITLE", "Subtitle"), ("GST_ELEMENT_FACTORY_KLASS_MEDIA_VIDEO", "Video"), ("GST_ELEMENT_FACTORY_KLASS_MUXER", "Muxer"), ("GST_ELEMENT_FACTORY_KLASS_PARSER", "Parser"), ("GST_ELEMENT_FACTORY_KLASS_PAYLOADER", "Payloader"), ("GST_ELEMENT_FACTORY_KLASS_SINK", "Sink"), ("GST_ELEMENT_FACTORY_KLASS_SRC", "Source"), ("GST_ELEMENT_FACTORY_TYPE_ANY", "562949953421311"), ("GST_ELEMENT_FACTORY_TYPE_AUDIOVIDEO_SINKS", "3940649673949188"), ("GST_ELEMENT_FACTORY_TYPE_AUDIO_ENCODER", "1125899906842626"), ("GST_ELEMENT_FACTORY_TYPE_DECODABLE", "1377"), ("GST_ELEMENT_FACTORY_TYPE_DECODER", "1"), ("GST_ELEMENT_FACTORY_TYPE_DECRYPTOR", "1024"), ("GST_ELEMENT_FACTORY_TYPE_DEMUXER", "32"), ("GST_ELEMENT_FACTORY_TYPE_DEPAYLOADER", "256"), ("GST_ELEMENT_FACTORY_TYPE_ENCODER", "2"), ("GST_ELEMENT_FACTORY_TYPE_ENCRYPTOR", "2048"), ("GST_ELEMENT_FACTORY_TYPE_FORMATTER", "512"), ("GST_ELEMENT_FACTORY_TYPE_MAX_ELEMENTS", "281474976710656"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_ANY", "18446462598732840960"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_AUDIO", "1125899906842624"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_IMAGE", "2251799813685248"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_METADATA", "9007199254740992"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_SUBTITLE", "4503599627370496"), ("GST_ELEMENT_FACTORY_TYPE_MEDIA_VIDEO", "562949953421312"), ("GST_ELEMENT_FACTORY_TYPE_MUXER", "16"), ("GST_ELEMENT_FACTORY_TYPE_PARSER", "64"), ("GST_ELEMENT_FACTORY_TYPE_PAYLOADER", "128"), ("GST_ELEMENT_FACTORY_TYPE_SINK", "4"), ("GST_ELEMENT_FACTORY_TYPE_SRC", "8"), ("GST_ELEMENT_FACTORY_TYPE_VIDEO_ENCODER", 
"2814749767106562"), ("(guint) GST_ELEMENT_FLAG_INDEXABLE", "512"), ("(guint) GST_ELEMENT_FLAG_LAST", "16384"), ("(guint) GST_ELEMENT_FLAG_LOCKED_STATE", "16"), ("(guint) GST_ELEMENT_FLAG_PROVIDE_CLOCK", "128"), ("(guint) GST_ELEMENT_FLAG_REQUIRE_CLOCK", "256"), ("(guint) GST_ELEMENT_FLAG_SINK", "32"), ("(guint) GST_ELEMENT_FLAG_SOURCE", "64"), ("GST_ELEMENT_METADATA_AUTHOR", "author"), ("GST_ELEMENT_METADATA_DESCRIPTION", "description"), ("GST_ELEMENT_METADATA_DOC_URI", "doc-uri"), ("GST_ELEMENT_METADATA_ICON_NAME", "icon-name"), ("GST_ELEMENT_METADATA_KLASS", "klass"), ("GST_ELEMENT_METADATA_LONGNAME", "long-name"), ("(gint) GST_EVENT_BUFFERSIZE", "23054"), ("(gint) GST_EVENT_CAPS", "12814"), ("(gint) GST_EVENT_CUSTOM_BOTH", "79367"), ("(gint) GST_EVENT_CUSTOM_BOTH_OOB", "81923"), ("(gint) GST_EVENT_CUSTOM_DOWNSTREAM", "71686"), ("(gint) GST_EVENT_CUSTOM_DOWNSTREAM_OOB", "74242"), ("(gint) GST_EVENT_CUSTOM_DOWNSTREAM_STICKY", "76830"), ("(gint) GST_EVENT_CUSTOM_UPSTREAM", "69121"), ("(gint) GST_EVENT_EOS", "28174"), ("(gint) GST_EVENT_FLUSH_START", "2563"), ("(gint) GST_EVENT_FLUSH_STOP", "5127"), ("(gint) GST_EVENT_GAP", "40966"), ("(gint) GST_EVENT_LATENCY", "56321"), ("(gint) GST_EVENT_NAVIGATION", "53761"), ("GST_EVENT_NUM_SHIFT", "8"), ("(gint) GST_EVENT_PROTECTION", "33310"), ("(gint) GST_EVENT_QOS", "48641"), ("(gint) GST_EVENT_RECONFIGURE", "61441"), ("(gint) GST_EVENT_SEEK", "51201"), ("(gint) GST_EVENT_SEGMENT", "17934"), ("(gint) GST_EVENT_SEGMENT_DONE", "38406"), ("(gint) GST_EVENT_SELECT_STREAMS", "66561"), ("(gint) GST_EVENT_SINK_MESSAGE", "25630"), ("(gint) GST_EVENT_STEP", "58881"), ("(gint) GST_EVENT_STREAM_COLLECTION", "19230"), ("(gint) GST_EVENT_STREAM_GROUP_DONE", "26894"), ("(gint) GST_EVENT_STREAM_START", "10254"), ("(gint) GST_EVENT_TAG", "20510"), ("(gint) GST_EVENT_TOC", "30750"), ("(gint) GST_EVENT_TOC_SELECT", "64001"), ("GST_EVENT_TYPE_BOTH", "3"), ("(guint) GST_EVENT_TYPE_DOWNSTREAM", "2"), ("(guint) GST_EVENT_TYPE_SERIALIZED", "4"), 
("(guint) GST_EVENT_TYPE_STICKY", "8"), ("(guint) GST_EVENT_TYPE_STICKY_MULTI", "16"), ("(guint) GST_EVENT_TYPE_UPSTREAM", "1"), ("(gint) GST_EVENT_UNKNOWN", "0"), ("GST_FLAG_SET_MASK_EXACT", "4294967295"), ("(gint) GST_FLOW_CUSTOM_ERROR", "-100"), ("(gint) GST_FLOW_CUSTOM_ERROR_1", "-101"), ("(gint) GST_FLOW_CUSTOM_ERROR_2", "-102"), ("(gint) GST_FLOW_CUSTOM_SUCCESS", "100"), ("(gint) GST_FLOW_CUSTOM_SUCCESS_1", "101"), ("(gint) GST_FLOW_CUSTOM_SUCCESS_2", "102"), ("(gint) GST_FLOW_EOS", "-3"), ("(gint) GST_FLOW_ERROR", "-5"), ("(gint) GST_FLOW_FLUSHING", "-2"), ("(gint) GST_FLOW_NOT_LINKED", "-1"), ("(gint) GST_FLOW_NOT_NEGOTIATED", "-4"), ("(gint) GST_FLOW_NOT_SUPPORTED", "-6"), ("(gint) GST_FLOW_OK", "0"), ("(gint) GST_FORMAT_BUFFERS", "4"), ("(gint) GST_FORMAT_BYTES", "2"), ("(gint) GST_FORMAT_DEFAULT", "1"), ("(gint) GST_FORMAT_PERCENT", "5"), ("GST_FORMAT_PERCENT_MAX", "1000000"), ("GST_FORMAT_PERCENT_SCALE", "10000"), ("(gint) GST_FORMAT_TIME", "3"), ("(gint) GST_FORMAT_UNDEFINED", "0"), ("GST_GROUP_ID_INVALID", "0"), ("(gint) GST_ITERATOR_DONE", "0"), ("(gint) GST_ITERATOR_ERROR", "3"), ("(gint) GST_ITERATOR_ITEM_END", "2"), ("(gint) GST_ITERATOR_ITEM_PASS", "1"), ("(gint) GST_ITERATOR_ITEM_SKIP", "0"), ("(gint) GST_ITERATOR_OK", "1"), ("(gint) GST_ITERATOR_RESYNC", "2"), ("(gint) GST_LEVEL_COUNT", "10"), ("(gint) GST_LEVEL_DEBUG", "5"), ("(gint) GST_LEVEL_ERROR", "1"), ("(gint) GST_LEVEL_FIXME", "3"), ("(gint) GST_LEVEL_INFO", "4"), ("(gint) GST_LEVEL_LOG", "6"), ("(gint) GST_LEVEL_MEMDUMP", "9"), ("(gint) GST_LEVEL_NONE", "0"), ("(gint) GST_LEVEL_TRACE", "7"), ("(gint) GST_LEVEL_WARNING", "2"), ("(gint) GST_LIBRARY_ERROR_ENCODE", "6"), ("(gint) GST_LIBRARY_ERROR_FAILED", "1"), ("(gint) GST_LIBRARY_ERROR_INIT", "3"), ("(gint) GST_LIBRARY_ERROR_NUM_ERRORS", "7"), ("(gint) GST_LIBRARY_ERROR_SETTINGS", "5"), ("(gint) GST_LIBRARY_ERROR_SHUTDOWN", "4"), ("(gint) GST_LIBRARY_ERROR_TOO_LAZY", "2"), ("GST_LICENSE_UNKNOWN", "unknown"), ("(guint) 
GST_LOCK_FLAG_EXCLUSIVE", "4"), ("(guint) GST_LOCK_FLAG_LAST", "256"), ("(guint) GST_LOCK_FLAG_READ", "1"), ("GST_LOCK_FLAG_READWRITE", "3"), ("(guint) GST_LOCK_FLAG_WRITE", "2"), ("(guint) GST_MAP_FLAG_LAST", "65536"), ("(guint) GST_MAP_READ", "1"), ("GST_MAP_READWRITE", "3"), ("(guint) GST_MAP_WRITE", "2"), ("(guint) GST_MEMORY_FLAG_LAST", "1048576"), ("(guint) GST_MEMORY_FLAG_NOT_MAPPABLE", "256"), ("(guint) GST_MEMORY_FLAG_NO_SHARE", "16"), ("(guint) GST_MEMORY_FLAG_PHYSICALLY_CONTIGUOUS", "128"), ("(guint) GST_MEMORY_FLAG_READONLY", "2"), ("(guint) GST_MEMORY_FLAG_ZERO_PADDED", "64"), ("(guint) GST_MEMORY_FLAG_ZERO_PREFIXED", "32"), ("(guint) GST_MESSAGE_ANY", "4294967295"), ("(guint) GST_MESSAGE_APPLICATION", "16384"), ("(guint) GST_MESSAGE_ASYNC_DONE", "2097152"), ("(guint) GST_MESSAGE_ASYNC_START", "1048576"), ("(guint) GST_MESSAGE_BUFFERING", "32"), ("(guint) GST_MESSAGE_CLOCK_LOST", "1024"), ("(guint) GST_MESSAGE_CLOCK_PROVIDE", "512"), ("(guint) GST_MESSAGE_DEVICE_ADDED", "2147483649"), ("(guint) GST_MESSAGE_DEVICE_CHANGED", "2147483654"), ("(guint) GST_MESSAGE_DEVICE_REMOVED", "2147483650"), ("(guint) GST_MESSAGE_DURATION_CHANGED", "262144"), ("(guint) GST_MESSAGE_ELEMENT", "32768"), ("(guint) GST_MESSAGE_EOS", "1"), ("(guint) GST_MESSAGE_ERROR", "2"), ("(guint) GST_MESSAGE_EXTENDED", "2147483648"), ("(guint) GST_MESSAGE_HAVE_CONTEXT", "1073741824"), ("(guint) GST_MESSAGE_INFO", "8"), ("(guint) GST_MESSAGE_LATENCY", "524288"), ("(guint) GST_MESSAGE_NEED_CONTEXT", "536870912"), ("(guint) GST_MESSAGE_NEW_CLOCK", "2048"), ("(guint) GST_MESSAGE_PROGRESS", "33554432"), ("(guint) GST_MESSAGE_PROPERTY_NOTIFY", "2147483651"), ("(guint) GST_MESSAGE_QOS", "16777216"), ("(guint) GST_MESSAGE_REDIRECT", "2147483654"), ("(guint) GST_MESSAGE_REQUEST_STATE", "4194304"), ("(guint) GST_MESSAGE_RESET_TIME", "134217728"), ("(guint) GST_MESSAGE_SEGMENT_DONE", "131072"), ("(guint) GST_MESSAGE_SEGMENT_START", "65536"), ("(guint) GST_MESSAGE_STATE_CHANGED", "64"), ("(guint) 
GST_MESSAGE_STATE_DIRTY", "128"), ("(guint) GST_MESSAGE_STEP_DONE", "256"), ("(guint) GST_MESSAGE_STEP_START", "8388608"), ("(guint) GST_MESSAGE_STREAMS_SELECTED", "2147483653"), ("(guint) GST_MESSAGE_STREAM_COLLECTION", "2147483652"), ("(guint) GST_MESSAGE_STREAM_START", "268435456"), ("(guint) GST_MESSAGE_STREAM_STATUS", "8192"), ("(guint) GST_MESSAGE_STRUCTURE_CHANGE", "4096"), ("(guint) GST_MESSAGE_TAG", "16"), ("(guint) GST_MESSAGE_TOC", "67108864"), ("(guint) GST_MESSAGE_UNKNOWN", "0"), ("(guint) GST_MESSAGE_WARNING", "4"), ("(guint) GST_META_FLAG_LAST", "65536"), ("(guint) GST_META_FLAG_LOCKED", "4"), ("(guint) GST_META_FLAG_NONE", "0"), ("(guint) GST_META_FLAG_POOLED", "2"), ("(guint) GST_META_FLAG_READONLY", "1"), ("GST_META_TAG_MEMORY_STR", "memory"), ("(guint) GST_MINI_OBJECT_FLAG_LAST", "16"), ("(guint) GST_MINI_OBJECT_FLAG_LOCKABLE", "1"), ("(guint) GST_MINI_OBJECT_FLAG_LOCK_READONLY", "2"), ("(guint) GST_MINI_OBJECT_FLAG_MAY_BE_LEAKED", "4"), ("GST_MSECOND", "1000000"), ("GST_NSECOND", "1"), ("(guint) GST_OBJECT_FLAG_LAST", "16"), ("(guint) GST_OBJECT_FLAG_MAY_BE_LEAKED", "1"), ("(gint) GST_PAD_ALWAYS", "0"), ("(guint) GST_PAD_FLAG_ACCEPT_INTERSECT", "32768"), ("(guint) GST_PAD_FLAG_ACCEPT_TEMPLATE", "65536"), ("(guint) GST_PAD_FLAG_BLOCKED", "16"), ("(guint) GST_PAD_FLAG_BLOCKING", "128"), ("(guint) GST_PAD_FLAG_EOS", "64"), ("(guint) GST_PAD_FLAG_FIXED_CAPS", "2048"), ("(guint) GST_PAD_FLAG_FLUSHING", "32"), ("(guint) GST_PAD_FLAG_LAST", "1048576"), ("(guint) GST_PAD_FLAG_NEED_PARENT", "256"), ("(guint) GST_PAD_FLAG_NEED_RECONFIGURE", "512"), ("(guint) GST_PAD_FLAG_PENDING_EVENTS", "1024"), ("(guint) GST_PAD_FLAG_PROXY_ALLOCATION", "8192"), ("(guint) GST_PAD_FLAG_PROXY_CAPS", "4096"), ("(guint) GST_PAD_FLAG_PROXY_SCHEDULING", "16384"), ("(guint) GST_PAD_LINK_CHECK_CAPS", "4"), ("(guint) GST_PAD_LINK_CHECK_DEFAULT", "5"), ("(guint) GST_PAD_LINK_CHECK_HIERARCHY", "1"), ("(guint) GST_PAD_LINK_CHECK_NOTHING", "0"), ("(guint) 
GST_PAD_LINK_CHECK_NO_RECONFIGURE", "8"), ("(guint) GST_PAD_LINK_CHECK_TEMPLATE_CAPS", "2"), ("(gint) GST_PAD_LINK_NOFORMAT", "-4"), ("(gint) GST_PAD_LINK_NOSCHED", "-5"), ("(gint) GST_PAD_LINK_OK", "0"), ("(gint) GST_PAD_LINK_REFUSED", "-6"), ("(gint) GST_PAD_LINK_WAS_LINKED", "-2"), ("(gint) GST_PAD_LINK_WRONG_DIRECTION", "-3"), ("(gint) GST_PAD_LINK_WRONG_HIERARCHY", "-1"), ("(gint) GST_PAD_MODE_NONE", "0"), ("(gint) GST_PAD_MODE_PULL", "2"), ("(gint) GST_PAD_MODE_PUSH", "1"), ("(gint) GST_PAD_PROBE_DROP", "0"), ("(gint) GST_PAD_PROBE_HANDLED", "4"), ("(gint) GST_PAD_PROBE_OK", "1"), ("(gint) GST_PAD_PROBE_PASS", "3"), ("(gint) GST_PAD_PROBE_REMOVE", "2"), ("(guint) GST_PAD_PROBE_TYPE_ALL_BOTH", "1776"), ("(guint) GST_PAD_PROBE_TYPE_BLOCK", "2"), ("(guint) GST_PAD_PROBE_TYPE_BLOCKING", "3"), ("(guint) GST_PAD_PROBE_TYPE_BLOCK_DOWNSTREAM", "114"), ("(guint) GST_PAD_PROBE_TYPE_BLOCK_UPSTREAM", "130"), ("(guint) GST_PAD_PROBE_TYPE_BUFFER", "16"), ("(guint) GST_PAD_PROBE_TYPE_BUFFER_LIST", "32"), ("(guint) GST_PAD_PROBE_TYPE_DATA_BOTH", "240"), ("(guint) GST_PAD_PROBE_TYPE_DATA_DOWNSTREAM", "112"), ("(guint) GST_PAD_PROBE_TYPE_DATA_UPSTREAM", "128"), ("(guint) GST_PAD_PROBE_TYPE_EVENT_BOTH", "192"), ("(guint) GST_PAD_PROBE_TYPE_EVENT_DOWNSTREAM", "64"), ("(guint) GST_PAD_PROBE_TYPE_EVENT_FLUSH", "256"), ("(guint) GST_PAD_PROBE_TYPE_EVENT_UPSTREAM", "128"), ("(guint) GST_PAD_PROBE_TYPE_IDLE", "1"), ("(guint) GST_PAD_PROBE_TYPE_INVALID", "0"), ("(guint) GST_PAD_PROBE_TYPE_PULL", "8192"), ("(guint) GST_PAD_PROBE_TYPE_PUSH", "4096"), ("(guint) GST_PAD_PROBE_TYPE_QUERY_BOTH", "1536"), ("(guint) GST_PAD_PROBE_TYPE_QUERY_DOWNSTREAM", "512"), ("(guint) GST_PAD_PROBE_TYPE_QUERY_UPSTREAM", "1024"), ("(guint) GST_PAD_PROBE_TYPE_SCHEDULING", "12288"), ("(gint) GST_PAD_REQUEST", "2"), ("(gint) GST_PAD_SINK", "2"), ("(gint) GST_PAD_SOMETIMES", "1"), ("(gint) GST_PAD_SRC", "1"), ("(guint) GST_PAD_TEMPLATE_FLAG_LAST", "256"), ("(gint) GST_PAD_UNKNOWN", "0"), 
("GST_PARAM_CONTROLLABLE", "512"), ("GST_PARAM_MUTABLE_PAUSED", "2048"), ("GST_PARAM_MUTABLE_PLAYING", "4096"), ("GST_PARAM_MUTABLE_READY", "1024"), ("GST_PARAM_USER_SHIFT", "65536"), ("(gint) GST_PARSE_ERROR_COULD_NOT_SET_PROPERTY", "4"), ("(gint) GST_PARSE_ERROR_DELAYED_LINK", "7"), ("(gint) GST_PARSE_ERROR_EMPTY", "6"), ("(gint) GST_PARSE_ERROR_EMPTY_BIN", "5"), ("(gint) GST_PARSE_ERROR_LINK", "3"), ("(gint) GST_PARSE_ERROR_NO_SUCH_ELEMENT", "1"), ("(gint) GST_PARSE_ERROR_NO_SUCH_PROPERTY", "2"), ("(gint) GST_PARSE_ERROR_SYNTAX", "0"), ("(guint) GST_PARSE_FLAG_FATAL_ERRORS", "1"), ("(guint) GST_PARSE_FLAG_NONE", "0"), ("(guint) GST_PARSE_FLAG_NO_SINGLE_ELEMENT_BINS", "2"), ("(guint) GST_PARSE_FLAG_PLACE_IN_BIN", "4"), ("(guint) GST_PIPELINE_FLAG_FIXED_CLOCK", "524288"), ("(guint) GST_PIPELINE_FLAG_LAST", "8388608"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_FILE_NAME_IS_PREFIX", "8"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_FILE_NAME_IS_SUFFIX", "4"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_NONE", "0"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_PATHS_ARE_DEFAULT_ONLY", "2"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_PATHS_ARE_RELATIVE_TO_EXE", "16"), ("(guint) GST_PLUGIN_DEPENDENCY_FLAG_RECURSE", "1"), ("(gint) GST_PLUGIN_ERROR_DEPENDENCIES", "1"), ("(gint) GST_PLUGIN_ERROR_MODULE", "0"), ("(gint) GST_PLUGIN_ERROR_NAME_MISMATCH", "2"), ("(guint) GST_PLUGIN_FLAG_BLACKLISTED", "32"), ("(guint) GST_PLUGIN_FLAG_CACHED", "16"), ("(gint) GST_PROGRESS_TYPE_CANCELED", "3"), ("(gint) GST_PROGRESS_TYPE_COMPLETE", "2"), ("(gint) GST_PROGRESS_TYPE_CONTINUE", "1"), ("(gint) GST_PROGRESS_TYPE_ERROR", "4"), ("(gint) GST_PROGRESS_TYPE_START", "0"), ("(gint) GST_PROMISE_RESULT_EXPIRED", "3"), ("(gint) GST_PROMISE_RESULT_INTERRUPTED", "1"), ("(gint) GST_PROMISE_RESULT_PENDING", "0"), ("(gint) GST_PROMISE_RESULT_REPLIED", "2"), ("GST_PROTECTION_SYSTEM_ID_CAPS_FIELD", "protection-system"), ("GST_PROTECTION_UNSPECIFIED_SYSTEM_ID", "unspecified-system-id"), ("(gint) GST_QOS_TYPE_OVERFLOW", "0"), ("(gint) 
GST_QOS_TYPE_THROTTLE", "2"), ("(gint) GST_QOS_TYPE_UNDERFLOW", "1"), ("(gint) GST_QUERY_ACCEPT_CAPS", "40963"), ("(gint) GST_QUERY_ALLOCATION", "35846"), ("(gint) GST_QUERY_BITRATE", "51202"), ("(gint) GST_QUERY_BUFFERING", "28163"), ("(gint) GST_QUERY_CAPS", "43523"), ("(gint) GST_QUERY_CONTEXT", "48643"), ("(gint) GST_QUERY_CONVERT", "20483"), ("(gint) GST_QUERY_CUSTOM", "30723"), ("(gint) GST_QUERY_DRAIN", "46086"), ("(gint) GST_QUERY_DURATION", "5123"), ("(gint) GST_QUERY_FORMATS", "23043"), ("(gint) GST_QUERY_JITTER", "10243"), ("(gint) GST_QUERY_LATENCY", "7683"), ("GST_QUERY_NUM_SHIFT", "8"), ("(gint) GST_QUERY_POSITION", "2563"), ("(gint) GST_QUERY_RATE", "12803"), ("(gint) GST_QUERY_SCHEDULING", "38401"), ("(gint) GST_QUERY_SEEKING", "15363"), ("(gint) GST_QUERY_SEGMENT", "17923"), ("GST_QUERY_TYPE_BOTH", "3"), ("(guint) GST_QUERY_TYPE_DOWNSTREAM", "2"), ("(guint) GST_QUERY_TYPE_SERIALIZED", "4"), ("(guint) GST_QUERY_TYPE_UPSTREAM", "1"), ("(gint) GST_QUERY_UNKNOWN", "0"), ("(gint) GST_QUERY_URI", "33283"), ("(gint) GST_RANK_MARGINAL", "64"), ("(gint) GST_RANK_NONE", "0"), ("(gint) GST_RANK_PRIMARY", "256"), ("(gint) GST_RANK_SECONDARY", "128"), ("(gint) GST_RESOURCE_ERROR_BUSY", "4"), ("(gint) GST_RESOURCE_ERROR_CLOSE", "8"), ("(gint) GST_RESOURCE_ERROR_FAILED", "1"), ("(gint) GST_RESOURCE_ERROR_NOT_AUTHORIZED", "15"), ("(gint) GST_RESOURCE_ERROR_NOT_FOUND", "3"), ("(gint) GST_RESOURCE_ERROR_NO_SPACE_LEFT", "14"), ("(gint) GST_RESOURCE_ERROR_NUM_ERRORS", "16"), ("(gint) GST_RESOURCE_ERROR_OPEN_READ", "5"), ("(gint) GST_RESOURCE_ERROR_OPEN_READ_WRITE", "7"), ("(gint) GST_RESOURCE_ERROR_OPEN_WRITE", "6"), ("(gint) GST_RESOURCE_ERROR_READ", "9"), ("(gint) GST_RESOURCE_ERROR_SEEK", "11"), ("(gint) GST_RESOURCE_ERROR_SETTINGS", "13"), ("(gint) GST_RESOURCE_ERROR_SYNC", "12"), ("(gint) GST_RESOURCE_ERROR_TOO_LAZY", "2"), ("(gint) GST_RESOURCE_ERROR_WRITE", "10"), ("(guint) GST_SCHEDULING_FLAG_BANDWIDTH_LIMITED", "4"), ("(guint) GST_SCHEDULING_FLAG_SEEKABLE", 
"1"), ("(guint) GST_SCHEDULING_FLAG_SEQUENTIAL", "2"), ("(gint) GST_SEARCH_MODE_AFTER", "2"), ("(gint) GST_SEARCH_MODE_BEFORE", "1"), ("(gint) GST_SEARCH_MODE_EXACT", "0"), ("GST_SECOND", "1000000000"), ("(guint) GST_SEEK_FLAG_ACCURATE", "2"), ("(guint) GST_SEEK_FLAG_FLUSH", "1"), ("(guint) GST_SEEK_FLAG_KEY_UNIT", "4"), ("(guint) GST_SEEK_FLAG_NONE", "0"), ("(guint) GST_SEEK_FLAG_SEGMENT", "8"), ("(guint) GST_SEEK_FLAG_SKIP", "16"), ("(guint) GST_SEEK_FLAG_SNAP_AFTER", "64"), ("(guint) GST_SEEK_FLAG_SNAP_BEFORE", "32"), ("(guint) GST_SEEK_FLAG_SNAP_NEAREST", "96"), ("(guint) GST_SEEK_FLAG_TRICKMODE", "16"), ("(guint) GST_SEEK_FLAG_TRICKMODE_KEY_UNITS", "128"), ("(guint) GST_SEEK_FLAG_TRICKMODE_NO_AUDIO", "256"), ("(gint) GST_SEEK_TYPE_END", "2"), ("(gint) GST_SEEK_TYPE_NONE", "0"), ("(gint) GST_SEEK_TYPE_SET", "1"), ("(guint) GST_SEGMENT_FLAG_NONE", "0"), ("(guint) GST_SEGMENT_FLAG_RESET", "1"), ("(guint) GST_SEGMENT_FLAG_SEGMENT", "8"), ("(guint) GST_SEGMENT_FLAG_SKIP", "16"), ("(guint) GST_SEGMENT_FLAG_TRICKMODE", "16"), ("(guint) GST_SEGMENT_FLAG_TRICKMODE_KEY_UNITS", "128"), ("(guint) GST_SEGMENT_FLAG_TRICKMODE_NO_AUDIO", "256"), ("GST_SEQNUM_INVALID", "0"), ("(guint) GST_STACK_TRACE_SHOW_FULL", "1"), ("(gint) GST_STATE_CHANGE_ASYNC", "2"), ("(gint) GST_STATE_CHANGE_FAILURE", "0"), ("(gint) GST_STATE_CHANGE_NO_PREROLL", "3"), ("(gint) GST_STATE_CHANGE_NULL_TO_NULL", "9"), ("(gint) GST_STATE_CHANGE_NULL_TO_READY", "10"), ("(gint) GST_STATE_CHANGE_PAUSED_TO_PAUSED", "27"), ("(gint) GST_STATE_CHANGE_PAUSED_TO_PLAYING", "28"), ("(gint) GST_STATE_CHANGE_PAUSED_TO_READY", "26"), ("(gint) GST_STATE_CHANGE_PLAYING_TO_PAUSED", "35"), ("(gint) GST_STATE_CHANGE_PLAYING_TO_PLAYING", "36"), ("(gint) GST_STATE_CHANGE_READY_TO_NULL", "17"), ("(gint) GST_STATE_CHANGE_READY_TO_PAUSED", "19"), ("(gint) GST_STATE_CHANGE_READY_TO_READY", "18"), ("(gint) GST_STATE_CHANGE_SUCCESS", "1"), ("(gint) GST_STATE_NULL", "1"), ("(gint) GST_STATE_PAUSED", "3"), ("(gint) GST_STATE_PLAYING", 
"4"), ("(gint) GST_STATE_READY", "2"), ("(gint) GST_STATE_VOID_PENDING", "0"), ("(gint) GST_STREAM_ERROR_CODEC_NOT_FOUND", "6"), ("(gint) GST_STREAM_ERROR_DECODE", "7"), ("(gint) GST_STREAM_ERROR_DECRYPT", "12"), ("(gint) GST_STREAM_ERROR_DECRYPT_NOKEY", "13"), ("(gint) GST_STREAM_ERROR_DEMUX", "9"), ("(gint) GST_STREAM_ERROR_ENCODE", "8"), ("(gint) GST_STREAM_ERROR_FAILED", "1"), ("(gint) GST_STREAM_ERROR_FORMAT", "11"), ("(gint) GST_STREAM_ERROR_MUX", "10"), ("(gint) GST_STREAM_ERROR_NOT_IMPLEMENTED", "3"), ("(gint) GST_STREAM_ERROR_NUM_ERRORS", "14"), ("(gint) GST_STREAM_ERROR_TOO_LAZY", "2"), ("(gint) GST_STREAM_ERROR_TYPE_NOT_FOUND", "4"), ("(gint) GST_STREAM_ERROR_WRONG_TYPE", "5"), ("(guint) GST_STREAM_FLAG_NONE", "0"), ("(guint) GST_STREAM_FLAG_SELECT", "2"), ("(guint) GST_STREAM_FLAG_SPARSE", "1"), ("(guint) GST_STREAM_FLAG_UNSELECT", "4"), ("(gint) GST_STREAM_STATUS_TYPE_CREATE", "0"), ("(gint) GST_STREAM_STATUS_TYPE_DESTROY", "3"), ("(gint) GST_STREAM_STATUS_TYPE_ENTER", "1"), ("(gint) GST_STREAM_STATUS_TYPE_LEAVE", "2"), ("(gint) GST_STREAM_STATUS_TYPE_PAUSE", "9"), ("(gint) GST_STREAM_STATUS_TYPE_START", "8"), ("(gint) GST_STREAM_STATUS_TYPE_STOP", "10"), ("(guint) GST_STREAM_TYPE_AUDIO", "2"), ("(guint) GST_STREAM_TYPE_CONTAINER", "8"), ("(guint) GST_STREAM_TYPE_TEXT", "16"), ("(guint) GST_STREAM_TYPE_UNKNOWN", "1"), ("(guint) GST_STREAM_TYPE_VIDEO", "4"), ("(gint) GST_STRUCTURE_CHANGE_TYPE_PAD_LINK", "0"), ("(gint) GST_STRUCTURE_CHANGE_TYPE_PAD_UNLINK", "1"), ("GST_TAG_ALBUM", "album"), ("GST_TAG_ALBUM_ARTIST", "album-artist"), ("GST_TAG_ALBUM_ARTIST_SORTNAME", "album-artist-sortname"), ("GST_TAG_ALBUM_GAIN", "replaygain-album-gain"), ("GST_TAG_ALBUM_PEAK", "replaygain-album-peak"), ("GST_TAG_ALBUM_SORTNAME", "album-sortname"), ("GST_TAG_ALBUM_VOLUME_COUNT", "album-disc-count"), ("GST_TAG_ALBUM_VOLUME_NUMBER", "album-disc-number"), ("GST_TAG_APPLICATION_DATA", "application-data"), ("GST_TAG_APPLICATION_NAME", "application-name"), ("GST_TAG_ARTIST", 
"artist"), ("GST_TAG_ARTIST_SORTNAME", "artist-sortname"), ("GST_TAG_ATTACHMENT", "attachment"), ("GST_TAG_AUDIO_CODEC", "audio-codec"), ("GST_TAG_BEATS_PER_MINUTE", "beats-per-minute"), ("GST_TAG_BITRATE", "bitrate"), ("GST_TAG_CODEC", "codec"), ("GST_TAG_COMMENT", "comment"), ("GST_TAG_COMPOSER", "composer"), ("GST_TAG_COMPOSER_SORTNAME", "composer-sortname"), ("GST_TAG_CONDUCTOR", "conductor"), ("GST_TAG_CONTACT", "contact"), ("GST_TAG_CONTAINER_FORMAT", "container-format"), ("GST_TAG_COPYRIGHT", "copyright"), ("GST_TAG_COPYRIGHT_URI", "copyright-uri"), ("GST_TAG_DATE", "date"), ("GST_TAG_DATE_TIME", "datetime"), ("GST_TAG_DESCRIPTION", "description"), ("GST_TAG_DEVICE_MANUFACTURER", "device-manufacturer"), ("GST_TAG_DEVICE_MODEL", "device-model"), ("GST_TAG_DURATION", "duration"), ("GST_TAG_ENCODED_BY", "encoded-by"), ("GST_TAG_ENCODER", "encoder"), ("GST_TAG_ENCODER_VERSION", "encoder-version"), ("GST_TAG_EXTENDED_COMMENT", "extended-comment"), ("(gint) GST_TAG_FLAG_COUNT", "4"), ("(gint) GST_TAG_FLAG_DECODED", "3"), ("(gint) GST_TAG_FLAG_ENCODED", "2"), ("(gint) GST_TAG_FLAG_META", "1"), ("(gint) GST_TAG_FLAG_UNDEFINED", "0"), ("GST_TAG_GENRE", "genre"), ("GST_TAG_GEO_LOCATION_CAPTURE_DIRECTION", "geo-location-capture-direction"), ("GST_TAG_GEO_LOCATION_CITY", "geo-location-city"), ("GST_TAG_GEO_LOCATION_COUNTRY", "geo-location-country"), ("GST_TAG_GEO_LOCATION_ELEVATION", "geo-location-elevation"), ("GST_TAG_GEO_LOCATION_HORIZONTAL_ERROR", "geo-location-horizontal-error"), ("GST_TAG_GEO_LOCATION_LATITUDE", "geo-location-latitude"), ("GST_TAG_GEO_LOCATION_LONGITUDE", "geo-location-longitude"), ("GST_TAG_GEO_LOCATION_MOVEMENT_DIRECTION", "geo-location-movement-direction"), ("GST_TAG_GEO_LOCATION_MOVEMENT_SPEED", "geo-location-movement-speed"), ("GST_TAG_GEO_LOCATION_NAME", "geo-location-name"), ("GST_TAG_GEO_LOCATION_SUBLOCATION", "geo-location-sublocation"), ("GST_TAG_GROUPING", "grouping"), ("GST_TAG_HOMEPAGE", "homepage"), ("GST_TAG_IMAGE", "image"), 
("GST_TAG_IMAGE_ORIENTATION", "image-orientation"), ("GST_TAG_INTERPRETED_BY", "interpreted-by"), ("GST_TAG_ISRC", "isrc"), ("GST_TAG_KEYWORDS", "keywords"), ("GST_TAG_LANGUAGE_CODE", "language-code"), ("GST_TAG_LANGUAGE_NAME", "language-name"), ("GST_TAG_LICENSE", "license"), ("GST_TAG_LICENSE_URI", "license-uri"), ("GST_TAG_LOCATION", "location"), ("GST_TAG_LYRICS", "lyrics"), ("GST_TAG_MAXIMUM_BITRATE", "maximum-bitrate"), ("(gint) GST_TAG_MERGE_APPEND", "3"), ("(gint) GST_TAG_MERGE_COUNT", "7"), ("(gint) GST_TAG_MERGE_KEEP", "5"), ("(gint) GST_TAG_MERGE_KEEP_ALL", "6"), ("(gint) GST_TAG_MERGE_PREPEND", "4"), ("(gint) GST_TAG_MERGE_REPLACE", "2"), ("(gint) GST_TAG_MERGE_REPLACE_ALL", "1"), ("(gint) GST_TAG_MERGE_UNDEFINED", "0"), ("GST_TAG_MIDI_BASE_NOTE", "midi-base-note"), ("GST_TAG_MINIMUM_BITRATE", "minimum-bitrate"), ("GST_TAG_NOMINAL_BITRATE", "nominal-bitrate"), ("GST_TAG_ORGANIZATION", "organization"), ("GST_TAG_PERFORMER", "performer"), ("GST_TAG_PREVIEW_IMAGE", "preview-image"), ("GST_TAG_PRIVATE_DATA", "private-data"), ("GST_TAG_PUBLISHER", "publisher"), ("GST_TAG_REFERENCE_LEVEL", "replaygain-reference-level"), ("(gint) GST_TAG_SCOPE_GLOBAL", "1"), ("(gint) GST_TAG_SCOPE_STREAM", "0"), ("GST_TAG_SERIAL", "serial"), ("GST_TAG_SHOW_EPISODE_NUMBER", "show-episode-number"), ("GST_TAG_SHOW_NAME", "show-name"), ("GST_TAG_SHOW_SEASON_NUMBER", "show-season-number"), ("GST_TAG_SHOW_SORTNAME", "show-sortname"), ("GST_TAG_SUBTITLE_CODEC", "subtitle-codec"), ("GST_TAG_TITLE", "title"), ("GST_TAG_TITLE_SORTNAME", "title-sortname"), ("GST_TAG_TRACK_COUNT", "track-count"), ("GST_TAG_TRACK_GAIN", "replaygain-track-gain"), ("GST_TAG_TRACK_NUMBER", "track-number"), ("GST_TAG_TRACK_PEAK", "replaygain-track-peak"), ("GST_TAG_USER_RATING", "user-rating"), ("GST_TAG_VERSION", "version"), ("GST_TAG_VIDEO_CODEC", "video-codec"), ("(gint) GST_TASK_PAUSED", "2"), ("(gint) GST_TASK_STARTED", "0"), ("(gint) GST_TASK_STOPPED", "1"), ("(gint) GST_TOC_ENTRY_TYPE_ANGLE", "-3"), 
("(gint) GST_TOC_ENTRY_TYPE_CHAPTER", "3"), ("(gint) GST_TOC_ENTRY_TYPE_EDITION", "-1"), ("(gint) GST_TOC_ENTRY_TYPE_INVALID", "0"), ("(gint) GST_TOC_ENTRY_TYPE_TITLE", "1"), ("(gint) GST_TOC_ENTRY_TYPE_TRACK", "2"), ("(gint) GST_TOC_ENTRY_TYPE_VERSION", "-2"), ("(gint) GST_TOC_LOOP_FORWARD", "1"), ("(gint) GST_TOC_LOOP_NONE", "0"), ("(gint) GST_TOC_LOOP_PING_PONG", "3"), ("(gint) GST_TOC_LOOP_REVERSE", "2"), ("GST_TOC_REPEAT_COUNT_INFINITE", "-1"), ("(gint) GST_TOC_SCOPE_CURRENT", "2"), ("(gint) GST_TOC_SCOPE_GLOBAL", "1"), ("(guint) GST_TRACER_VALUE_FLAGS_AGGREGATED", "2"), ("(guint) GST_TRACER_VALUE_FLAGS_NONE", "0"), ("(guint) GST_TRACER_VALUE_FLAGS_OPTIONAL", "1"), ("(gint) GST_TRACER_VALUE_SCOPE_ELEMENT", "2"), ("(gint) GST_TRACER_VALUE_SCOPE_PAD", "3"), ("(gint) GST_TRACER_VALUE_SCOPE_PROCESS", "0"), ("(gint) GST_TRACER_VALUE_SCOPE_THREAD", "1"), ("(gint) GST_TYPE_FIND_LIKELY", "80"), ("(gint) GST_TYPE_FIND_MAXIMUM", "100"), ("(gint) GST_TYPE_FIND_MINIMUM", "1"), ("(gint) GST_TYPE_FIND_NEARLY_CERTAIN", "99"), ("(gint) GST_TYPE_FIND_NONE", "0"), ("(gint) GST_TYPE_FIND_POSSIBLE", "50"), ("(gint) GST_URI_ERROR_BAD_REFERENCE", "3"), ("(gint) GST_URI_ERROR_BAD_STATE", "2"), ("(gint) GST_URI_ERROR_BAD_URI", "1"), ("(gint) GST_URI_ERROR_UNSUPPORTED_PROTOCOL", "0"), ("GST_URI_NO_PORT", "0"), ("(gint) GST_URI_SINK", "1"), ("(gint) GST_URI_SRC", "2"), ("(gint) GST_URI_UNKNOWN", "0"), ("GST_USECOND", "1000"), ("GST_VALUE_EQUAL", "0"), ("GST_VALUE_GREATER_THAN", "1"), ("GST_VALUE_LESS_THAN", "-1"), ("GST_VALUE_UNORDERED", "2"), ];
53.444938
144
0.652836
1a592666d46d19cfd460c1f893028f3ff59f360b
14,980
//! A custom kubelet backend that can run [waSCC](https://wascc.dev/) based workloads //! //! The crate provides the [`WasccProvider`] type which can be used //! as a provider with [`kubelet`]. //! //! # Example //! ```rust,no_run //! use kubelet::{Kubelet, config::Config}; //! use kubelet::store::oci::FileStore; //! use std::sync::Arc; //! use wascc_provider::WasccProvider; //! //! async fn start() { //! // Get a configuration for the Kubelet //! let kubelet_config = Config::default(); //! let client = oci_distribution::Client::default(); //! let store = Arc::new(FileStore::new(client, &std::path::PathBuf::from(""))); //! //! // Load a kubernetes configuration //! let kubeconfig = kube::Config::infer().await.unwrap(); //! //! // Instantiate the provider type //! let provider = WasccProvider::new(store, &kubelet_config, kubeconfig.clone()).await.unwrap(); //! //! // Instantiate the Kubelet //! let kubelet = Kubelet::new(provider, kubeconfig, kubelet_config).await.unwrap(); //! // Start the Kubelet and block on it //! kubelet.start().await.unwrap(); //! } //! ``` #![deny(missing_docs)] use async_trait::async_trait; use kubelet::backoff::ExponentialBackoffStrategy; use kubelet::container::Handle as ContainerHandle; use kubelet::handle::StopHandler; use kubelet::node::Builder; use kubelet::pod::{Handle, Pod, PodKey}; use kubelet::provider::Provider; use kubelet::provider::ProviderError; use kubelet::store::Store; use kubelet::volume::Ref; use log::{debug, info}; use tempfile::NamedTempFile; use tokio::sync::RwLock; use wascc_fs::FileSystemProvider; use wascc_host::{Actor, Host, NativeCapability}; use wascc_httpsrv::HttpServerProvider; use wascc_logging::{LoggingProvider, LOG_PATH_KEY}; extern crate rand; use std::collections::{BTreeMap, HashMap}; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use tokio::sync::Mutex as TokioMutex; mod states; use states::registered::Registered; use states::terminated::Terminated; /// The architecture that the pod targets. 
const TARGET_WASM32_WASCC: &str = "wasm32-wascc";
/// The name of the Filesystem capability.
const FS_CAPABILITY: &str = "wascc:blobstore";
/// The name of the HTTP capability.
const HTTP_CAPABILITY: &str = "wascc:http_server";
/// The name of the Logging capability.
const LOG_CAPABILITY: &str = "wascc:logging";
/// The root directory of waSCC logs.
const LOG_DIR_NAME: &str = "wascc-logs";
/// The key used to define the root directory of the Filesystem capability.
const FS_CONFIG_ROOTDIR: &str = "ROOT";
/// The root directory of waSCC volumes.
const VOLUME_DIR: &str = "volumes";

/// Kubernetes' view of environment variables is an unordered map of string to string.
type EnvVars = std::collections::HashMap<String, String>;

/// A [kubelet::handle::Handle] implementation for a wascc actor
pub struct ActorHandle {
    /// The public key of the wascc Actor that will be stopped
    pub key: String,
    // Shared handle to the waSCC host the actor is running on.
    host: Arc<Mutex<Host>>,
    // Volume bindings registered for this actor; drained (consumed) on stop.
    volumes: Vec<VolumeBinding>,
    // Capability names the actor declared; used on stop to decide whether
    // per-volume filesystem capabilities must also be removed.
    capabilities: Vec<String>,
}

#[async_trait::async_trait]
impl StopHandler for ActorHandle {
    /// Removes the actor from the waSCC host and, if the actor used the
    /// filesystem capability, removes each per-volume capability binding.
    async fn stop(&mut self) -> anyhow::Result<()> {
        debug!("stopping wascc instance {}", self.key);
        // Clone/move everything the blocking closure needs, since it must be 'static.
        let host = self.host.clone();
        let key = self.key.clone();
        // drain(0..) moves the bindings out of `self` so they can be consumed below.
        let volumes: Vec<VolumeBinding> = self.volumes.drain(0..).collect();
        let capabilities = self.capabilities.clone();
        // The Host API is synchronous and guarded by a std Mutex, so do the work
        // on the blocking thread pool instead of the async executor.
        tokio::task::spawn_blocking(move || {
            let lock = host.lock().unwrap();
            lock.remove_actor(&key)
                .map_err(|e| anyhow::anyhow!("unable to remove actor: {:?}", e))?;
            if capabilities.contains(&FS_CAPABILITY.to_owned()) {
                for volume in volumes.into_iter() {
                    // Each volume was registered as its own named filesystem
                    // capability binding, so each must be removed individually.
                    lock.remove_native_capability(FS_CAPABILITY, Some(volume.name.clone()))
                        .map_err(|e| {
                            anyhow::anyhow!(
                                "unable to remove volume {:?} capability: {:?}",
                                volume.name,
                                e
                            )
                        })?;
                }
            }
            Ok(())
        })
        // `?` surfaces a JoinError from the blocking task; the inner Result is returned as-is.
        .await?
    }

    async fn wait(&mut self) -> anyhow::Result<()> {
        // TODO: Figure out if there is a way to wait for an actor to be removed
        Ok(())
    }
}

/// WasccProvider provides a Kubelet runtime implementation that executes WASM binaries.
///
/// Currently, this runtime uses WASCC as a host, loading the primary container as an actor.
/// TODO: In the future, we will look at loading capabilities using the "sidecar" metaphor
/// from Kubernetes.
#[derive(Clone)]
pub struct WasccProvider {
    // All fields are behind Arc (see SharedPodState), so Clone is cheap.
    shared: SharedPodState,
}

/// State shared between the provider and every pod state it creates.
/// Cloning only bumps reference counts.
#[derive(Clone)]
struct SharedPodState {
    client: kube::Client,
    // Per-pod container handles, keyed by pod identity.
    handles: Arc<RwLock<BTreeMap<PodKey, Handle<ActorHandle, LogHandleFactory>>>>,
    // Module store used to fetch actor images.
    store: Arc<dyn Store + Sync + Send>,
    // Root directory for per-volume host paths.
    volume_path: PathBuf,
    // Root directory for per-actor log files.
    log_path: PathBuf,
    // The single waSCC host all actors run on.
    host: Arc<Mutex<Host>>,
    // Tracks which pod currently owns which HTTP port.
    port_map: Arc<TokioMutex<BTreeMap<u16, PodKey>>>,
}

impl WasccProvider {
    /// Returns a new wasCC provider configured to use the proper data directory
    /// (including creating it if necessary)
    pub async fn new(
        store: Arc<dyn Store + Sync + Send>,
        config: &kubelet::config::Config,
        kubeconfig: kube::Config,
    ) -> anyhow::Result<Self> {
        let client = kube::Client::new(kubeconfig);
        let host = Arc::new(Mutex::new(Host::new()));
        let log_path = config.data_dir.join(LOG_DIR_NAME);
        let volume_path = config.data_dir.join(VOLUME_DIR);
        let port_map = Arc::new(TokioMutex::new(BTreeMap::<u16, PodKey>::new()));
        tokio::fs::create_dir_all(&log_path).await?;
        tokio::fs::create_dir_all(&volume_path).await?;

        // wascc has native and portable capabilities.
        //
        // Native capabilities are either dynamic libraries (.so, .dylib, .dll)
        // or statically linked Rust libraries. If the native capability is a dynamic
        // library it must be loaded and configured through [`NativeCapability::from_file`].
        // If it is a statically linked library it can be configured through
        // [`NativeCapability::from_instance`].
        //
        // Portable capabilities are WASM modules. Portable capabilities
        // don't fully work, and won't until the WASI spec has matured.
        //
        // Here we are using the native capabilities as statically linked libraries that will
        // be compiled into the wascc-provider binary.
        let cloned_host = host.clone();
        // Capability registration is a synchronous Host API call; run it on the
        // blocking pool so the async executor is not stalled.
        tokio::task::spawn_blocking(move || {
            info!("Loading HTTP capability");
            let http_provider = HttpServerProvider::new();
            let data = NativeCapability::from_instance(http_provider, None)
                .map_err(|e| anyhow::anyhow!("Failed to instantiate HTTP capability: {}", e))?;
            cloned_host
                .lock()
                .unwrap()
                .add_native_capability(data)
                .map_err(|e| anyhow::anyhow!("Failed to add HTTP capability: {}", e))?;
            info!("Loading log capability");
            let logging_provider = LoggingProvider::new();
            let logging_capability = NativeCapability::from_instance(logging_provider, None)
                .map_err(|e| anyhow::anyhow!("Failed to instantiate log capability: {}", e))?;
            cloned_host
                .lock()
                .unwrap()
                .add_native_capability(logging_capability)
                .map_err(|e| anyhow::anyhow!("Failed to add log capability: {}", e))
        })
        // First `?` is the JoinError, second is the capability-registration Result.
        .await??;
        Ok(Self {
            shared: SharedPodState {
                client,
                handles: Default::default(),
                store,
                volume_path,
                log_path,
                host,
                port_map,
            },
        })
    }
}

/// Working data accumulated while a pod's modules are being prepared to run.
struct ModuleRunContext {
    // Module name -> raw WASM bytes.
    modules: HashMap<String, Vec<u8>>,
    // Volume name -> volume reference.
    volumes: HashMap<String, Ref>,
}

/// State that is shared between pod state handlers.
pub struct PodState {
    key: PodKey,
    run_context: ModuleRunContext,
    // Count of errors seen so far; used by the state machine handlers.
    errors: usize,
    image_pull_backoff_strategy: ExponentialBackoffStrategy,
    crash_loop_backoff_strategy: ExponentialBackoffStrategy,
    shared: SharedPodState,
}

// No cleanup state needed, we clean up when dropping PodState.
#[async_trait]
impl kubelet::state::AsyncDrop for PodState {
    /// Releases resources owned by this pod: any HTTP ports it reserved in the
    /// shared port map, and its entry in the shared handle map.
    async fn async_drop(self) {
        {
            let mut lock = self.shared.port_map.lock().await;
            // Collect first, then remove, to avoid mutating the map while iterating.
            let ports_to_remove: Vec<u16> = lock
                .iter()
                .filter_map(|(k, v)| if v == &self.key { Some(*k) } else { None })
                .collect();
            debug!(
                "Pod {} in namespace {} releasing ports {:?}.",
                &self.key.name(),
                &self.key.namespace(),
                &ports_to_remove
            );
            for port in ports_to_remove {
                lock.remove(&port);
            }
        } // port_map lock released here before taking the handles lock.
        {
            let mut handles = self.shared.handles.write().await;
            handles.remove(&self.key);
        }
    }
}

#[async_trait]
impl Provider for WasccProvider {
    type InitialState = Registered;
    type TerminatedState = Terminated;
    type PodState = PodState;

    const ARCH: &'static str = TARGET_WASM32_WASCC;

    /// Configures node metadata: architecture plus taints so that only pods
    /// targeting this architecture are scheduled/kept here.
    async fn node(&self, builder: &mut Builder) -> anyhow::Result<()> {
        // NOTE(review): the architecture string here ("wasm-wasi") differs from
        // Self::ARCH ("wasm32-wascc") used for the taints — confirm this is intentional.
        builder.set_architecture("wasm-wasi");
        builder.add_taint("NoSchedule", "kubernetes.io/arch", Self::ARCH);
        builder.add_taint("NoExecute", "kubernetes.io/arch", Self::ARCH);
        Ok(())
    }

    /// Creates the per-pod state, seeded with empty run context and fresh
    /// backoff strategies, sharing the provider-wide state.
    async fn initialize_pod_state(&self, pod: &Pod) -> anyhow::Result<Self::PodState> {
        let run_context = ModuleRunContext {
            modules: Default::default(),
            volumes: Default::default(),
        };
        let key = PodKey::from(pod);
        Ok(PodState {
            key,
            run_context,
            errors: 0,
            image_pull_backoff_strategy: ExponentialBackoffStrategy::default(),
            crash_loop_backoff_strategy: ExponentialBackoffStrategy::default(),
            shared: self.shared.clone(),
        })
    }

    /// Streams the named container's log output to `sender`.
    ///
    /// Returns [`ProviderError::PodNotFound`] when no handle exists for the pod.
    async fn logs(
        &self,
        namespace: String,
        pod_name: String,
        container_name: String,
        sender: kubelet::log::Sender,
    ) -> anyhow::Result<()> {
        // A write lock is required because `output` needs `&mut` access to the handle.
        let mut handles = self.shared.handles.write().await;
        let handle = handles
            .get_mut(&PodKey::new(&namespace, &pod_name))
            .ok_or_else(|| ProviderError::PodNotFound {
                pod_name: pod_name.clone(),
            })?;
        handle.output(&container_name, sender).await
    }
}

/// A named volume and the host path backing it.
struct VolumeBinding {
    name: String,
    host_path: PathBuf,
}

/// Capability describes a waSCC capability.
///
/// Capabilities are made available to actors through a two-part process:
/// - They must be registered
/// - For each actor, the capability must be configured
struct Capability {
    // Capability identifier, e.g. "wascc:http_server".
    name: &'static str,
    // Optional binding name (used for per-volume filesystem capabilities).
    binding: Option<String>,
    // Environment/configuration values passed to the capability for this actor.
    env: EnvVars,
}

/// Holds our tempfile handle.
struct LogHandleFactory {
    temp: NamedTempFile,
}

impl kubelet::log::HandleFactory<tokio::fs::File> for LogHandleFactory {
    /// Creates `tokio::fs::File` on demand for log reading.
    fn new_handle(&self) -> tokio::fs::File {
        tokio::fs::File::from_std(self.temp.reopen().unwrap())
    }
}

/// Run the given WASM data as a waSCC actor with the given public key.
///
/// The provided capabilities will be configured for this actor, but the capabilities
/// must first be loaded into the host by some other process, such as register_native_capabilities().
///
/// `log_path` is the directory in which the actor's temporary log file is created.
/// `port_assigned` is exposed to the actor via the "PORT" configuration value when
/// it declares the HTTP capability.
fn wascc_run(
    host: Arc<Mutex<Host>>,
    data: Vec<u8>,
    env: EnvVars,
    volumes: Vec<VolumeBinding>,
    log_path: &Path,
    port_assigned: u16,
) -> anyhow::Result<ContainerHandle<ActorHandle, LogHandleFactory>> {
    let mut capabilities: Vec<Capability> = Vec::new();
    info!("sending actor to wascc host");
    let log_output = NamedTempFile::new_in(&log_path)?;
    let load = Actor::from_slice(&data).map_err(|e| anyhow::anyhow!("Error loading WASM: {}", e))?;
    let pk = load.public_key();
    // Capabilities the actor itself declares; only those get configured below.
    let actor_caps = load.capabilities();
    if actor_caps.contains(&LOG_CAPABILITY.to_owned()) {
        // Point the logging capability at this actor's temp log file.
        let mut logenv = env.clone();
        logenv.insert(
            LOG_PATH_KEY.to_string(),
            log_output.path().to_str().unwrap().to_owned(),
        );
        capabilities.push(Capability {
            name: LOG_CAPABILITY,
            binding: None,
            env: logenv,
        });
    }
    if actor_caps.contains(&HTTP_CAPABILITY.to_owned()) {
        let mut httpenv = env.clone();
        httpenv.insert("PORT".to_string(), port_assigned.to_string());
        capabilities.push(Capability {
            name: HTTP_CAPABILITY,
            binding: None,
            env: httpenv,
        });
    }
    if actor_caps.contains(&FS_CAPABILITY.to_owned()) {
        // Each volume gets its own named filesystem capability instance, rooted
        // at the volume's host path via FS_CONFIG_ROOTDIR.
        for vol in &volumes {
            info!(
                "Loading File System capability for volume name: '{}' host_path: '{}'",
                vol.name,
                vol.host_path.display()
            );
            let mut fsenv = env.clone();
            fsenv.insert(
                FS_CONFIG_ROOTDIR.to_owned(),
                vol.host_path.as_path().to_str().unwrap().to_owned(),
            );
            let fs_provider = FileSystemProvider::new();
            let fs_capability =
                NativeCapability::from_instance(fs_provider, Some(vol.name.clone())).map_err(
                    |e| anyhow::anyhow!("Failed to instantiate File System capability: {}", e),
                )?;
            host.lock()
                .unwrap()
                .add_native_capability(fs_capability)
                .map_err(|e| anyhow::anyhow!("Failed to add File System capability: {}", e))?;
            capabilities.push(Capability {
                name: FS_CAPABILITY,
                binding: Some(vol.name.clone()),
                env: fsenv,
            });
        }
    }
    // The actor must be added before its capability bindings can be configured.
    host.lock()
        .unwrap()
        .add_actor(load)
        .map_err(|e| anyhow::anyhow!("Error adding actor: {}", e))?;
    capabilities.iter().try_for_each(|cap| {
        info!("configuring capability {}", cap.name);
        host.lock()
            .unwrap()
            .set_binding(&pk, cap.name, cap.binding.clone(), cap.env.clone())
            .map_err(|e| anyhow::anyhow!("Error configuring capabilities for module: {}", e))
    })?;
    let log_handle_factory = LogHandleFactory { temp: log_output };
    info!("wascc actor executing");
    Ok(ContainerHandle::new(
        ActorHandle {
            host,
            key: pk,
            volumes,
            // Keep the declared capability list so stop() knows what to tear down.
            capabilities: actor_caps,
        },
        log_handle_factory,
    ))
}
34.279176
101
0.599466
ed6d45ffc2a9e4a2283cdfe4cc73672545e7be6d
4,151
use std::{cmp::Ordering, fmt, hash, ptr::NonNull};

/// A pointer for accessing data of a specific type.
///
/// See [documentation](https://developer.apple.com/documentation/swift/unsafepointer).
#[repr(transparent)]
pub struct UnsafePointer<T> {
    inner: NonNull<T>,
}

impl<T> UnsafePointer<T> {
    /// Creates a new instance from an immutable Rust pointer.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[inline]
    pub const unsafe fn new_unchecked(ptr: *const T) -> Self {
        Self {
            inner: NonNull::new_unchecked(ptr as *mut T),
        }
    }

    /// Creates a new instance from an immutable Rust pointer if it is non-null.
    #[inline]
    pub fn new(ptr: *const T) -> Option<Self> {
        NonNull::new(ptr as *mut T).map(|inner| Self { inner })
    }

    /// Acquires the underlying immutable pointer.
    #[inline]
    pub const fn as_ptr(self) -> *const T {
        self.inner.as_ptr()
    }

    // TODO: `as_ref`
}

impl<T> Clone for UnsafePointer<T> {
    #[inline]
    fn clone(&self) -> Self {
        // The wrapper is plain-old-data regardless of `T`, so copying suffices.
        *self
    }
}

impl<T> Copy for UnsafePointer<T> {}

impl<T> fmt::Debug for UnsafePointer<T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner, f)
    }
}

impl<T> fmt::Pointer for UnsafePointer<T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.inner, f)
    }
}

impl<T> PartialEq for UnsafePointer<T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&self.inner, &other.inner)
    }
}

impl<T> Eq for UnsafePointer<T> {}

impl<T> PartialOrd for UnsafePointer<T> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl<T> Ord for UnsafePointer<T> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        // Ordering is by address, delegated to `NonNull`.
        Ord::cmp(&self.inner, &other.inner)
    }
}

impl<T> hash::Hash for UnsafePointer<T> {
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}

/// A pointer for accessing and manipulating data of a specific type.
///
/// See [documentation](https://developer.apple.com/documentation/swift/unsafemutablepointer).
#[repr(transparent)]
pub struct UnsafeMutablePointer<T> {
    inner: NonNull<T>,
}

impl<T> UnsafeMutablePointer<T> {
    /// Creates a new instance from a mutable Rust pointer.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null.
    #[inline]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        Self {
            inner: NonNull::new_unchecked(ptr),
        }
    }

    /// Creates a new instance from a mutable Rust pointer if it is non-null.
    #[inline]
    pub fn new(ptr: *mut T) -> Option<Self> {
        NonNull::new(ptr).map(|inner| Self { inner })
    }

    /// Acquires the underlying mutable pointer.
    #[inline]
    pub const fn as_ptr(self) -> *mut T {
        self.inner.as_ptr()
    }

    // TODO: `as_ref` and `as_mut`
}

impl<T> Clone for UnsafeMutablePointer<T> {
    #[inline]
    fn clone(&self) -> Self {
        // Plain-old-data wrapper; a bitwise copy is the whole clone.
        *self
    }
}

impl<T> Copy for UnsafeMutablePointer<T> {}

impl<T> fmt::Debug for UnsafeMutablePointer<T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.inner, f)
    }
}

impl<T> fmt::Pointer for UnsafeMutablePointer<T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.inner, f)
    }
}

impl<T> PartialEq for UnsafeMutablePointer<T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        PartialEq::eq(&self.inner, &other.inner)
    }
}

impl<T> Eq for UnsafeMutablePointer<T> {}

impl<T> PartialOrd for UnsafeMutablePointer<T> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl<T> Ord for UnsafeMutablePointer<T> {
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        // Ordering is by address, delegated to `NonNull`.
        Ord::cmp(&self.inner, &other.inner)
    }
}

impl<T> hash::Hash for UnsafeMutablePointer<T> {
    #[inline]
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        self.inner.hash(state);
    }
}
22.559783
94
0.582992
e5b8ab05c8fe115c84b18e50bdbb864202dfe917
3,120
use crate::{command::Commands, renderer::Renderer, virtual_dom::Html};
use log::info;
use std::{cell::Ref, cell::RefCell, fmt, rc::Rc};
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::spawn_local;

/// The application's current model together with the virtual DOM it rendered.
struct State<Model, Message> {
    model: Model,
    html: Html<Message>,
}

impl<Model, Message> State<Model, Message>
where
    Model: Clone,
{
    fn new(model: Model, html: Html<Message>) -> Self {
        Self { model, html }
    }
}

type InitFn<Model, Message> = dyn Fn() -> (Model, Commands<Message>);
type UpdateFn<Model, Message> = dyn Fn(Message, &mut Model) -> Commands<Message>;
type ViewFn<Model, Message> = dyn Fn(&Model) -> Html<Message>;

/// An Elm-style application: `init` produces the first model, `update` folds
/// messages into the model, and `view` renders the model into virtual DOM.
///
/// Cloning an `App` is cheap (all fields are `Rc`), and clones share state —
/// this is how spawned command futures deliver messages back (see
/// `handle_commands`).
#[derive(Clone)]
pub struct App<Model, Message> {
    init: Rc<InitFn<Model, Message>>,
    update: Rc<UpdateFn<Model, Message>>,
    view: Rc<ViewFn<Model, Message>>,
    // DOM id of the element the app renders into.
    root_id: String,
    // `None` until `start` has run the first update cycle.
    state: Rc<RefCell<Option<State<Model, Message>>>>,
}

impl<Model, Message> App<Model, Message>
where
    Model: 'static + Clone + fmt::Debug + Eq,
    Message: 'static + Clone + fmt::Debug,
{
    /// Builds an app from the three core functions and the id of the root element.
    pub fn new<Init, Update, View>(init: Init, update: Update, view: View, root_id: &str) -> Self
    where
        Init: 'static + Fn() -> (Model, Commands<Message>),
        Update: 'static + Fn(Message, &mut Model) -> Commands<Message>,
        View: 'static + Fn(&Model) -> Html<Message>,
    {
        Self {
            init: Rc::new(init),
            update: Rc::new(update),
            view: Rc::new(view),
            root_id: root_id.into(),
            state: Rc::new(RefCell::new(None)),
        }
    }

    /// Runs `init` and performs the first render/update cycle.
    pub fn start(&self) {
        let (model, commands) = (self.init)();
        self.update(model, commands);
    }

    /// Feeds a message through `update`; re-renders only when the model changed.
    pub fn handle_message(&self, message: Message) {
        info!("message: {:#?}", message);

        // Clone the current model so `update` can mutate a scratch copy; the
        // `Ref` borrow from `state()` ends at the end of this statement.
        let mut new_model = self.state().as_ref().unwrap().model.clone();
        let commands = (self.update)(message, &mut new_model);

        if new_model == self.state().as_ref().unwrap().model {
            // NOTE(review): when the model is unchanged, `commands` returned by
            // `update` are silently discarded — confirm this is intentional.
            return;
        }

        self.update(new_model, commands);
    }

    /// Renders the new model, schedules its commands, then stores the new state.
    fn update(&self, model: Model, commands: Commands<Message>) {
        info!("model: {:#?}", model);

        let new_html = (self.view)(&model);
        // Render diffs against the previous html still held in `state`, so this
        // must happen before `set_state` replaces it.
        self.render_app(&new_html).unwrap();
        self.handle_commands(commands);
        self.set_state(State::new(model, new_html));
    }

    /// Spawns each command as a local future; its resulting message is fed back
    /// through `handle_message` on a cheap clone of the app.
    fn handle_commands(&self, commands: Commands<Message>) {
        for command in commands {
            let app = self.clone();

            spawn_local(async move {
                let message = command.run().await;
                app.handle_message(message);
            });
        }
    }

    // Shared borrow of the current state; callers must drop the `Ref` before
    // `set_state` runs or `RefCell::replace` would panic.
    fn state(&self) -> Ref<Option<State<Model, Message>>> {
        (self.state).borrow()
    }

    fn set_state(&self, state: State<Model, Message>) {
        self.state.replace(Some(state));
    }

    /// Diff-renders `html` against the previously stored html into the root element.
    fn render_app(&self, html: &Html<Message>) -> Result<(), JsValue> {
        let renderer = Renderer::new(self);
        renderer.render(
            self.state().as_ref().map(|state| &state.html),
            html,
            &self.root_id,
        )
    }
}
28.108108
97
0.574038
ef78881dd1d4cd9de34b95ffaaec516be5934685
1,298
// This file is part of network. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/network/master/COPYRIGHT. No part of predicator, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file. // Copyright © 2016-2018 The developers of network. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/network/master/COPYRIGHT. /// Option class. #[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq, Hash)] #[derive(Deserialize, Serialize)] #[repr(u8)] pub enum InternetProtocolVersion4OptionClass { /// Control. Control = 0b00, /// Reserved for future use. ReservedForFutureUse1 = 0b01, /// Debugging and measurement. DebuggingAndMeasurement = 0b10, /// Reserved for future use. ReservedForFutureUse2 = 0b11, } impl Into<u8> for InternetProtocolVersion4OptionClass { #[inline(always)] fn into(self) -> u8 { self as u8 } } impl InternetProtocolVersion4OptionClass { /// Is this a class reserved for future use? #[inline(always)] pub fn is_reserved_for_future_use(self) -> bool { (self as u8) & 0b01 != 0 } }
30.904762
379
0.751156
71fa2202c452c1732ff6fc7015b39333318d4a5e
1,076
use std::os::raw::c_void;
use std::ptr::null;

/// Raw bindings to the C wrapper functions.
pub mod ffi {
    // The ABI is now spelled out explicitly; a bare `extern { }` block defaults
    // to "C" but triggers the `missing_abi` lint on newer toolchains.
    extern "C" {
        pub fn sg_imgui_wrap_init(ctx: *mut super::SgImGui);
        pub fn sg_imgui_wrap_discard(ctx: *mut super::SgImGui);
        pub fn sg_imgui_wrap_draw(ctx: *mut super::SgImGui);
    }
}

/// Context passed across the FFI boundary (`#[repr(C)]` keeps the layout fixed).
///
/// The boolean fields presumably toggle the corresponding debug windows on the
/// C side — confirm against the wrapper's implementation.
#[repr(C)]
#[derive(Debug)]
pub struct SgImGui {
    // Opaque state pointer owned by the C wrapper; starts out null.
    _content: *const c_void,
    pub buffers: bool,
    pub images: bool,
    pub shaders: bool,
    pub pipelines: bool,
    pub passes: bool,
    pub capture: bool,
}

impl SgImGui {
    /// Creates a context with a null content pointer and all flags off.
    pub fn new() -> Self {
        SgImGui {
            _content: null(),
            buffers: false,
            images: false,
            shaders: false,
            pipelines: false,
            passes: false,
            capture: false,
        }
    }
}

impl Default for SgImGui {
    /// Equivalent to [`SgImGui::new`].
    fn default() -> Self {
        Self::new()
    }
}

/// Initializes the context via the C wrapper.
pub fn sg_imgui_init(ctx: &mut SgImGui) {
    // SAFETY: `ctx` is a valid exclusive reference, which coerces to a non-null
    // `*mut SgImGui` for the duration of the call.
    unsafe {
        ffi::sg_imgui_wrap_init(ctx);
    }
}

/// Discards the context via the C wrapper.
pub fn sg_imgui_discard(ctx: &mut SgImGui) {
    // SAFETY: see `sg_imgui_init`.
    unsafe {
        ffi::sg_imgui_wrap_discard(ctx);
    }
}

/// Draws via the C wrapper using the given context.
pub fn sg_imgui_draw(ctx: &mut SgImGui) {
    // SAFETY: see `sg_imgui_init`.
    unsafe {
        ffi::sg_imgui_wrap_draw(ctx);
    }
}
19.563636
63
0.565985
56953aaf60b1088b6d0f2970b20ac996c3be765d
107
// compile-flags: -Z parse-only // error-pattern: unterminated double quote string fn main() { " }
10.7
50
0.64486
bb035dfd8e0a1cbacb9108eda34bdddde8ba23b6
15,833
//! Import from another json format logic.

extern crate serde_json;

use serde::{Deserialize, Serialize};
use std::io::{BufReader, Read};
use vrp_pragmatic::format::problem::*;
use vrp_pragmatic::format::{FormatError, Location};

/// Serde models for the legacy ("hre") json format, deserialized as-is and then
/// mapped onto the pragmatic format by [`read_hre_problem`].
mod hre {
    use super::*;

    #[derive(Clone, Debug, Deserialize, Serialize)]
    pub struct Location {
        /// Latitude.
        pub lat: f64,
        /// Longitude.
        pub lng: f64,
    }

    // region Plan

    /// Relation type.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(rename_all = "camelCase")]
    pub enum RelationType {
        /// Tour relation locks jobs to specific vehicle in any order.
        Tour,
        /// Flexible relation locks jobs in specific order allowing insertion of other jobs in between.
        Flexible,
        /// Sequence relation locks jobs in strict order, no insertions in between are allowed.
        Sequence,
    }

    /// Relation is the way to lock specific jobs to specific vehicles.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct Relation {
        /// Relation type.
        #[serde(rename(deserialize = "type"))]
        pub type_field: RelationType,
        /// List of job ids.
        pub jobs: Vec<String>,
        /// Vehicle id.
        pub vehicle_id: String,
        /// Vehicle shift index.
        pub shift_index: Option<usize>,
    }

    /// Defines specific job place.
    #[derive(Clone, Deserialize, Debug)]
    pub struct JobPlace {
        /// A list of job time windows with time specified in RFC3339 format.
        pub times: Option<Vec<Vec<String>>>,
        /// Job location.
        pub location: Location,
        /// Job duration (service time).
        pub duration: f64,
        /// A tag which will be propagated back within corresponding activity in solution.
        pub tag: Option<String>,
    }

    /// Specifies pickup and delivery places of the job.
    /// At least one place should be specified. If only delivery specified, then vehicle is loaded with
    /// job's demand at the start location. If only pickup specified, then loaded good is delivered to
    /// the last location on the route. When both, pickup and delivery, are specified, then it is classical
    /// pickup and delivery job.
    #[derive(Clone, Deserialize, Debug)]
    pub struct JobPlaces {
        /// Pickup place.
        pub pickup: Option<JobPlace>,
        /// Delivery place.
        pub delivery: Option<JobPlace>,
    }

    /// Specifies single job.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Job {
        /// Job id.
        pub id: String,
        /// Job places.
        pub places: JobPlaces,
        /// Job demand.
        pub demand: Vec<i32>,
        /// Job priority, bigger value - less important.
        pub priority: Option<i32>,
        /// Job skills.
        pub skills: Option<Vec<String>>,
    }

    /// Specifies a place for sub job.
    #[derive(Clone, Deserialize, Debug)]
    pub struct MultiJobPlace {
        /// A list of sub job time windows with time specified in RFC3339 format.
        pub times: Option<Vec<Vec<String>>>,
        /// Sub job location.
        pub location: Location,
        /// Sub job duration (service time).
        pub duration: f64,
        /// Sub job demand.
        pub demand: Vec<i32>,
        /// A tag which will be propagated back within corresponding activity in solution.
        pub tag: Option<String>,
    }

    /// Specifies pickups and deliveries places of multi job.
    /// All of them should be completed or none of them. All pickups must be completed before any of deliveries.
    #[derive(Clone, Deserialize, Debug)]
    pub struct MultiJobPlaces {
        /// A list of pickups.
        pub pickups: Vec<MultiJobPlace>,
        /// A list of deliveries.
        pub deliveries: Vec<MultiJobPlace>,
    }

    /// Specifies multi job which has multiple child jobs.
    #[derive(Clone, Deserialize, Debug)]
    pub struct MultiJob {
        /// Multi job id.
        pub id: String,
        /// Multi job places.
        pub places: MultiJobPlaces,
        /// Job priority, bigger value - less important.
        pub priority: Option<i32>,
        /// Multi job skills.
        pub skills: Option<Vec<String>>,
    }

    /// Job variant type.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(untagged)]
    pub enum JobVariant {
        /// Single job.
        Single(Job),
        /// Multi job.
        Multi(MultiJob),
    }

    /// A plan specifies work which has to be done.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Plan {
        /// List of jobs.
        pub jobs: Vec<JobVariant>,
        /// List of relations between jobs and vehicles.
        pub relations: Option<Vec<Relation>>,
    }

    // endregion

    // region Fleet

    /// Specifies vehicle costs.
    #[derive(Clone, Deserialize, Debug)]
    pub struct VehicleCosts {
        /// Fixed is cost of vehicle usage per tour.
        pub fixed: Option<f64>,
        /// Cost per distance unit.
        pub distance: f64,
        /// Cost per time unit.
        pub time: f64,
    }

    /// Specifies vehicle place.
    #[derive(Clone, Deserialize, Debug)]
    pub struct VehiclePlace {
        /// Vehicle start or end time.
        pub time: String,
        /// Vehicle location.
        pub location: Location,
    }

    /// Specifies vehicle shift.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct VehicleShift {
        /// Vehicle start place.
        pub start: VehiclePlace,
        /// Vehicle end place.
        pub end: Option<VehiclePlace>,
        /// Vehicle breaks.
        pub breaks: Option<Vec<VehicleBreak>>,
        /// Vehicle reloads which allows vehicle to return back to the depot (or any other place) in
        /// order to unload/load goods during single tour.
        pub reloads: Option<Vec<VehicleReload>>,
    }

    /// Vehicle reload.
    pub type VehicleReload = JobPlace;

    /// Vehicle limits.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct VehicleLimits {
        /// Max traveling distance per shift/tour.
        pub max_distance: Option<f64>,
        /// Max time per shift/tour.
        pub shift_time: Option<f64>,
    }

    /// Vehicle break.
    #[derive(Clone, Deserialize, Debug)]
    pub struct VehicleBreak {
        /// Break time.
        pub times: Vec<Vec<String>>,
        /// Break duration.
        pub duration: f64,
        /// Break location.
        pub location: Option<Location>,
    }

    /// Specifies a vehicle type.
    #[derive(Clone, Deserialize, Debug)]
    pub struct VehicleType {
        /// Vehicle type id.
        pub id: String,
        /// Vehicle profile name.
        pub profile: String,
        /// Vehicle costs.
        pub costs: VehicleCosts,
        /// Vehicle shifts.
        pub shifts: Vec<VehicleShift>,
        /// Vehicle capacity.
        pub capacity: Vec<i32>,
        /// Vehicle amount.
        pub amount: i32,
        /// Vehicle skills.
        pub skills: Option<Vec<String>>,
        /// Vehicle limits.
        pub limits: Option<VehicleLimits>,
    }

    /// Specifies routing profile.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Profile {
        /// Profile name.
        pub name: String,
        /// Profile type.
        #[serde(rename(deserialize = "type"))]
        pub profile_type: String,
    }

    /// Specifies fleet.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Fleet {
        /// Vehicle types.
        pub types: Vec<VehicleType>,
        /// Routing profiles.
        pub profiles: Vec<Profile>,
    }

    // endregion

    // region Configuration

    /// Specifies extra configuration.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Config {
        /// Features config.
        pub features: Option<Features>,
    }

    /// Specifies features config.
    #[derive(Clone, Deserialize, Debug)]
    #[serde(rename_all = "camelCase")]
    pub struct Features {
        /// Even distribution of the jobs across tours. By default, is off.
        pub even_distribution: Option<EvenDistribution>,
        /// Tweaks priority weight. Default value is 100.
        pub priority: Option<Priority>,
    }

    /// Configuration to tweak even distribution of the jobs across tours.
    #[derive(Clone, Deserialize, Debug)]
    pub struct EvenDistribution {
        /// Enable or disable.
        pub enabled: bool,
        /// A fraction of this cost is applied when jobs are assigned to the tour.
        pub extra_cost: Option<f64>,
    }

    /// Configuration to tweak priority weight.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Priority {
        /// A cost for formula: `extra_cost = (priority - 1) * weight_cost`.
        pub weight_cost: f64,
    }

    // endregion

    // region Common

    /// A VRP problem definition.
    #[derive(Clone, Deserialize, Debug)]
    pub struct Problem {
        /// Problem plan: customers to serve.
        pub plan: Plan,
        /// Problem resources: vehicles to be used, routing info.
        pub fleet: Fleet,
        /// Extra configuration.
        pub config: Option<Config>,
    }

    // endregion
}

/// Converts an hre location into a pragmatic location (field-for-field copy).
fn to_loc(loc: &hre::Location) -> Location {
    Location { lat: loc.lat, lng: loc.lng }
}

/// Reads a problem in the hre json format and converts it to the pragmatic format.
///
/// Returns a [`FormatError`] with code `E0000` when the input cannot be deserialized.
pub fn read_hre_problem<R: Read>(reader: BufReader<R>) -> Result<Problem, FormatError> {
    // Maps a single-job place into a pragmatic JobTask, taking demand from the job.
    let job_place_mapper = |job: &hre::Job, place: &hre::JobPlace| JobTask {
        places: vec![JobPlace {
            location: to_loc(&place.location),
            duration: place.duration,
            times: place.times.clone(),
        }],
        demand: Some(job.demand.clone()),
        tag: place.tag.clone(),
    };

    // Maps multi-job places (each carrying its own demand); empty list becomes None.
    let multi_job_place_mapper = |places: &Vec<hre::MultiJobPlace>| {
        if places.is_empty() {
            None
        } else {
            Some(
                places
                    .iter()
                    .map(|place| JobTask {
                        places: vec![JobPlace {
                            location: to_loc(&place.location),
                            duration: place.duration,
                            times: place.times.clone(),
                        }],
                        demand: Some(place.demand.clone()),
                        tag: place.tag.clone(),
                    })
                    .collect(),
            )
        }
    };

    let hre_problem: hre::Problem = serde_json::from_reader(reader)
        .map_err(|err| FormatError::new("E0000".to_string(), err.to_string(), "Check input json".to_string()))?;

    Ok(Problem {
        plan: Plan {
            jobs: hre_problem
                .plan
                .jobs
                .iter()
                .map(|job| match job {
                    hre::JobVariant::Single(job) => Job {
                        id: job.id.clone(),
                        pickups: job.places.pickup.as_ref().map(|place| vec![job_place_mapper(job, place)]),
                        deliveries: job.places.delivery.as_ref().map(|place| vec![job_place_mapper(job, place)]),
                        replacements: None,
                        services: None,
                        priority: job.priority.as_ref().map(|p| *p),
                        skills: job.skills.clone(),
                    },
                    hre::JobVariant::Multi(job) => Job {
                        id: job.id.clone(),
                        pickups: multi_job_place_mapper(&job.places.pickups),
                        deliveries: multi_job_place_mapper(&job.places.deliveries),
                        replacements: None,
                        services: None,
                        priority: job.priority.as_ref().map(|p| *p),
                        skills: job.skills.clone(),
                    },
                })
                .collect(),
            relations: hre_problem.plan.relations.map(|relations| {
                relations
                    .iter()
                    .map(|r| Relation {
                        // Note the renaming between formats: hre Sequence is the
                        // strict ordering, hre Flexible becomes pragmatic Sequence,
                        // and hre Tour becomes pragmatic Any.
                        type_field: match r.type_field {
                            hre::RelationType::Sequence => RelationType::Strict,
                            hre::RelationType::Flexible => RelationType::Sequence,
                            hre::RelationType::Tour => RelationType::Any,
                        },
                        jobs: r.jobs.clone(),
                        vehicle_id: r.vehicle_id.clone(),
                        shift_index: r.shift_index.clone(),
                    })
                    .collect()
            }),
        },
        fleet: Fleet {
            vehicles: hre_problem
                .fleet
                .types
                .iter()
                .map(|v| VehicleType {
                    type_id: v.id.clone(),
                    // Expands the hre `amount` into concrete ids: "<type>_1".."<type>_amount".
                    vehicle_ids: (1..=v.amount).map(|seq| format!("{}_{}", v.id, seq)).collect(),
                    profile: v.profile.clone(),
                    costs: VehicleCosts {
                        fixed: v.costs.fixed.clone(),
                        distance: v.costs.distance,
                        time: v.costs.time,
                    },
                    shifts: v
                        .shifts
                        .iter()
                        .map(|shift| VehicleShift {
                            start: VehiclePlace {
                                time: shift.start.time.clone(),
                                location: to_loc(&shift.start.location),
                            },
                            end: shift
                                .end
                                .as_ref()
                                .map(|end| VehiclePlace { time: end.time.clone(), location: to_loc(&end.location) }),
                            breaks: shift.breaks.as_ref().map(|breaks| {
                                breaks
                                    .iter()
                                    .map(|b| VehicleBreak {
                                        // NOTE(review): only the first time window is used, and
                                        // `unwrap` panics if a break has an empty `times` list —
                                        // confirm input is always validated upstream.
                                        time: VehicleBreakTime::TimeWindow(b.times.first().unwrap().clone()),
                                        duration: b.duration,
                                        locations: b.location.as_ref().map(|l| vec![to_loc(l)]),
                                    })
                                    .collect()
                            }),
                            reloads: shift.reloads.as_ref().map(|reloads| {
                                reloads
                                    .iter()
                                    .map(|r| VehicleReload {
                                        location: to_loc(&r.location),
                                        duration: r.duration.clone(),
                                        times: r.times.clone(),
                                        tag: r.tag.clone(),
                                    })
                                    .collect()
                            }),
                        })
                        .collect(),
                    capacity: v.capacity.clone(),
                    skills: v.skills.clone(),
                    limits: v.limits.as_ref().map(|l| VehicleLimits {
                        max_distance: l.max_distance.clone(),
                        shift_time: l.shift_time.clone(),
                        allowed_areas: None,
                    }),
                })
                .collect(),
            profiles: hre_problem
                .fleet
                .profiles
                .iter()
                .map(|p| Profile { name: p.name.clone(), profile_type: p.profile_type.clone(), speed: None })
                .collect(),
        },
        objectives: None,
        // NOTE(review): `hre_problem.config` is deserialized but not carried over —
        // presumably the pragmatic format has no equivalent; verify this is intentional.
        config: None,
    })
}
34.645514
117
0.507169
f784c2604f3b5387f2a60ef9deb65013bbf5a14e
28,768
// Copyright 2020 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /// This module tests pkg-resolver's resolve keeps working when /// MinFs is broken. use { fidl::endpoints::{Proxy, RequestStream, ServerEnd}, fidl_fuchsia_io::{ DirectoryControlHandle, DirectoryProxy, DirectoryRequest, DirectoryRequestStream, FileControlHandle, FileEvent, FileMarker, FileProxy, FileRequest, FileRequestStream, FileWriteResponder, NodeMarker, }, fidl_fuchsia_pkg_ext::RepositoryConfig, fidl_fuchsia_pkg_rewrite_ext::Rule, fuchsia_async as fasync, fuchsia_pkg_testing::{serve::ServedRepository, Package, PackageBuilder, RepositoryBuilder}, fuchsia_zircon::Status, futures::future::BoxFuture, futures::prelude::*, lib::{ get_repos, get_rules, mock_filesystem, DirOrProxy, EnableDynamicConfig, MountsBuilder, TestEnv, TestEnvBuilder, EMPTY_REPO_PATH, }, std::sync::{ atomic::{AtomicBool, AtomicU64}, Arc, }, }; trait OpenRequestHandler: Sized { fn handle_open_request( &self, flags: u32, mode: u32, path: String, object: ServerEnd<NodeMarker>, control_handle: DirectoryControlHandle, parent: Arc<DirectoryStreamHandler<Self>>, ); } struct DirectoryStreamHandler<O: Sized> { open_handler: Arc<O>, } impl<O> DirectoryStreamHandler<O> where O: OpenRequestHandler + Send + Sync + 'static, { fn new(open_handler: Arc<O>) -> Self { Self { open_handler } } fn handle_stream( self: Arc<Self>, mut stream: DirectoryRequestStream, ) -> BoxFuture<'static, ()> { async move { while let Some(req) = stream.next().await { match req.unwrap() { DirectoryRequest::Clone { flags, object, control_handle: _ } => { let stream = object.into_stream().unwrap().cast_stream(); mock_filesystem::describe_dir(flags, &stream); fasync::Task::spawn(Arc::clone(&self).handle_stream(stream)).detach(); } DirectoryRequest::Open { flags, mode, path, object, control_handle } => { self.open_handler.handle_open_request( flags, mode, path, object, 
control_handle, Arc::clone(&self), ) } DirectoryRequest::Close { .. } => (), req => panic!("DirectoryStreamHandler unhandled request {:?}", req), } } } .boxed() } } struct OpenFailOrTempFs { should_fail: AtomicBool, fail_count: AtomicU64, tempdir: tempfile::TempDir, } impl OpenFailOrTempFs { fn new_failing() -> Arc<Self> { Arc::new(Self { should_fail: AtomicBool::new(true), fail_count: AtomicU64::new(0), tempdir: tempfile::tempdir().expect("/tmp to exist"), }) } fn get_open_fail_count(&self) -> u64 { self.fail_count.load(std::sync::atomic::Ordering::SeqCst) } fn make_open_succeed(&self) { self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst); } fn should_fail(&self) -> bool { self.should_fail.load(std::sync::atomic::Ordering::SeqCst) } } impl OpenRequestHandler for OpenFailOrTempFs { fn handle_open_request( &self, flags: u32, mode: u32, path: String, object: ServerEnd<NodeMarker>, _control_handle: DirectoryControlHandle, parent: Arc<DirectoryStreamHandler<Self>>, ) { if self.should_fail() { if path == "." { let stream = object.into_stream().unwrap().cast_stream(); mock_filesystem::describe_dir(flags, &stream); fasync::Task::spawn(parent.handle_stream(stream)).detach(); } else { self.fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst); } } else { let (tempdir_proxy, server_end) = fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap(); fdio::service_connect(self.tempdir.path().to_str().unwrap(), server_end.into_channel()) .unwrap(); tempdir_proxy.open(flags, mode, &path, object).unwrap(); } } } /// Implements OpenRequestHandler, proxying to a backing temp file and optionally failing writes /// to certain files. 
struct WriteFailOrTempFs { files_to_fail_writes: Vec<String>, should_fail: Arc<AtomicBool>, fail_count: Arc<AtomicU64>, tempdir_proxy: DirectoryProxy, // We don't read this, but need to keep it around otherwise the temp directory is torn down _tempdir: tempfile::TempDir, } impl WriteFailOrTempFs { fn new_failing(files_to_fail_writes: Vec<String>) -> Arc<Self> { let tempdir = tempfile::tempdir().expect("/tmp to exist"); let (tempdir_proxy, server_end) = fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap(); fdio::open( tempdir.path().to_str().unwrap(), fidl_fuchsia_io::OPEN_FLAG_DIRECTORY | fidl_fuchsia_io::OPEN_RIGHT_READABLE | fidl_fuchsia_io::OPEN_RIGHT_WRITABLE, server_end.into_channel(), ) .expect("open temp directory"); Arc::new(Self { files_to_fail_writes, should_fail: Arc::new(AtomicBool::new(true)), fail_count: Arc::new(AtomicU64::new(0)), _tempdir: tempdir, tempdir_proxy, }) } fn get_write_fail_count(&self) -> u64 { self.fail_count.load(std::sync::atomic::Ordering::SeqCst) } fn make_write_succeed(&self) { self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst); } fn should_fail(&self) -> bool { self.should_fail.load(std::sync::atomic::Ordering::SeqCst) } } impl OpenRequestHandler for WriteFailOrTempFs { fn handle_open_request( &self, flags: u32, mode: u32, path: String, object: ServerEnd<NodeMarker>, _control_handle: DirectoryControlHandle, parent: Arc<DirectoryStreamHandler<Self>>, ) { if path == "." && self.should_fail() { let stream = object.into_stream().unwrap().cast_stream(); mock_filesystem::describe_dir(flags, &stream); fasync::Task::spawn(parent.handle_stream(stream)).detach(); return; } if !self.files_to_fail_writes.contains(&path) { // We don't want to intercept file operations, so just open the file normally. 
self.tempdir_proxy.open(flags, mode, &path, object).unwrap(); return; } // This file matched our configured set of paths to intercept operations for, so open a // backing file and send all file operations which the client thinks it's sending // to the backing file instead to our FailingWriteFileStreamHandler. let (file_requests, file_control_handle) = ServerEnd::<FileMarker>::new(object.into_channel()) .into_stream_and_control_handle() .expect("split file server end"); // Create a proxy to the actual file we'll open to proxy to. let (backing_node_proxy, backing_node_server_end) = fidl::endpoints::create_proxy::<NodeMarker>().unwrap(); self.tempdir_proxy .open(flags, mode, &path, backing_node_server_end) .expect("open file requested by pkg-resolver"); // All the things pkg-resolver attempts to open in these tests are files, // not directories, so cast the NodeProxy to a FileProxy. If the pkg-resolver assumption // changes, this code will have to support both. let backing_file_proxy = FileProxy::new(backing_node_proxy.into_channel().unwrap()); let send_onopen = flags & fidl_fuchsia_io::OPEN_FLAG_DESCRIBE != 0; let file_handler = Arc::new(FailingWriteFileStreamHandler::new( backing_file_proxy, String::from(path), Arc::clone(&self.should_fail), Arc::clone(&self.fail_count), )); fasync::Task::spawn(file_handler.handle_stream( file_requests, file_control_handle, send_onopen, )) .detach(); } } /// Handles a stream of requests for a particular file, proxying to a backing file for all /// operations except writes, which it may decide to make fail. 
struct FailingWriteFileStreamHandler { backing_file: FileProxy, writes_should_fail: Arc<AtomicBool>, write_fail_count: Arc<AtomicU64>, path: String, } impl FailingWriteFileStreamHandler { fn new( backing_file: FileProxy, path: String, writes_should_fail: Arc<AtomicBool>, write_fail_count: Arc<AtomicU64>, ) -> Self { Self { backing_file, writes_should_fail, write_fail_count, path } } fn writes_should_fail(self: &Arc<Self>) -> bool { self.writes_should_fail.load(std::sync::atomic::Ordering::SeqCst) } async fn handle_write(self: &Arc<Self>, data: Vec<u8>, responder: FileWriteResponder) { if self.writes_should_fail() { self.write_fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst); responder.send(Status::NO_MEMORY.into_raw(), 0u64).expect("send on write"); return; } // Don't fail, actually do the write. let (status, bytes_written) = self.backing_file.write(&data).await.unwrap(); responder.send(status, bytes_written).unwrap(); } fn handle_stream( self: Arc<Self>, mut stream: FileRequestStream, control_handle: FileControlHandle, send_onopen: bool, ) -> BoxFuture<'static, ()> { async move { if send_onopen { // The client end of the file is waiting for an OnOpen event, so send // one based on the actual OnOpen from the backing file. let mut event_stream = self.backing_file.take_event_stream(); let event = event_stream.try_next().await.unwrap(); match event.expect("failed to received file event") { FileEvent::OnOpen_ { s, mut info } => { // info comes as an Option<Box<NodeInfo>>, but we need to return an // Option<&mut NodeInfo>. Transform it. 
let node_info = info.as_mut().map(|b| &mut **b); control_handle .send_on_open_(s, node_info) .expect("send on open to fake file"); } FileEvent::OnConnectionInfo { info } => { control_handle .send_on_connection_info(info) .expect("send on open to fake file"); } } } while let Some(req) = stream.next().await { match req.unwrap() { FileRequest::Write { data, responder } => { self.handle_write(data, responder).await } FileRequest::GetAttr { responder } => { let (status, mut attrs) = self.backing_file.get_attr().await.unwrap(); responder.send(status, &mut attrs).unwrap(); } FileRequest::Read { count, responder } => { let (status, data) = self.backing_file.read(count).await.unwrap(); responder.send(status, &data).unwrap(); } FileRequest::Close { responder } => { let backing_file_close_response = self.backing_file.close().await.unwrap(); responder.send(backing_file_close_response).unwrap(); } FileRequest::Close2 { responder } => { let mut backing_file_close_response = self.backing_file.close2().await.unwrap(); responder.send(&mut backing_file_close_response).unwrap(); } other => { panic!("unhandled request type for path {:?}: {:?}", self.path, other); } } } } .boxed() } } /// Optionally fails renames of certain files. Otherwise, delegates /// DirectoryRequests to a backing tempdir. 
struct RenameFailOrTempFs { fail_count: Arc<AtomicU64>, files_to_fail_renames: Vec<String>, should_fail: Arc<AtomicBool>, tempdir: Arc<tempfile::TempDir>, } impl RenameFailOrTempFs { fn new_failing(files_to_fail_renames: Vec<String>) -> Arc<Self> { Arc::new(Self { fail_count: Arc::new(AtomicU64::new(0)), files_to_fail_renames, should_fail: Arc::new(AtomicBool::new(true)), tempdir: Arc::new(tempfile::tempdir().expect("/tmp to exist")), }) } fn get_rename_fail_count(&self) -> u64 { self.fail_count.load(std::sync::atomic::Ordering::SeqCst) } fn make_rename_succeed(&self) { self.should_fail.store(false, std::sync::atomic::Ordering::SeqCst); } fn should_fail(&self) -> bool { self.should_fail.load(std::sync::atomic::Ordering::SeqCst) } } impl OpenRequestHandler for RenameFailOrTempFs { fn handle_open_request( &self, flags: u32, mode: u32, path: String, object: ServerEnd<NodeMarker>, _control_handle: DirectoryControlHandle, parent: Arc<DirectoryStreamHandler<Self>>, ) { // Set up proxy to tmpdir and delegate to it on success. let (tempdir_proxy, server_end) = fidl::endpoints::create_proxy::<fidl_fuchsia_io::DirectoryMarker>().unwrap(); fdio::service_connect(self.tempdir.path().to_str().unwrap(), server_end.into_channel()) .unwrap(); if !self.should_fail() || path != "." { tempdir_proxy.open(flags, mode, &path, object).unwrap(); return; } // Prepare to handle the directory requests. We must call describe_dir, which sends an // OnOpen if OPEN_FLAG_DESCRIBE is set. Otherwise, the code will hang when reading from // the stream. let mut stream = object.into_stream().unwrap().cast_stream(); mock_filesystem::describe_dir(flags, &stream); let fail_count = Arc::clone(&self.fail_count); let files_to_fail_renames = Clone::clone(&self.files_to_fail_renames); // Handle the directory requests. 
fasync::Task::spawn(async move { while let Some(req) = stream.next().await { match req.unwrap() { DirectoryRequest::GetAttr { responder } => { let (status, mut attrs) = tempdir_proxy.get_attr().await.unwrap(); responder.send(status, &mut attrs).unwrap(); } DirectoryRequest::Close { responder } => { let status = tempdir_proxy.close().await.unwrap(); responder.send(status).unwrap(); } DirectoryRequest::Close2 { responder } => { let mut result = tempdir_proxy.close2().await.unwrap(); responder.send(&mut result).unwrap(); } DirectoryRequest::GetToken { responder } => { let (status, handle) = tempdir_proxy.get_token().await.unwrap(); responder.send(status, handle).unwrap(); } DirectoryRequest::Rename2 { src, dst, responder, .. } => { if !files_to_fail_renames.contains(&src) { panic!("unsupported rename from {} to {}", src, dst); } fail_count.fetch_add(1, std::sync::atomic::Ordering::SeqCst); responder.send(&mut Err(Status::NOT_FOUND.into_raw())).unwrap(); } DirectoryRequest::Open { flags, mode, path, object, control_handle } => { parent.open_handler.handle_open_request( flags, mode, path, object, control_handle, Arc::clone(&parent.clone()), ); } other => { panic!("unhandled request type for path {:?}: {:?}", path, other); } } } }) .detach(); } } async fn create_testenv_serves_repo<H: OpenRequestHandler + Send + Sync + 'static>( open_handler: Arc<H>, ) -> (TestEnv, RepositoryConfig, Package, ServedRepository) { // Create testenv with failing isolated-persistent-storage let directory_handler = Arc::new(DirectoryStreamHandler::new(open_handler)); let (proxy, stream) = fidl::endpoints::create_proxy_and_stream::<fidl_fuchsia_io::DirectoryMarker>().unwrap(); fasync::Task::spawn(directory_handler.handle_stream(stream)).detach(); let env = TestEnvBuilder::new() .mounts( MountsBuilder::new() .enable_dynamic_config(EnableDynamicConfig { enable_dynamic_configuration: true }) .pkg_resolver_data(DirOrProxy::Proxy(proxy)) .build(), ) .build() .await; // Serve repo with package let 
pkg = PackageBuilder::new("just_meta_far").build().await.expect("created pkg"); let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let served_repository = repo.server().start().unwrap(); let repo_url = "fuchsia-pkg://example.com".parse().unwrap(); let config = served_repository.make_repo_config(repo_url); (env, config, pkg, served_repository) } async fn verify_pkg_resolution_succeeds_during_minfs_repo_config_failure< O, FailCountFn, MakeSucceedFn, >( open_handler: Arc<O>, fail_count_fn: FailCountFn, num_failures_before_first_restart: u64, num_failures_after_first_restart: u64, make_succeed_fn: MakeSucceedFn, ) where O: OpenRequestHandler + Send + Sync + 'static, FailCountFn: FnOnce() -> u64 + Copy, MakeSucceedFn: FnOnce(), { let (mut env, config, pkg, _served_repo) = create_testenv_serves_repo(Arc::clone(&open_handler)).await; // Verify we can resolve the package with a broken MinFs, and that repo configs do not persist let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap(); let package_dir = env.resolve_package("fuchsia-pkg://example.com/just_meta_far").await.unwrap(); pkg.verify_contents(&package_dir).await.unwrap(); assert_eq!(fail_count_fn(), num_failures_before_first_restart); env.restart_pkg_resolver().await; assert_eq!(get_repos(&env.proxies.repo_manager).await, vec![]); assert_eq!(fail_count_fn(), num_failures_after_first_restart); // Now let MinFs recover and show how repo configs are saved on restart. // Note we know we are not executing the failure path anymore since // the failure count doesn't change. 
make_succeed_fn(); let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap(); let package_dir = env.resolve_package("fuchsia-pkg://example.com/just_meta_far").await.unwrap(); pkg.verify_contents(&package_dir).await.unwrap(); assert_eq!(fail_count_fn(), num_failures_after_first_restart); env.restart_pkg_resolver().await; assert_eq!(get_repos(&env.proxies.repo_manager).await, vec![config.clone()]); env.stop().await; } async fn verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure< O, FailCountFn, MakeSucceedFn, >( open_handler: Arc<O>, fail_count_fn: FailCountFn, num_failures_before_first_restart: u64, num_failures_after_first_restart: u64, make_succeed_fn: MakeSucceedFn, ) where O: OpenRequestHandler + Send + Sync + 'static, FailCountFn: FnOnce() -> u64 + Copy, MakeSucceedFn: FnOnce(), { let (mut env, config, pkg, _served_repo) = create_testenv_serves_repo(Arc::clone(&open_handler)).await; // Add repo config and rewrite rules let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap(); let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap(); env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap(); let rule = Rule::new("should_be_rewritten", "example.com", "/", "/").unwrap(); let () = edit_transaction.add(&mut rule.clone().into()).await.unwrap().unwrap(); let () = edit_transaction.commit().await.unwrap().unwrap(); // Verify we can resolve the package with a broken MinFs, and that rewrite rules do not // persist let package_dir = env.resolve_package("fuchsia-pkg://should_be_rewritten/just_meta_far").await.unwrap(); pkg.verify_contents(&package_dir).await.unwrap(); assert_eq!(fail_count_fn(), num_failures_before_first_restart); env.restart_pkg_resolver().await; assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![]); assert_eq!(fail_count_fn(), num_failures_after_first_restart); // Now let MinFs recover and show how 
rewrite rules are saved on restart // Note we know we are not executing the failure path anymore since // the failure count doesn't change. make_succeed_fn(); let () = env.proxies.repo_manager.add(config.clone().into()).await.unwrap().unwrap(); let (edit_transaction, edit_transaction_server) = fidl::endpoints::create_proxy().unwrap(); env.proxies.rewrite_engine.start_edit_transaction(edit_transaction_server).unwrap(); let () = edit_transaction.add(&mut rule.clone().into()).await.unwrap().unwrap(); let () = edit_transaction.commit().await.unwrap().unwrap(); let package_dir = env.resolve_package("fuchsia-pkg://should_be_rewritten/just_meta_far").await.unwrap(); pkg.verify_contents(&package_dir).await.unwrap(); assert_eq!(fail_count_fn(), num_failures_after_first_restart); env.restart_pkg_resolver().await; assert_eq!(get_rules(&env.proxies.rewrite_engine).await, vec![rule.clone()]); env.stop().await; } // Test that when pkg-resolver can't open the file for dynamic repo configs, the resolver // still works. #[fasync::run_singlethreaded(test)] async fn minfs_fails_create_repo_configs() { let open_handler = OpenFailOrTempFs::new_failing(); verify_pkg_resolution_succeeds_during_minfs_repo_config_failure( Arc::clone(&open_handler), || open_handler.get_open_fail_count(), // Before the first pkg-resolver restart, we fail 3 times: // * when trying to open repositories.json on start // * when trying to open rewrites.json on start // * when trying to open repositories.json when adding a dynamic repo config 3, // We fail an additional 2 times after the restart to account for repositories.json // and rewrites.json failing to open again on startup. 5, || open_handler.make_open_succeed(), ) .await; } // Test that when pkg-resolver can open neither the file for rewrite rules // NOR the file for dynamic repositories, the resolver still works. 
#[fasync::run_singlethreaded(test)] async fn minfs_fails_create_rewrite_rules() { let open_handler = OpenFailOrTempFs::new_failing(); verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure( Arc::clone(&open_handler), || open_handler.get_open_fail_count(), // Before the first pkg-resolver restart, we fail 4 times: // * when trying to open repositories.json on start // * when trying to open rewrites.json on start // * when trying to open repositories.json when adding a dynamic repo config // * when trying to open rewrites.json when adding a dynamic rewrite rule 4, // We fail an additional 2 times after the restart to account for repositories.json // and rewrites.json failing to open again on startup. 6, || open_handler.make_open_succeed(), ) .await; } // Test that when pkg-resolver can't write to the file for dynamic repo configs, // package resolution still works. #[fasync::run_singlethreaded(test)] async fn minfs_fails_write_to_repo_configs() { let open_handler = WriteFailOrTempFs::new_failing(vec![String::from("repositories.json.new")]); verify_pkg_resolution_succeeds_during_minfs_repo_config_failure( Arc::clone(&open_handler), || open_handler.get_write_fail_count(), // The only time the test should hit the write failure path is when we add a repo config // when should_fail = true, in which case we fail at writing repositories.json.new. 1, 1, || open_handler.make_write_succeed(), ) .await; } // Test that when pkg-resolver can write to neither the file for dynamic repo configs // NOR the file for rewrite rules, package resolution still works. 
#[fasync::run_singlethreaded(test)] async fn minfs_fails_write_to_repo_configs_and_rewrite_rules() { let open_handler = WriteFailOrTempFs::new_failing(vec![ String::from("repositories.json.new"), String::from("rewrites.json.new"), ]); verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure( Arc::clone(&open_handler), || open_handler.get_write_fail_count(), // The only time the test should hit the write failure path is when we add a repo config // when should_fail = true, in which case we fail at writing both repositories.json.new and // rewrites.json.new. 2, 2, || open_handler.make_write_succeed(), ) .await; } // Test that when pkg-resolver can't rename file for dynamic repo configs, package resolution, // still works. Note this test might stop working if the pkg-resolver starts issuing Rename // directly to /data instead of going through std::fs::rename. If that's the case, consider // extending DirectoryStreamHandler to also have a RenameRequestHandler, and possibly use a // std::sync::Weak to coordinate between the DirectoryStreamHandler and RenameRequestHandler. #[fasync::run_singlethreaded(test)] async fn minfs_fails_rename_repo_configs() { let open_handler = RenameFailOrTempFs::new_failing(vec![String::from("repositories.json.new")]); verify_pkg_resolution_succeeds_during_minfs_repo_config_failure( Arc::clone(&open_handler), || open_handler.get_rename_fail_count(), // The only time the test should hit the rename failure path is when we add a // repo config when should_fail = true, in which case we fail at renaming // repositories.json.new. 1, 1, || open_handler.make_rename_succeed(), ) .await; } // Test that when pkg-resolver can rename neither the file for dynamic repo configs // NOR the file for rewrite rules, package resolution still works. 
#[fasync::run_singlethreaded(test)] async fn minfs_fails_rename_repo_configs_and_rewrite_rules() { let open_handler = RenameFailOrTempFs::new_failing(vec![ String::from("repositories.json.new"), String::from("rewrites.json.new"), ]); verify_pkg_resolution_succeeds_during_minfs_repo_config_and_rewrite_rule_failure( Arc::clone(&open_handler), || open_handler.get_rename_fail_count(), // The only time the test should hit the rename failure path is when we add a // repo config when should_fail = true, in which case we fail at renaming both // repositories.json.new and rewrites.json.new. 2, 2, || open_handler.make_rename_succeed(), ) .await; }
40.347826
100
0.618152
fbca8921e2d7ec218c24a13d4c66689919793744
6,260
//! Block watcher for efficient `get_latest_block`. use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, }; use futures::{prelude::*, stream::Fuse, try_ready}; use tokio::{spawn, sync::watch}; use super::snapshot::BlockSnapshot; /// Block watcher error. #[derive(Debug, Fail)] pub enum WatchError { #[fail(display = "block watcher closed")] WatcherClosed, } struct Inner { spawned: AtomicBool, current_block: watch::Receiver<Option<BlockSnapshot>>, current_block_tx: Mutex<Option<watch::Sender<Option<BlockSnapshot>>>>, } /// Block watcher. #[derive(Clone)] pub struct BlockWatcher { inner: Arc<Inner>, } impl BlockWatcher { /// Create new block watcher. pub fn new() -> Self { let (tx, rx) = watch::channel(None); Self { inner: Arc::new(Inner { spawned: AtomicBool::new(false), current_block: rx, current_block_tx: Mutex::new(Some(tx)), }), } } /// Atomically check if a new block watcher needs to be spawned and /// mark it as spawned/spawning. If this method returns true, then /// the caller must go ahead and call either `spawn` or `cancel_spawn`. pub fn start_spawn(&self) -> bool { !self .inner .spawned .compare_and_swap(false, true, Ordering::SeqCst) } /// Cancel a previous spawn started by `start_spawn`. pub fn cancel_spawn(&self) { assert!( self.inner.spawned.swap(false, Ordering::SeqCst), "must only be called in start_spawn" ); } /// Spawn a block watcher task. /// /// Must only be called after first calling `start_spawn`. pub fn spawn<T>(&self, blocks: T) where T: Stream<Item = BlockSnapshot> + Send + 'static, { let tx = self .inner .current_block_tx .lock() .unwrap() .take() .expect("must only be called in start_spawn"); let inner = self.inner.clone(); spawn( Watch::new(blocks.map(|blk| Some(blk)), tx) .map_err(|_err| ()) .and_then(move |tx| { // Watch has terminated which indicates that there is something wrong // with the stream. Put the sender back so we can retry the watch. 
inner.current_block_tx.lock().unwrap().replace(tx); assert!( inner.spawned.swap(false, Ordering::SeqCst), "must only be called in start_spawn" ); Ok(()) }), ); } /// Get the latest block. pub fn get_latest_block(&self) -> impl Future<Item = BlockSnapshot, Error = WatchError> { self.inner .current_block .clone() .skip_while(|block| Ok(block.is_none())) .take(1) .into_future() .map_err(|_err| WatchError::WatcherClosed) .and_then(|(maybe_block, _)| { Ok(maybe_block .ok_or(WatchError::WatcherClosed)? .expect("None blocks were skipped")) }) } } struct Watch<T: Stream, U> { stream: Option<Fuse<T>>, sink: Option<U>, buffered: Option<T::Item>, } impl<T: Stream, U> Watch<T, U> where U: Sink<SinkItem = T::Item>, T: Stream, { pub fn new(stream: T, sink: U) -> Self { Self { stream: Some(stream.fuse()), sink: Some(sink), buffered: None, } } /// Get a mutable reference to the inner sink. /// If this combinator has already been polled to completion, None will be returned. pub fn sink_mut(&mut self) -> Option<&mut U> { self.sink.as_mut() } /// Get a mutable reference to the inner stream. /// If this combinator has already been polled to completion, None will be returned. pub fn stream_mut(&mut self) -> Option<&mut T> { self.stream.as_mut().map(|x| x.get_mut()) } fn take_result(&mut self) -> U { let sink = self .sink .take() .expect("Attempted to poll Watch after completion"); sink } fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> { debug_assert!(self.buffered.is_none()); if let AsyncSink::NotReady(item) = self .sink_mut() .take() .expect("Attempted to poll Watch after completion") .start_send(item)? 
{ self.buffered = Some(item); return Ok(Async::NotReady); } Ok(Async::Ready(())) } } impl<T: Stream, U> Future for Watch<T, U> where U: Sink<SinkItem = T::Item>, T: Stream, { type Item = U; type Error = U::SinkError; fn poll(&mut self) -> Poll<U, U::SinkError> { // If we've got an item buffered already, we need to write it to the // sink before we can do anything else. if let Some(item) = self.buffered.take() { try_ready!(self.try_start_send(item)) } loop { match self .stream_mut() .take() .expect("Attempted to poll Watch after completion") .poll() { Ok(Async::Ready(Some(item))) => try_ready!(self.try_start_send(item)), Ok(Async::Ready(None)) => { // Stream has completed, we return the sink without closing it. return Ok(Async::Ready(self.take_result())); } Ok(Async::NotReady) => { try_ready!(self .sink_mut() .take() .expect("Attempted to poll Watch after completion") .poll_complete()); return Ok(Async::NotReady); } Err(_) => { // In case of an error with the stream, we return the sink without closing it. return Ok(Async::Ready(self.take_result())); } } } } }
29.952153
98
0.519169
61dc3a7e3a2d36ed83fb6a2e6df9208f71157c67
198,291
#[doc = "Register `PH_INTEN` reader"] pub struct R(crate::R<PH_INTEN_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PH_INTEN_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PH_INTEN_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PH_INTEN_SPEC>) -> Self { R(reader) } } #[doc = "Register `PH_INTEN` writer"] pub struct W(crate::W<PH_INTEN_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PH_INTEN_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PH_INTEN_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PH_INTEN_SPEC>) -> Self { W(writer) } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN0_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN0_A> for bool { #[inline(always)] fn from(variant: FLIEN0_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN0` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the 
interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN0_R(crate::FieldReader<bool, FLIEN0_A>); impl FLIEN0_R { pub(crate) fn new(bits: bool) -> Self { FLIEN0_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN0_A { match self.bits { false => FLIEN0_A::_0, true => FLIEN0_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN0_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN0_A::_1 } } impl core::ops::Deref for FLIEN0_R { type Target = crate::FieldReader<bool, FLIEN0_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN0` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN0_W<'a> { w: &'a mut W, } impl<'a> FLIEN0_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN0_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN0_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN0_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN1_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN1_A> for bool { #[inline(always)] fn from(variant: FLIEN1_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN1` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN1_R(crate::FieldReader<bool, FLIEN1_A>); impl FLIEN1_R { pub(crate) fn new(bits: bool) -> Self { FLIEN1_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN1_A { match self.bits { false => FLIEN1_A::_0, true => FLIEN1_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN1_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN1_A::_1 } } impl core::ops::Deref for FLIEN1_R { type Target = crate::FieldReader<bool, FLIEN1_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN1` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN1_W<'a> { w: &'a mut W, } impl<'a> FLIEN1_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN1_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN1_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN1_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN2_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN2_A> for bool { #[inline(always)] fn from(variant: FLIEN2_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN2` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN2_R(crate::FieldReader<bool, FLIEN2_A>); impl FLIEN2_R { pub(crate) fn new(bits: bool) -> Self { FLIEN2_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN2_A { match self.bits { false => FLIEN2_A::_0, true => FLIEN2_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN2_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN2_A::_1 } } impl core::ops::Deref for FLIEN2_R { type Target = crate::FieldReader<bool, FLIEN2_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN2` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN2_W<'a> { w: &'a mut W, } impl<'a> FLIEN2_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN2_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN2_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN2_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN3_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN3_A> for bool { #[inline(always)] fn from(variant: FLIEN3_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN3` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN3_R(crate::FieldReader<bool, FLIEN3_A>); impl FLIEN3_R { pub(crate) fn new(bits: bool) -> Self { FLIEN3_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN3_A { match self.bits { false => FLIEN3_A::_0, true => FLIEN3_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN3_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN3_A::_1 } } impl core::ops::Deref for FLIEN3_R { type Target = crate::FieldReader<bool, FLIEN3_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN3` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN3_W<'a> { w: &'a mut W, } impl<'a> FLIEN3_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN3_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN3_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN3_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN4_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN4_A> for bool { #[inline(always)] fn from(variant: FLIEN4_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN4` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN4_R(crate::FieldReader<bool, FLIEN4_A>); impl FLIEN4_R { pub(crate) fn new(bits: bool) -> Self { FLIEN4_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN4_A { match self.bits { false => FLIEN4_A::_0, true => FLIEN4_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN4_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN4_A::_1 } } impl core::ops::Deref for FLIEN4_R { type Target = crate::FieldReader<bool, FLIEN4_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN4` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN4_W<'a> { w: &'a mut W, } impl<'a> FLIEN4_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN4_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN4_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN4_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN5_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN5_A> for bool { #[inline(always)] fn from(variant: FLIEN5_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN5` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN5_R(crate::FieldReader<bool, FLIEN5_A>); impl FLIEN5_R { pub(crate) fn new(bits: bool) -> Self { FLIEN5_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN5_A { match self.bits { false => FLIEN5_A::_0, true => FLIEN5_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN5_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN5_A::_1 } } impl core::ops::Deref for FLIEN5_R { type Target = crate::FieldReader<bool, FLIEN5_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN5` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN5_W<'a> { w: &'a mut W, } impl<'a> FLIEN5_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN5_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN5_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN5_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN6_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN6_A> for bool { #[inline(always)] fn from(variant: FLIEN6_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN6` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN6_R(crate::FieldReader<bool, FLIEN6_A>); impl FLIEN6_R { pub(crate) fn new(bits: bool) -> Self { FLIEN6_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN6_A { match self.bits { false => FLIEN6_A::_0, true => FLIEN6_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN6_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN6_A::_1 } } impl core::ops::Deref for FLIEN6_R { type Target = crate::FieldReader<bool, FLIEN6_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN6` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN6_W<'a> { w: &'a mut W, } impl<'a> FLIEN6_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN6_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN6_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN6_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN7_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN7_A> for bool { #[inline(always)] fn from(variant: FLIEN7_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN7` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN7_R(crate::FieldReader<bool, FLIEN7_A>); impl FLIEN7_R { pub(crate) fn new(bits: bool) -> Self { FLIEN7_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN7_A { match self.bits { false => FLIEN7_A::_0, true => FLIEN7_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN7_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN7_A::_1 } } impl core::ops::Deref for FLIEN7_R { type Target = crate::FieldReader<bool, FLIEN7_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN7` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN7_W<'a> { w: &'a mut W, } impl<'a> FLIEN7_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN7_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN7_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN7_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN8_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN8_A> for bool { #[inline(always)] fn from(variant: FLIEN8_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN8` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN8_R(crate::FieldReader<bool, FLIEN8_A>); impl FLIEN8_R { pub(crate) fn new(bits: bool) -> Self { FLIEN8_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN8_A { match self.bits { false => FLIEN8_A::_0, true => FLIEN8_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN8_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN8_A::_1 } } impl core::ops::Deref for FLIEN8_R { type Target = crate::FieldReader<bool, FLIEN8_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN8` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN8_W<'a> { w: &'a mut W, } impl<'a> FLIEN8_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN8_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN8_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN8_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN9_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN9_A> for bool { #[inline(always)] fn from(variant: FLIEN9_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN9` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN9_R(crate::FieldReader<bool, FLIEN9_A>); impl FLIEN9_R { pub(crate) fn new(bits: bool) -> Self { FLIEN9_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN9_A { match self.bits { false => FLIEN9_A::_0, true => FLIEN9_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN9_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN9_A::_1 } } impl core::ops::Deref for FLIEN9_R { type Target = crate::FieldReader<bool, FLIEN9_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN9` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN9_W<'a> { w: &'a mut W, } impl<'a> FLIEN9_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN9_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN9_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN9_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN10_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN10_A> for bool { #[inline(always)] fn from(variant: FLIEN10_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN10` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN10_R(crate::FieldReader<bool, FLIEN10_A>); impl FLIEN10_R { pub(crate) fn new(bits: bool) -> Self { FLIEN10_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN10_A { match self.bits { false => FLIEN10_A::_0, true => FLIEN10_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN10_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN10_A::_1 } } impl core::ops::Deref for FLIEN10_R { type Target = crate::FieldReader<bool, FLIEN10_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN10` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN10_W<'a> { w: &'a mut W, } impl<'a> FLIEN10_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN10_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN10_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN10_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN11_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN11_A> for bool { #[inline(always)] fn from(variant: FLIEN11_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN11` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN11_R(crate::FieldReader<bool, FLIEN11_A>); impl FLIEN11_R { pub(crate) fn new(bits: bool) -> Self { FLIEN11_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN11_A { match self.bits { false => FLIEN11_A::_0, true => FLIEN11_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN11_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN11_A::_1 } } impl core::ops::Deref for FLIEN11_R { type Target = crate::FieldReader<bool, FLIEN11_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN11` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN11_W<'a> { w: &'a mut W, } impl<'a> FLIEN11_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN11_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN11_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN11_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN12_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN12_A> for bool { #[inline(always)] fn from(variant: FLIEN12_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN12` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN12_R(crate::FieldReader<bool, FLIEN12_A>); impl FLIEN12_R { pub(crate) fn new(bits: bool) -> Self { FLIEN12_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN12_A { match self.bits { false => FLIEN12_A::_0, true => FLIEN12_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN12_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN12_A::_1 } } impl core::ops::Deref for FLIEN12_R { type Target = crate::FieldReader<bool, FLIEN12_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN12` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN12_W<'a> { w: &'a mut W, } impl<'a> FLIEN12_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN12_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN12_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN12_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN13_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN13_A> for bool { #[inline(always)] fn from(variant: FLIEN13_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN13` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN13_R(crate::FieldReader<bool, FLIEN13_A>); impl FLIEN13_R { pub(crate) fn new(bits: bool) -> Self { FLIEN13_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN13_A { match self.bits { false => FLIEN13_A::_0, true => FLIEN13_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN13_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN13_A::_1 } } impl core::ops::Deref for FLIEN13_R { type Target = crate::FieldReader<bool, FLIEN13_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN13` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN13_W<'a> { w: &'a mut W, } impl<'a> FLIEN13_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN13_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN13_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN13_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN14_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN14_A> for bool { #[inline(always)] fn from(variant: FLIEN14_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN14` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN14_R(crate::FieldReader<bool, FLIEN14_A>); impl FLIEN14_R { pub(crate) fn new(bits: bool) -> Self { FLIEN14_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN14_A { match self.bits { false => FLIEN14_A::_0, true => FLIEN14_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN14_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN14_A::_1 } } impl core::ops::Deref for FLIEN14_R { type Target = crate::FieldReader<bool, FLIEN14_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN14` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN14_W<'a> { w: &'a mut W, } impl<'a> FLIEN14_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN14_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN14_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN14_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14); self.w } } #[doc = "Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FLIEN15_A { #[doc = "0: Px.n level low or high to low interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level low or high to low interrupt Enabled"] _1 = 1, } impl From<FLIEN15_A> for bool { #[inline(always)] fn from(variant: FLIEN15_A) -> Self { variant as u8 != 0 } } #[doc = "Field `FLIEN15` reader - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN15_R(crate::FieldReader<bool, FLIEN15_A>); impl FLIEN15_R { pub(crate) fn new(bits: bool) -> Self { FLIEN15_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FLIEN15_A { match self.bits { false => FLIEN15_A::_0, true => FLIEN15_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == FLIEN15_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == FLIEN15_A::_1 } } impl core::ops::Deref for FLIEN15_R { type Target = crate::FieldReader<bool, FLIEN15_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `FLIEN15` writer - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit\nThe FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function.\nWhen setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level.\nIf the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct FLIEN15_W<'a> { w: &'a mut W, } impl<'a> FLIEN15_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: FLIEN15_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level low or high to low interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(FLIEN15_A::_0) } #[doc = "Px.n level low or high to low interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(FLIEN15_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN0_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN0_A> for bool { #[inline(always)] fn from(variant: RHIEN0_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN0` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN0_R(crate::FieldReader<bool, RHIEN0_A>); impl RHIEN0_R { pub(crate) fn new(bits: bool) -> Self { RHIEN0_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN0_A { match self.bits { false => RHIEN0_A::_0, true => RHIEN0_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN0_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN0_A::_1 } } impl core::ops::Deref for RHIEN0_R { type Target = crate::FieldReader<bool, RHIEN0_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN0` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN0_W<'a> { w: &'a mut W, } impl<'a> RHIEN0_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN0_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN0_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN0_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | ((value as u32 & 0x01) << 16); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN1 field (register bit 17).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN1_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN1_A> for bool { #[inline(always)] fn from(variant: RHIEN1_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN1` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN1; Deref exposes the generic FieldReader API.
pub struct RHIEN1_R(crate::FieldReader<bool, RHIEN1_A>); impl RHIEN1_R { pub(crate) fn new(bits: bool) -> Self { RHIEN1_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN1_A { match self.bits { false => RHIEN1_A::_0, true => RHIEN1_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN1_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN1_A::_1 } }
impl core::ops::Deref for RHIEN1_R { type Target = crate::FieldReader<bool, RHIEN1_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN1` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN1: bit() masks out register bit 17, then ORs in the new value.
pub struct RHIEN1_W<'a> { w: &'a mut W, } impl<'a> RHIEN1_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN1_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN1_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN1_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | ((value as u32 & 0x01) << 17); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN2 field (register bit 18).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN2_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN2_A> for bool { #[inline(always)] fn from(variant: RHIEN2_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN2` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN2; Deref exposes the generic FieldReader API.
pub struct RHIEN2_R(crate::FieldReader<bool, RHIEN2_A>); impl RHIEN2_R { pub(crate) fn new(bits: bool) -> Self { RHIEN2_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN2_A { match self.bits { false => RHIEN2_A::_0, true => RHIEN2_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN2_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN2_A::_1 } }
impl core::ops::Deref for RHIEN2_R { type Target = crate::FieldReader<bool, RHIEN2_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN2` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN2: bit() masks out register bit 18, then ORs in the new value.
pub struct RHIEN2_W<'a> { w: &'a mut W, } impl<'a> RHIEN2_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN2_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN2_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN2_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | ((value as u32 & 0x01) << 18); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN3 field (register bit 19).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN3_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN3_A> for bool { #[inline(always)] fn from(variant: RHIEN3_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN3` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN3; Deref exposes the generic FieldReader API.
pub struct RHIEN3_R(crate::FieldReader<bool, RHIEN3_A>); impl RHIEN3_R { pub(crate) fn new(bits: bool) -> Self { RHIEN3_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN3_A { match self.bits { false => RHIEN3_A::_0, true => RHIEN3_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN3_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN3_A::_1 } }
impl core::ops::Deref for RHIEN3_R { type Target = crate::FieldReader<bool, RHIEN3_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN3` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN3: bit() masks out register bit 19, then ORs in the new value.
pub struct RHIEN3_W<'a> { w: &'a mut W, } impl<'a> RHIEN3_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN3_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN3_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN3_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN4 field (register bit 20).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN4_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN4_A> for bool { #[inline(always)] fn from(variant: RHIEN4_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN4` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN4; Deref exposes the generic FieldReader API.
pub struct RHIEN4_R(crate::FieldReader<bool, RHIEN4_A>); impl RHIEN4_R { pub(crate) fn new(bits: bool) -> Self { RHIEN4_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN4_A { match self.bits { false => RHIEN4_A::_0, true => RHIEN4_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN4_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN4_A::_1 } }
impl core::ops::Deref for RHIEN4_R { type Target = crate::FieldReader<bool, RHIEN4_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN4` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN4: bit() masks out register bit 20, then ORs in the new value.
pub struct RHIEN4_W<'a> { w: &'a mut W, } impl<'a> RHIEN4_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN4_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN4_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN4_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN5 field (register bit 21).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN5_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN5_A> for bool { #[inline(always)] fn from(variant: RHIEN5_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN5` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN5; Deref exposes the generic FieldReader API.
pub struct RHIEN5_R(crate::FieldReader<bool, RHIEN5_A>); impl RHIEN5_R { pub(crate) fn new(bits: bool) -> Self { RHIEN5_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN5_A { match self.bits { false => RHIEN5_A::_0, true => RHIEN5_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN5_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN5_A::_1 } }
impl core::ops::Deref for RHIEN5_R { type Target = crate::FieldReader<bool, RHIEN5_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN5` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN5: bit() masks out register bit 21, then ORs in the new value.
pub struct RHIEN5_W<'a> { w: &'a mut W, } impl<'a> RHIEN5_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN5_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN5_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN5_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN6 field (register bit 22).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN6_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN6_A> for bool { #[inline(always)] fn from(variant: RHIEN6_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN6` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN6; Deref exposes the generic FieldReader API.
pub struct RHIEN6_R(crate::FieldReader<bool, RHIEN6_A>); impl RHIEN6_R { pub(crate) fn new(bits: bool) -> Self { RHIEN6_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN6_A { match self.bits { false => RHIEN6_A::_0, true => RHIEN6_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN6_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN6_A::_1 } }
impl core::ops::Deref for RHIEN6_R { type Target = crate::FieldReader<bool, RHIEN6_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN6` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN6: bit() masks out register bit 22, then ORs in the new value.
pub struct RHIEN6_W<'a> { w: &'a mut W, } impl<'a> RHIEN6_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN6_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN6_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN6_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 22)) | ((value as u32 & 0x01) << 22); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN7 field (register bit 23).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN7_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN7_A> for bool { #[inline(always)] fn from(variant: RHIEN7_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN7` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN7; Deref exposes the generic FieldReader API.
pub struct RHIEN7_R(crate::FieldReader<bool, RHIEN7_A>); impl RHIEN7_R { pub(crate) fn new(bits: bool) -> Self { RHIEN7_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN7_A { match self.bits { false => RHIEN7_A::_0, true => RHIEN7_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN7_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN7_A::_1 } }
impl core::ops::Deref for RHIEN7_R { type Target = crate::FieldReader<bool, RHIEN7_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN7` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN7: bit() masks out register bit 23, then ORs in the new value.
pub struct RHIEN7_W<'a> { w: &'a mut W, } impl<'a> RHIEN7_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN7_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN7_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN7_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | ((value as u32 & 0x01) << 23); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"]
// svd2rust-generated enumerated values for the RHIEN8 field (register bit 24).
#[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN8_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, }
// Variant -> raw bit value: _0 -> false, _1 -> true.
impl From<RHIEN8_A> for bool { #[inline(always)] fn from(variant: RHIEN8_A) -> Self { variant as u8 != 0 } }
#[doc = "Field `RHIEN8` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Read proxy for RHIEN8; Deref exposes the generic FieldReader API.
pub struct RHIEN8_R(crate::FieldReader<bool, RHIEN8_A>); impl RHIEN8_R { pub(crate) fn new(bits: bool) -> Self { RHIEN8_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN8_A { match self.bits { false => RHIEN8_A::_0, true => RHIEN8_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN8_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN8_A::_1 } }
impl core::ops::Deref for RHIEN8_R { type Target = crate::FieldReader<bool, RHIEN8_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } }
#[doc = "Field `RHIEN8` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."]
// Write proxy for RHIEN8: bit() masks out register bit 24, then ORs in the new value.
pub struct RHIEN8_W<'a> { w: &'a mut W, } impl<'a> RHIEN8_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN8_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN8_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN8_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | ((value as u32 & 0x01) << 24); self.w } }
#[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN9_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN9_A> for bool { #[inline(always)] fn from(variant: RHIEN9_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN9` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN9_R(crate::FieldReader<bool, RHIEN9_A>); impl RHIEN9_R { pub(crate) fn new(bits: bool) -> Self { RHIEN9_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN9_A { match self.bits { false => RHIEN9_A::_0, true => RHIEN9_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN9_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN9_A::_1 } } impl core::ops::Deref for RHIEN9_R { type Target = crate::FieldReader<bool, RHIEN9_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN9` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN9_W<'a> { w: &'a mut W, } impl<'a> RHIEN9_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN9_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN9_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN9_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 25)) | ((value as u32 & 0x01) << 25); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN10_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN10_A> for bool { #[inline(always)] fn from(variant: RHIEN10_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN10` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN10_R(crate::FieldReader<bool, RHIEN10_A>); impl RHIEN10_R { pub(crate) fn new(bits: bool) -> Self { RHIEN10_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN10_A { match self.bits { false => RHIEN10_A::_0, true => RHIEN10_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN10_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN10_A::_1 } } impl core::ops::Deref for RHIEN10_R { type Target = crate::FieldReader<bool, RHIEN10_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN10` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN10_W<'a> { w: &'a mut W, } impl<'a> RHIEN10_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN10_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN10_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN10_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 26)) | ((value as u32 & 0x01) << 26); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN11_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN11_A> for bool { #[inline(always)] fn from(variant: RHIEN11_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN11` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN11_R(crate::FieldReader<bool, RHIEN11_A>); impl RHIEN11_R { pub(crate) fn new(bits: bool) -> Self { RHIEN11_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN11_A { match self.bits { false => RHIEN11_A::_0, true => RHIEN11_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN11_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN11_A::_1 } } impl core::ops::Deref for RHIEN11_R { type Target = crate::FieldReader<bool, RHIEN11_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN11` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN11_W<'a> { w: &'a mut W, } impl<'a> RHIEN11_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN11_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN11_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN11_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 27)) | ((value as u32 & 0x01) << 27); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN12_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN12_A> for bool { #[inline(always)] fn from(variant: RHIEN12_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN12` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN12_R(crate::FieldReader<bool, RHIEN12_A>); impl RHIEN12_R { pub(crate) fn new(bits: bool) -> Self { RHIEN12_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN12_A { match self.bits { false => RHIEN12_A::_0, true => RHIEN12_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN12_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN12_A::_1 } } impl core::ops::Deref for RHIEN12_R { type Target = crate::FieldReader<bool, RHIEN12_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN12` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN12_W<'a> { w: &'a mut W, } impl<'a> RHIEN12_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN12_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN12_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN12_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 28)) | ((value as u32 & 0x01) << 28); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN13_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN13_A> for bool { #[inline(always)] fn from(variant: RHIEN13_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN13` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN13_R(crate::FieldReader<bool, RHIEN13_A>); impl RHIEN13_R { pub(crate) fn new(bits: bool) -> Self { RHIEN13_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN13_A { match self.bits { false => RHIEN13_A::_0, true => RHIEN13_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN13_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN13_A::_1 } } impl core::ops::Deref for RHIEN13_R { type Target = crate::FieldReader<bool, RHIEN13_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN13` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN13_W<'a> { w: &'a mut W, } impl<'a> RHIEN13_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN13_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN13_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN13_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | ((value as u32 & 0x01) << 29); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN14_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN14_A> for bool { #[inline(always)] fn from(variant: RHIEN14_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN14` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN14_R(crate::FieldReader<bool, RHIEN14_A>); impl RHIEN14_R { pub(crate) fn new(bits: bool) -> Self { RHIEN14_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN14_A { match self.bits { false => RHIEN14_A::_0, true => RHIEN14_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN14_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN14_A::_1 } } impl core::ops::Deref for RHIEN14_R { type Target = crate::FieldReader<bool, RHIEN14_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN14` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN14_W<'a> { w: &'a mut W, } impl<'a> RHIEN14_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN14_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN14_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN14_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30); self.w } } #[doc = "Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RHIEN15_A { #[doc = "0: Px.n level high or low to high interrupt Disabled"] _0 = 0, #[doc = "1: Px.n level high or low to high interrupt Enabled"] _1 = 1, } impl From<RHIEN15_A> for bool { #[inline(always)] fn from(variant: RHIEN15_A) -> Self { variant as u8 != 0 } } #[doc = "Field `RHIEN15` reader - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN15_R(crate::FieldReader<bool, RHIEN15_A>); impl RHIEN15_R { pub(crate) fn new(bits: bool) -> Self { RHIEN15_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RHIEN15_A { match self.bits { false => RHIEN15_A::_0, true => RHIEN15_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == RHIEN15_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == RHIEN15_A::_1 } } impl core::ops::Deref for RHIEN15_R { type Target = crate::FieldReader<bool, RHIEN15_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `RHIEN15` writer - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit\nThe RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
\nWhen setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 :\nIf the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level.\nIf the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high.\nNote: \nThe PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] pub struct RHIEN15_W<'a> { w: &'a mut W, } impl<'a> RHIEN15_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: RHIEN15_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Px.n level high or low to high interrupt Disabled"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(RHIEN15_A::_0) } #[doc = "Px.n level high or low to high interrupt Enabled"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(RHIEN15_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31); self.w } } impl R { #[doc = "Bit 0 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. 
If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien0(&self) -> FLIEN0_R { FLIEN0_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien1(&self) -> FLIEN1_R { FLIEN1_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien2(&self) -> FLIEN2_R { FLIEN2_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien3(&self) -> FLIEN3_R { FLIEN3_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien4(&self) -> FLIEN4_R { FLIEN4_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien5(&self) -> FLIEN5_R { FLIEN5_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien6(&self) -> FLIEN6_R { FLIEN6_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. 
If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien7(&self) -> FLIEN7_R { FLIEN7_R::new(((self.bits >> 7) & 0x01) != 0) } #[doc = "Bit 8 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien8(&self) -> FLIEN8_R { FLIEN8_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 9 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien9(&self) -> FLIEN9_R { FLIEN9_R::new(((self.bits >> 9) & 0x01) != 0) } #[doc = "Bit 10 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien10(&self) -> FLIEN10_R { FLIEN10_R::new(((self.bits >> 10) & 0x01) != 0) } #[doc = "Bit 11 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien11(&self) -> FLIEN11_R { FLIEN11_R::new(((self.bits >> 11) & 0x01) != 0) } #[doc = "Bit 12 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien12(&self) -> FLIEN12_R { FLIEN12_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bit 13 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien13(&self) -> FLIEN13_R { FLIEN13_R::new(((self.bits >> 13) & 0x01) != 0) } #[doc = "Bit 14 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien14(&self) -> FLIEN14_R { FLIEN14_R::new(((self.bits >> 14) & 0x01) != 0) } #[doc = "Bit 15 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien15(&self) -> FLIEN15_R { FLIEN15_R::new(((self.bits >> 15) & 0x01) != 0) } #[doc = "Bit 16 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien0(&self) -> RHIEN0_R { RHIEN0_R::new(((self.bits >> 16) & 0x01) != 0) } #[doc = "Bit 17 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien1(&self) -> RHIEN1_R { RHIEN1_R::new(((self.bits >> 17) & 0x01) != 0) } #[doc = "Bit 18 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien2(&self) -> RHIEN2_R { RHIEN2_R::new(((self.bits >> 18) & 0x01) != 0) } #[doc = "Bit 19 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien3(&self) -> RHIEN3_R { RHIEN3_R::new(((self.bits >> 19) & 0x01) != 0) } #[doc = "Bit 20 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien4(&self) -> RHIEN4_R { RHIEN4_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 21 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien5(&self) -> RHIEN5_R { RHIEN5_R::new(((self.bits >> 21) & 0x01) != 0) } #[doc = "Bit 22 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien6(&self) -> RHIEN6_R { RHIEN6_R::new(((self.bits >> 22) & 0x01) != 0) } #[doc = "Bit 23 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien7(&self) -> RHIEN7_R { RHIEN7_R::new(((self.bits >> 23) & 0x01) != 0) } #[doc = "Bit 24 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien8(&self) -> RHIEN8_R { RHIEN8_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bit 25 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien9(&self) -> RHIEN9_R { RHIEN9_R::new(((self.bits >> 25) & 0x01) != 0) } #[doc = "Bit 26 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien10(&self) -> RHIEN10_R { RHIEN10_R::new(((self.bits >> 26) & 0x01) != 0) } #[doc = "Bit 27 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien11(&self) -> RHIEN11_R { RHIEN11_R::new(((self.bits >> 27) & 0x01) != 0) } #[doc = "Bit 28 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien12(&self) -> RHIEN12_R { RHIEN12_R::new(((self.bits >> 28) & 0x01) != 0) } #[doc = "Bit 29 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien13(&self) -> RHIEN13_R { RHIEN13_R::new(((self.bits >> 29) & 0x01) != 0) } #[doc = "Bit 30 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien14(&self) -> RHIEN14_R { RHIEN14_R::new(((self.bits >> 30) & 0x01) != 0) } #[doc = "Bit 31 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien15(&self) -> RHIEN15_R { RHIEN15_R::new(((self.bits >> 31) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien0(&mut self) -> FLIEN0_W { FLIEN0_W { w: self } } #[doc = "Bit 1 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien1(&mut self) -> FLIEN1_W { FLIEN1_W { w: self } } #[doc = "Bit 2 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien2(&mut self) -> FLIEN2_W { FLIEN2_W { w: self } } #[doc = "Bit 3 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien3(&mut self) -> FLIEN3_W { FLIEN3_W { w: self } } #[doc = "Bit 4 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. 
If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien4(&mut self) -> FLIEN4_W { FLIEN4_W { w: self } } #[doc = "Bit 5 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien5(&mut self) -> FLIEN5_W { FLIEN5_W { w: self } } #[doc = "Bit 6 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien6(&mut self) -> FLIEN6_W { FLIEN6_W { w: self } } #[doc = "Bit 7 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien7(&mut self) -> FLIEN7_W { FLIEN7_W { w: self } } #[doc = "Bit 8 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien8(&mut self) -> FLIEN8_W { FLIEN8_W { w: self } } #[doc = "Bit 9 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien9(&mut self) -> FLIEN9_W { FLIEN9_W { w: self } } #[doc = "Bit 10 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien10(&mut self) -> FLIEN10_W { FLIEN10_W { w: self } } #[doc = "Bit 11 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien11(&mut self) -> FLIEN11_W { FLIEN11_W { w: self } } #[doc = "Bit 12 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien12(&mut self) -> FLIEN12_W { FLIEN12_W { w: self } } #[doc = "Bit 13 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien13(&mut self) -> FLIEN13_W { FLIEN13_W { w: self } } #[doc = "Bit 14 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. 
When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien14(&mut self) -> FLIEN14_W { FLIEN14_W { w: self } } #[doc = "Bit 15 - Port A-H Pin\\[n\\] Falling Edge or Low Level Interrupt Trigger Type Enable Bit The FLIEN (Px_INTEN\\[n\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the FLIEN (Px_INTEN\\[n\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at low level. If the interrupt is edge trigger(TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from high to low. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn flien15(&mut self) -> FLIEN15_W { FLIEN15_W { w: self } } #[doc = "Bit 16 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien0(&mut self) -> RHIEN0_W { RHIEN0_W { w: self } } #[doc = "Bit 17 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien1(&mut self) -> RHIEN1_W { RHIEN1_W { w: self } } #[doc = "Bit 18 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien2(&mut self) -> RHIEN2_W { RHIEN2_W { w: self } } #[doc = "Bit 19 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien3(&mut self) -> RHIEN3_W { RHIEN3_W { w: self } } #[doc = "Bit 20 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien4(&mut self) -> RHIEN4_W { RHIEN4_W { w: self } } #[doc = "Bit 21 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. 
If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien5(&mut self) -> RHIEN5_W { RHIEN5_W { w: self } } #[doc = "Bit 22 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien6(&mut self) -> RHIEN6_W { RHIEN6_W { w: self } } #[doc = "Bit 23 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien7(&mut self) -> RHIEN7_W { RHIEN7_W { w: self } } #[doc = "Bit 24 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien8(&mut self) -> RHIEN8_W { RHIEN8_W { w: self } } #[doc = "Bit 25 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien9(&mut self) -> RHIEN9_W { RHIEN9_W { w: self } } #[doc = "Bit 26 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. 
Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien10(&mut self) -> RHIEN10_W { RHIEN10_W { w: self } } #[doc = "Bit 27 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien11(&mut self) -> RHIEN11_W { RHIEN11_W { w: self } } #[doc = "Bit 28 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. 
If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien12(&mut self) -> RHIEN12_W { RHIEN12_W { w: self } } #[doc = "Bit 29 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien13(&mut self) -> RHIEN13_W { RHIEN13_W { w: self } } #[doc = "Bit 30 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. 
Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien14(&mut self) -> RHIEN14_W { RHIEN14_W { w: self } } #[doc = "Bit 31 - Port A-H Pin\\[n\\] Rising Edge or High Level Interrupt Trigger Type Enable Bit The RHIEN (Px_INTEN\\[n+16\\]) bit is used to enable the interrupt for each of the corresponding input Px.n pin. Set bit to 1 also enable the pin wake-up function. When setting the RHIEN (Px_INTEN\\[n+16\\]) bit to 1 : If the interrupt is level trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 1), the input Px.n pin will generate the interrupt while this pin state is at high level. If the interrupt is edge trigger (TYPE (Px_INTTYPE\\[n\\]) bit is set to 0), the input Px.n pin will generate the interrupt while this pin state changed from low to high. Note: The PC.15/PF.12~13/PG.0~1,5~8/PH.0~3,12~15 pin is ignored."] #[inline(always)] pub fn rhien15(&mut self) -> RHIEN15_W { RHIEN15_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "PH Interrupt Enable Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ph_inten](index.html) module"] pub struct PH_INTEN_SPEC; impl crate::RegisterSpec for PH_INTEN_SPEC { type Ux = u32; } #[doc = "`read()` method returns [ph_inten::R](R) reader structure"] impl crate::Readable for PH_INTEN_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [ph_inten::W](W) writer structure"] impl crate::Writable for PH_INTEN_SPEC { type Writer = W; } #[doc = "`reset()` method sets PH_INTEN to value 0"] impl crate::Resettable for PH_INTEN_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
61.907899
709
0.651653
035b222e6ce62073fc38e1194584f029c4236a88
2,001
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Microbenchmark for the smallintmap library use std::collections::VecMap; use std::os; use std::str::from_str; use std::time::Duration; use std::uint; fn append_sequential(min: uint, max: uint, map: &mut VecMap<uint>) { for i in range(min, max) { map.insert(i, i + 22u); } } fn check_sequential(min: uint, max: uint, map: &VecMap<uint>) { for i in range(min, max) { assert_eq!(map[i], i + 22u); } } fn main() { let args = os::args(); let args = if os::getenv("RUST_BENCH").is_some() { vec!("".to_string(), "100000".to_string(), "100".to_string()) } else if args.len() <= 1u { vec!("".to_string(), "10000".to_string(), "50".to_string()) } else { args.into_iter().collect() }; let max = from_str::<uint>(args[1].as_slice()).unwrap(); let rep = from_str::<uint>(args[2].as_slice()).unwrap(); let mut checkf = Duration::seconds(0); let mut appendf = Duration::seconds(0); for _ in range(0u, rep) { let mut map = VecMap::new(); let d1 = Duration::span(|| append_sequential(0u, max, &mut map)); let d2 = Duration::span(|| check_sequential(0u, max, &map)); checkf = checkf + d2; appendf = appendf + d1; } let maxf = max as f64; println!("insert(): {} seconds\n", checkf); println!(" : {} op/ms\n", maxf / checkf.num_milliseconds() as f64); println!("get() : {} seconds\n", appendf); println!(" : {} op/ms\n", maxf / appendf.num_milliseconds() as f64); }
32.274194
79
0.611694
678ee9b0afc6b503313bc388f10f4909302df585
15,176
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 //! An implementation of x25519 elliptic curve key pairs required for //! [Diffie-Hellman key //! exchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange) //! in the Libra project. //! //! This is an API for [Elliptic Curves for Security - RFC //! 7748](https://tools.ietf.org/html/rfc7748) and which deals with //! long-term key generation and handling (`X25519StaticPrivateKey`, //! `X25519StaticPublicKey`) as well as short-term keys (`X25519EphemeralPrivateKey`, //! `X25519PublicKey`). //! //! The default type for a Diffie-Hellman secret is an ephemeral //! one, forming a `PrivateKey`-`PublicKey` pair with `X25519Publickey`, //! and is not serializable, since the use of fresh DH secrets is //! recommended for various reasons including PFS. //! //! We also provide a "static" implementation `X25519StaticPrivateKey`, //! which supports serialization, forming a `PrivateKey`-`PublicKey` pair //! with `X25519StaticPublickey`. This later type is precisely a //! [newtype](https://doc.rust-lang.org/1.5.0/style/features/types/newtype.html) //! wrapper around `X25519PublicKey`, to which it coerces through `Deref`. //! //! # Examples //! //! ``` //! use crypto::x25519::*; //! use rand::{rngs::StdRng, SeedableRng}; //! //! // Derive an X25519 static key pair from seed using the extract-then-expand HKDF method from RFC 5869. //! let salt = &b"some salt"[..]; //! // In production, ensure seed has at least 256 bits of entropy. //! let seed = [5u8; 32]; // seed is denoted as IKM in HKDF RFC 5869. //! let info = &b"some app info"[..]; //! //! let (private_key1, public_key1) = X25519StaticPrivateKey::derive_keypair_from_seed(Some(salt), &seed, Some(info)); //! let (private_key2, public_key2) = X25519StaticPrivateKey::derive_keypair_from_seed(Some(salt), &seed, Some(info)); //! assert_eq!(public_key1, public_key2); //! //! 
// Generate a random X25519 ephemeral key pair from an RNG (in this example a StdRng) //! use crypto::Uniform; //! let seed = [1u8; 32]; //! let mut rng: StdRng = SeedableRng::from_seed(seed); //! let private_key = X25519StaticPrivateKey::generate_for_testing(&mut rng); //! let public_key: X25519StaticPublicKey = (&private_key).into(); //! //! // Generate an X25519 key pair from an RNG and a user-provided seed. //! let salt = &b"some salt"[..]; //! // In production, ensure seed has at least 256 bits of entropy. //! let seed = [5u8; 32]; // seed is denoted as IKM in HKDF RFC 5869. //! let info = &b"some app info"[..]; //! let (private_key1, public_key1) = X25519StaticPrivateKey::generate_keypair_hybrid(Some(salt), &seed, Some(info)); //! let (private_key2, public_key2) = X25519StaticPrivateKey::generate_keypair_hybrid(Some(salt), &seed, Some(info)); //! assert_ne!(public_key1, public_key2); //! ``` use crate::{hkdf::Hkdf, traits::*}; use crypto_derive::{Deref, SilentDebug, SilentDisplay}; use rand::{rngs::EntropyRng, RngCore}; use serde::{de, export, ser}; use sha2::Sha256; use std::{convert::TryFrom, fmt, ops::Deref}; use x25519_dalek; /// TODO: move traits to the right file (possibly traits.rs) /// Key interfaces for Diffie-Hellman key exchange protocol build on top /// of the key APIs in traits.rs /// x25519 implementation /// The length of the DHPublicKey pub const X25519_PUBLIC_KEY_LENGTH: usize = 32; /// The length of the DHPrivateKey pub const X25519_PRIVATE_KEY_LENGTH: usize = 32; /// An x25519 ephemeral private (secret) key #[derive(SilentDisplay, SilentDebug)] pub struct X25519EphemeralPrivateKey(x25519_dalek::EphemeralSecret); /// An x25519 static private (secret) key #[derive(SilentDisplay, SilentDebug, Clone)] pub struct X25519StaticPrivateKey(x25519_dalek::StaticSecret); /// An x25519 public key #[derive(Clone, Debug, Deref)] pub struct X25519PublicKey(x25519_dalek::PublicKey); /// An x25519 public key to match the X25519Static key type, which /// 
dereferences to an X25519PublicKey #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct X25519StaticPublicKey(X25519PublicKey); /// An x25519 shared key #[derive(SilentDisplay, SilentDebug)] pub struct X25519SharedKey(x25519_dalek::SharedSecret); ///////////////////////// // X25519EphemeralPrivateKey Traits // ///////////////////////// impl Uniform for X25519EphemeralPrivateKey { fn generate_for_testing<R>(rng: &mut R) -> Self where R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng, { X25519EphemeralPrivateKey(x25519_dalek::EphemeralSecret::new(rng)) } } impl PrivateKey for X25519EphemeralPrivateKey { type PublicKeyMaterial = X25519PublicKey; } impl ExchangeKey for X25519EphemeralPrivateKey { type DHPublicKeyMaterial = X25519PublicKey; type DHSharedKeyMaterial = X25519SharedKey; // Diffie-Hellman exchange fn dh(self, their_public: &X25519PublicKey) -> X25519SharedKey { let shared_secret = self.0.diffie_hellman(&their_public.0); X25519SharedKey(shared_secret) } } ////////////////////// // X25519StaticPrivateKey Traits // ////////////////////// impl X25519StaticPrivateKey { /// Derives a keypair `(X25519PrivateKey, X25519PublicKey)` from /// a) salt (optional) - denoted as 'salt' in RFC 5869 /// b) seed - denoted as 'IKM' in RFC 5869 /// c) application info (optional) - denoted as 'info' in RFC 5869 /// /// using the HKDF key derivation protocol, as defined in RFC 5869. /// This implementation uses the full extract-then-expand HKDF steps /// based on the SHA-256 hash function. 
pub fn derive_keypair_from_seed( salt: Option<&[u8]>, seed: &[u8], app_info: Option<&[u8]>, ) -> (X25519StaticPrivateKey, X25519StaticPublicKey) { let derived_bytes = Hkdf::<Sha256>::extract_then_expand(salt, seed, app_info, X25519_PRIVATE_KEY_LENGTH); let mut key_bytes = [0u8; X25519_PRIVATE_KEY_LENGTH]; key_bytes.copy_from_slice(derived_bytes.unwrap().as_slice()); let secret: x25519_dalek::StaticSecret = x25519_dalek::StaticSecret::from(key_bytes); let public: x25519_dalek::PublicKey = (&secret).into(); ( X25519StaticPrivateKey(secret), X25519StaticPublicKey(X25519PublicKey(public)), ) } /// Generates a random keypair `(PrivateKey, PublicKey)` by combining the output of `EntropyRng` /// with a user-provided seed. This concatenated seed is used as the seed to HKDF (RFC 5869). /// /// Similarly to `derive_keypair_from_seed` the user provides the following inputs: /// a) salt (optional) - denoted as 'salt' in RFC 5869 /// b) seed - denoted as 'IKM' in RFC 5869 /// c) application info (optional) - denoted as 'info' in RFC 5869 /// /// Note that this method is not deterministic, but the (random + static seed) key /// generation makes it safer against low entropy pools and weak RNGs. 
pub fn generate_keypair_hybrid( salt: Option<&[u8]>, seed: &[u8], app_info: Option<&[u8]>, ) -> (X25519StaticPrivateKey, X25519StaticPublicKey) { let mut rng = EntropyRng::new(); let mut seed_from_rng = [0u8; X25519_PRIVATE_KEY_LENGTH]; rng.fill_bytes(&mut seed_from_rng); let mut final_seed = seed.to_vec(); final_seed.extend_from_slice(&seed_from_rng); X25519StaticPrivateKey::derive_keypair_from_seed(salt, &final_seed, app_info) } } impl Uniform for X25519StaticPrivateKey { fn generate_for_testing<R>(rng: &mut R) -> Self where R: ::rand::SeedableRng + ::rand::RngCore + ::rand::CryptoRng, { X25519StaticPrivateKey(x25519_dalek::StaticSecret::new(rng)) } } impl PrivateKey for X25519StaticPrivateKey { type PublicKeyMaterial = X25519StaticPublicKey; } impl ExchangeKey for X25519StaticPrivateKey { type DHPublicKeyMaterial = X25519StaticPublicKey; type DHSharedKeyMaterial = X25519SharedKey; // Diffie-Hellman exchange fn dh(self, their_public: &X25519StaticPublicKey) -> X25519SharedKey { let shared_secret = self.0.diffie_hellman(&(their_public.deref()).0); X25519SharedKey(shared_secret) } } impl TryFrom<&[u8]> for X25519StaticPrivateKey { type Error = CryptoMaterialError; fn try_from(bytes: &[u8]) -> std::result::Result<X25519StaticPrivateKey, CryptoMaterialError> { if bytes.len() != X25519_PRIVATE_KEY_LENGTH { return Err(CryptoMaterialError::DeserializationError); } let mut bits = [0u8; X25519_PRIVATE_KEY_LENGTH]; bits.copy_from_slice(&bytes[..X25519_PRIVATE_KEY_LENGTH]); Ok(X25519StaticPrivateKey(x25519_dalek::StaticSecret::from( bits, ))) } } impl ValidKey for X25519StaticPrivateKey { fn to_bytes(&self) -> Vec<u8> { self.0.to_bytes().to_vec() } } ////////////////////// // X25519PublicKey Traits // ////////////////////// impl<'a> From<&'a X25519EphemeralPrivateKey> for X25519PublicKey { fn from(ephemeral: &'a X25519EphemeralPrivateKey) -> X25519PublicKey { X25519PublicKey(x25519_dalek::PublicKey::from(&ephemeral.0)) } } impl<'a> From<&'a X25519StaticPrivateKey> for 
X25519StaticPublicKey { fn from(ephemeral: &'a X25519StaticPrivateKey) -> X25519StaticPublicKey { X25519StaticPublicKey(X25519PublicKey(x25519_dalek::PublicKey::from(&ephemeral.0))) } } impl std::hash::Hash for X25519PublicKey { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { let encoded_pubkey = self.0.as_bytes(); state.write(encoded_pubkey); } } impl PartialEq for X25519PublicKey { fn eq(&self, other: &X25519PublicKey) -> bool { *self.0.as_bytes() == *other.0.as_bytes() } } impl Eq for X25519PublicKey {} impl PublicKey for X25519PublicKey { type PrivateKeyMaterial = X25519EphemeralPrivateKey; } impl PublicKey for X25519StaticPublicKey { type PrivateKeyMaterial = X25519StaticPrivateKey; } impl TryFrom<&[u8]> for X25519StaticPublicKey { type Error = CryptoMaterialError; fn try_from(bytes: &[u8]) -> std::result::Result<X25519StaticPublicKey, CryptoMaterialError> { if bytes.len() != X25519_PUBLIC_KEY_LENGTH { return Err(CryptoMaterialError::DeserializationError); } let mut bits = [0u8; X25519_PRIVATE_KEY_LENGTH]; bits.copy_from_slice(&bytes[..X25519_PRIVATE_KEY_LENGTH]); Ok(X25519StaticPublicKey(X25519PublicKey( x25519_dalek::PublicKey::from(bits), ))) } } impl ValidKey for X25519StaticPublicKey { fn to_bytes(&self) -> Vec<u8> { self.deref().0.as_bytes().to_vec() } } ////////////////////// // SharedKey Traits // ////////////////////// ////////////////////////////// // Compact Serialization // ////////////////////////////// impl ser::Serialize for X25519StaticPrivateKey { fn serialize<S>(&self, serializer: S) -> export::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_bytes(&self.to_bytes()) } } impl ser::Serialize for X25519StaticPublicKey { fn serialize<S>(&self, serializer: S) -> export::Result<S::Ok, S::Error> where S: ser::Serializer, { serializer.serialize_bytes(&self.to_bytes()) } } struct X25519StaticPrivateKeyVisitor; struct X25519StaticPublicKeyVisitor; impl<'de> de::Visitor<'de> for X25519StaticPrivateKeyVisitor { type Value = 
X25519StaticPrivateKey; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("x25519_dalek static key in bytes") } fn visit_bytes<E>(self, value: &[u8]) -> export::Result<X25519StaticPrivateKey, E> where E: de::Error, { X25519StaticPrivateKey::try_from(value).map_err(E::custom) } } impl<'de> de::Visitor<'de> for X25519StaticPublicKeyVisitor { type Value = X25519StaticPublicKey; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("x25519_dalek public key in bytes") } fn visit_bytes<E>(self, value: &[u8]) -> export::Result<X25519StaticPublicKey, E> where E: de::Error, { X25519StaticPublicKey::try_from(value).map_err(E::custom) } } impl<'de> de::Deserialize<'de> for X25519StaticPrivateKey { fn deserialize<D>(deserializer: D) -> export::Result<Self, D::Error> where D: de::Deserializer<'de>, { deserializer.deserialize_bytes(X25519StaticPrivateKeyVisitor {}) } } impl<'de> de::Deserialize<'de> for X25519StaticPublicKey { fn deserialize<D>(deserializer: D) -> export::Result<Self, D::Error> where D: de::Deserializer<'de>, { deserializer.deserialize_bytes(X25519StaticPublicKeyVisitor {}) } } ////////////////////////// // Compatibility Traits // ////////////////////////// /// Those transitory traits are meant to help with the progressive /// migration of the code base to the crypto module and will /// disappear after. pub mod compat { use crate::traits::*; #[cfg(any(test, feature = "testing"))] use proptest::strategy::LazyJust; #[cfg(any(test, feature = "testing"))] use proptest::{prelude::*, strategy::Strategy}; use crate::x25519::{X25519StaticPrivateKey, X25519StaticPublicKey}; use rand::{rngs::StdRng, SeedableRng}; /// Generate an arbitrary key pair, with possible Rng input /// /// Warning: if you pass in None, this will not return distinct /// results every time! 
Should you want to write non-deterministic /// tests, look at config::config_builder::util::get_test_config pub fn generate_keypair<'a, T>(opt_rng: T) -> (X25519StaticPrivateKey, X25519StaticPublicKey) where T: Into<Option<&'a mut StdRng>> + Sized, { if let Some(rng_mut_ref) = opt_rng.into() { <(X25519StaticPrivateKey, X25519StaticPublicKey)>::generate_for_testing(rng_mut_ref) } else { let mut rng = StdRng::from_seed(crate::test_utils::TEST_SEED); <(X25519StaticPrivateKey, X25519StaticPublicKey)>::generate_for_testing(&mut rng) } } /// Used to produce keypairs from a seed for testing purposes #[cfg(any(test, feature = "testing"))] pub fn keypair_strategy( ) -> impl Strategy<Value = (X25519StaticPrivateKey, X25519StaticPublicKey)> { // The no_shrink is because keypairs should be fixed -- shrinking would cause a different // keypair to be generated, which appears to not be very useful. any::<[u8; 32]>() .prop_map(|seed| { let mut rng: StdRng = SeedableRng::from_seed(seed); let (private_key, public_key) = generate_keypair(&mut rng); (private_key, public_key) }) .no_shrink() } #[cfg(any(test, feature = "testing"))] impl Arbitrary for X25519StaticPublicKey { type Parameters = (); fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy { LazyJust::new(|| generate_keypair(None).1).boxed() } type Strategy = BoxedStrategy<Self>; } }
35.792453
118
0.66961
891522cc24f7e1d08159c0f1713b35bf13dfbbc3
55,781
// This file contains code from external sources. // Attributions: https://github.com/wasmerio/wasmer/blob/master/ATTRIBUTIONS.md use crate::translator::{ type_to_irtype, FuncEnvironment as BaseFuncEnvironment, GlobalVariable, TargetEnvironment, }; use cranelift_codegen::cursor::FuncCursor; use cranelift_codegen::ir; use cranelift_codegen::ir::condcodes::*; use cranelift_codegen::ir::immediates::{Offset32, Uimm64}; use cranelift_codegen::ir::types::*; use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature}; use cranelift_codegen::isa::TargetFrontendConfig; use cranelift_frontend::{FunctionBuilder, Variable}; use std::convert::TryFrom; use wasmer_compiler::wasmparser::Type; use wasmer_compiler::{WasmError, WasmResult}; use wasmer_types::entity::EntityRef; use wasmer_types::entity::PrimaryMap; use wasmer_types::VMBuiltinFunctionIndex; use wasmer_types::VMOffsets; use wasmer_types::{ FunctionIndex, FunctionType, GlobalIndex, LocalFunctionIndex, MemoryIndex, ModuleInfo, SignatureIndex, TableIndex, Type as WasmerType, }; use wasmer_types::{MemoryStyle, TableStyle}; /// Compute an `ir::ExternalName` for a given wasm function index. pub fn get_function_name(func_index: FunctionIndex) -> ir::ExternalName { ir::ExternalName::user(0, func_index.as_u32()) } /// The type of the `current_elements` field. pub fn type_of_vmtable_definition_current_elements(vmoffsets: &VMOffsets) -> ir::Type { ir::Type::int(u16::from(vmoffsets.size_of_vmtable_definition_current_elements()) * 8).unwrap() } /// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`. pub struct FuncEnvironment<'module_environment> { /// Target-specified configuration. target_config: TargetFrontendConfig, /// The module-level environment which this function-level environment belongs to. module: &'module_environment ModuleInfo, /// A stack tracking the type of local variables. 
type_stack: Vec<WasmerType>, /// The module function signatures signatures: &'module_environment PrimaryMap<SignatureIndex, ir::Signature>, /// The Cranelift global holding the vmctx address. vmctx: Option<ir::GlobalValue>, /// The external function signature for implementing wasm's `memory.size` /// for locally-defined 32-bit memories. memory32_size_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.size` /// for locally-defined tables. table_size_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `memory.grow` /// for locally-defined memories. memory_grow_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.grow` /// for locally-defined tables. table_grow_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.copy` /// (it's the same for both local and imported tables). table_copy_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.init`. table_init_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `elem.drop`. elem_drop_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `memory.copy` /// (it's the same for both local and imported memories). memory_copy_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `memory.fill` /// (it's the same for both local and imported memories). memory_fill_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `memory.init`. memory_init_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `data.drop`. data_drop_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.get`. table_get_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.set`. 
table_set_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `func.ref`. func_ref_sig: Option<ir::SigRef>, /// The external function signature for implementing wasm's `table.fill`. table_fill_sig: Option<ir::SigRef>, /// The external function signature for implementing reference increment for `extern.ref`. externref_inc_sig: Option<ir::SigRef>, /// The external function signature for implementing reference decrement for `extern.ref`. externref_dec_sig: Option<ir::SigRef>, /// Offsets to struct fields accessed by JIT code. offsets: VMOffsets, /// The memory styles memory_styles: &'module_environment PrimaryMap<MemoryIndex, MemoryStyle>, /// The table styles table_styles: &'module_environment PrimaryMap<TableIndex, TableStyle>, } impl<'module_environment> FuncEnvironment<'module_environment> { pub fn new( target_config: TargetFrontendConfig, module: &'module_environment ModuleInfo, signatures: &'module_environment PrimaryMap<SignatureIndex, ir::Signature>, memory_styles: &'module_environment PrimaryMap<MemoryIndex, MemoryStyle>, table_styles: &'module_environment PrimaryMap<TableIndex, TableStyle>, ) -> Self { Self { target_config, module, signatures, type_stack: vec![], vmctx: None, memory32_size_sig: None, table_size_sig: None, memory_grow_sig: None, table_grow_sig: None, table_copy_sig: None, table_init_sig: None, elem_drop_sig: None, memory_copy_sig: None, memory_fill_sig: None, memory_init_sig: None, table_get_sig: None, table_set_sig: None, data_drop_sig: None, func_ref_sig: None, table_fill_sig: None, externref_inc_sig: None, externref_dec_sig: None, offsets: VMOffsets::new(target_config.pointer_bytes(), module), memory_styles, table_styles, } } fn pointer_type(&self) -> ir::Type { self.target_config.pointer_type() } fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue { self.vmctx.unwrap_or_else(|| { let vmctx = func.create_global_value(ir::GlobalValueData::VMContext); self.vmctx = Some(vmctx); vmctx }) } fn 
get_table_fill_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_fill_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // table index AbiParam::new(I32), // dst AbiParam::new(I32), // value AbiParam::new(R64), // len AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.table_fill_sig = Some(sig); sig } fn get_table_fill_func( &mut self, func: &mut Function, table_index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { ( self.get_table_fill_sig(func), table_index.index(), VMBuiltinFunctionIndex::get_table_fill_index(), ) } fn get_externref_inc_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.externref_inc_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![AbiParam::new(R64)], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.externref_inc_sig = Some(sig); sig } fn get_externref_inc_func( &mut self, func: &mut Function, ) -> (ir::SigRef, VMBuiltinFunctionIndex) { ( self.get_externref_inc_sig(func), VMBuiltinFunctionIndex::get_externref_inc_index(), ) } fn get_externref_dec_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.externref_dec_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![AbiParam::new(R64)], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.externref_dec_sig = Some(sig); sig } fn get_externref_dec_func( &mut self, func: &mut Function, ) -> (ir::SigRef, VMBuiltinFunctionIndex) { ( self.get_externref_dec_sig(func), VMBuiltinFunctionIndex::get_externref_dec_index(), ) } fn get_func_ref_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.func_ref_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), ], returns: vec![AbiParam::new(R64)], 
call_conv: self.target_config.default_call_conv, }) }); self.func_ref_sig = Some(sig); sig } fn get_func_ref_func( &mut self, func: &mut Function, function_index: FunctionIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { ( self.get_func_ref_sig(func), function_index.index(), VMBuiltinFunctionIndex::get_func_ref_index(), ) } fn get_table_get_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_get_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), AbiParam::new(I32), ], returns: vec![AbiParam::new(R64)], call_conv: self.target_config.default_call_conv, }) }); self.table_get_sig = Some(sig); sig } fn get_table_get_func( &mut self, func: &mut Function, table_index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_table(table_index) { ( self.get_table_get_sig(func), table_index.index(), VMBuiltinFunctionIndex::get_imported_table_get_index(), ) } else { ( self.get_table_get_sig(func), self.module.local_table_index(table_index).unwrap().index(), VMBuiltinFunctionIndex::get_table_get_index(), ) } } fn get_table_set_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_set_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), AbiParam::new(I32), AbiParam::new(R64), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.table_set_sig = Some(sig); sig } fn get_table_set_func( &mut self, func: &mut Function, table_index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_table(table_index) { ( self.get_table_set_sig(func), table_index.index(), VMBuiltinFunctionIndex::get_imported_table_set_index(), ) } else { ( self.get_table_set_sig(func), self.module.local_table_index(table_index).unwrap().index(), 
VMBuiltinFunctionIndex::get_table_set_index(), ) } } fn get_table_grow_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_grow_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // TODO: figure out what the representation of a Wasm value is AbiParam::new(R64), AbiParam::new(I32), AbiParam::new(I32), ], returns: vec![AbiParam::new(I32)], call_conv: self.target_config.default_call_conv, }) }); self.table_grow_sig = Some(sig); sig } /// Return the table.grow function signature to call for the given index, along with the /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. fn get_table_grow_func( &mut self, func: &mut Function, index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_table(index) { ( self.get_table_grow_sig(func), index.index(), VMBuiltinFunctionIndex::get_imported_table_grow_index(), ) } else { ( self.get_table_grow_sig(func), self.module.local_table_index(index).unwrap().index(), VMBuiltinFunctionIndex::get_table_grow_index(), ) } } fn get_memory_grow_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.memory_grow_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), AbiParam::new(I32), ], returns: vec![AbiParam::new(I32)], call_conv: self.target_config.default_call_conv, }) }); self.memory_grow_sig = Some(sig); sig } /// Return the memory.grow function signature to call for the given index, along with the /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. 
fn get_memory_grow_func( &mut self, func: &mut Function, index: MemoryIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_memory(index) { ( self.get_memory_grow_sig(func), index.index(), VMBuiltinFunctionIndex::get_imported_memory32_grow_index(), ) } else { ( self.get_memory_grow_sig(func), self.module.local_memory_index(index).unwrap().index(), VMBuiltinFunctionIndex::get_memory32_grow_index(), ) } } fn get_table_size_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_size_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), ], returns: vec![AbiParam::new(I32)], call_conv: self.target_config.default_call_conv, }) }); self.table_size_sig = Some(sig); sig } /// Return the memory.size function signature to call for the given index, along with the /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. 
fn get_table_size_func( &mut self, func: &mut Function, index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_table(index) { ( self.get_table_size_sig(func), index.index(), VMBuiltinFunctionIndex::get_imported_table_size_index(), ) } else { ( self.get_table_size_sig(func), self.module.local_table_index(index).unwrap().index(), VMBuiltinFunctionIndex::get_table_size_index(), ) } } fn get_memory32_size_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.memory32_size_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), AbiParam::new(I32), ], returns: vec![AbiParam::new(I32)], call_conv: self.target_config.default_call_conv, }) }); self.memory32_size_sig = Some(sig); sig } /// Return the memory.size function signature to call for the given index, along with the /// translated index value to pass to it and its index in `VMBuiltinFunctionsArray`. fn get_memory_size_func( &mut self, func: &mut Function, index: MemoryIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { if self.module.is_imported_memory(index) { ( self.get_memory32_size_sig(func), index.index(), VMBuiltinFunctionIndex::get_imported_memory32_size_index(), ) } else { ( self.get_memory32_size_sig(func), self.module.local_memory_index(index).unwrap().index(), VMBuiltinFunctionIndex::get_memory32_size_index(), ) } } fn get_table_copy_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_copy_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Destination table index. AbiParam::new(I32), // Source table index. AbiParam::new(I32), // Index within destination table. AbiParam::new(I32), // Index within source table. AbiParam::new(I32), // Number of elements to copy. 
AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.table_copy_sig = Some(sig); sig } fn get_table_copy_func( &mut self, func: &mut Function, dst_table_index: TableIndex, src_table_index: TableIndex, ) -> (ir::SigRef, usize, usize, VMBuiltinFunctionIndex) { let sig = self.get_table_copy_sig(func); ( sig, dst_table_index.as_u32() as usize, src_table_index.as_u32() as usize, VMBuiltinFunctionIndex::get_table_copy_index(), ) } fn get_table_init_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.table_init_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Table index. AbiParam::new(I32), // Segment index. AbiParam::new(I32), // Destination index within table. AbiParam::new(I32), // Source index within segment. AbiParam::new(I32), // Number of elements to initialize. AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.table_init_sig = Some(sig); sig } fn get_table_init_func( &mut self, func: &mut Function, table_index: TableIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { let sig = self.get_table_init_sig(func); let table_index = table_index.as_u32() as usize; ( sig, table_index, VMBuiltinFunctionIndex::get_table_init_index(), ) } fn get_elem_drop_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.elem_drop_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Element index. 
AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.elem_drop_sig = Some(sig); sig } fn get_elem_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, VMBuiltinFunctionIndex) { let sig = self.get_elem_drop_sig(func); (sig, VMBuiltinFunctionIndex::get_elem_drop_index()) } fn get_memory_copy_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.memory_copy_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Memory index. AbiParam::new(I32), // Destination address. AbiParam::new(I32), // Source address. AbiParam::new(I32), // Length. AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.memory_copy_sig = Some(sig); sig } fn get_memory_copy_func( &mut self, func: &mut Function, memory_index: MemoryIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { let sig = self.get_memory_copy_sig(func); if let Some(local_memory_index) = self.module.local_memory_index(memory_index) { ( sig, local_memory_index.index(), VMBuiltinFunctionIndex::get_memory_copy_index(), ) } else { ( sig, memory_index.index(), VMBuiltinFunctionIndex::get_imported_memory_copy_index(), ) } } fn get_memory_fill_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.memory_fill_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Memory index. AbiParam::new(I32), // Destination address. AbiParam::new(I32), // Value. AbiParam::new(I32), // Length. 
AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.memory_fill_sig = Some(sig); sig } fn get_memory_fill_func( &mut self, func: &mut Function, memory_index: MemoryIndex, ) -> (ir::SigRef, usize, VMBuiltinFunctionIndex) { let sig = self.get_memory_fill_sig(func); if let Some(local_memory_index) = self.module.local_memory_index(memory_index) { ( sig, local_memory_index.index(), VMBuiltinFunctionIndex::get_memory_fill_index(), ) } else { ( sig, memory_index.index(), VMBuiltinFunctionIndex::get_imported_memory_fill_index(), ) } } fn get_memory_init_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.memory_init_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Memory index. AbiParam::new(I32), // Data index. AbiParam::new(I32), // Destination address. AbiParam::new(I32), // Source index within the data segment. AbiParam::new(I32), // Length. AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.memory_init_sig = Some(sig); sig } fn get_memory_init_func( &mut self, func: &mut Function, ) -> (ir::SigRef, VMBuiltinFunctionIndex) { let sig = self.get_memory_init_sig(func); (sig, VMBuiltinFunctionIndex::get_memory_init_index()) } fn get_data_drop_sig(&mut self, func: &mut Function) -> ir::SigRef { let sig = self.data_drop_sig.unwrap_or_else(|| { func.import_signature(Signature { params: vec![ AbiParam::special(self.pointer_type(), ArgumentPurpose::VMContext), // Data index. 
AbiParam::new(I32), ], returns: vec![], call_conv: self.target_config.default_call_conv, }) }); self.data_drop_sig = Some(sig); sig } fn get_data_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, VMBuiltinFunctionIndex) { let sig = self.get_data_drop_sig(func); (sig, VMBuiltinFunctionIndex::get_data_drop_index()) } /// Translates load of builtin function and returns a pair of values `vmctx` /// and address of the loaded function. fn translate_load_builtin_function_address( &mut self, pos: &mut FuncCursor<'_>, callee_func_idx: VMBuiltinFunctionIndex, ) -> (ir::Value, ir::Value) { // We use an indirect call so that we don't have to patch the code at runtime. let pointer_type = self.pointer_type(); let vmctx = self.vmctx(&mut pos.func); let base = pos.ins().global_value(pointer_type, vmctx); let mut mem_flags = ir::MemFlags::trusted(); mem_flags.set_readonly(); // Load the callee address. let body_offset = i32::try_from(self.offsets.vmctx_builtin_function(callee_func_idx)).unwrap(); let func_addr = pos.ins().load(pointer_type, mem_flags, base, body_offset); (base, func_addr) } } impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> { fn target_config(&self) -> TargetFrontendConfig { self.target_config } } impl<'module_environment> BaseFuncEnvironment for FuncEnvironment<'module_environment> { fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool { // The first parameter is the vmctx. The rest are the wasm parameters. 
index >= 1 } fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> WasmResult<ir::Table> { let pointer_type = self.pointer_type(); let (ptr, base_offset, current_elements_offset) = { let vmctx = self.vmctx(func); if let Some(def_index) = self.module.local_table_index(index) { let base_offset = i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap(); let current_elements_offset = i32::try_from( self.offsets .vmctx_vmtable_definition_current_elements(def_index), ) .unwrap(); (vmctx, base_offset, current_elements_offset) } else { let from_offset = self.offsets.vmctx_vmtable_import_definition(index); let table = func.create_global_value(ir::GlobalValueData::Load { base: vmctx, offset: Offset32::new(i32::try_from(from_offset).unwrap()), global_type: pointer_type, readonly: true, }); let base_offset = i32::from(self.offsets.vmtable_definition_base()); let current_elements_offset = i32::from(self.offsets.vmtable_definition_current_elements()); (table, base_offset, current_elements_offset) } }; let base_gv = func.create_global_value(ir::GlobalValueData::Load { base: ptr, offset: Offset32::new(base_offset), global_type: pointer_type, readonly: false, }); let bound_gv = func.create_global_value(ir::GlobalValueData::Load { base: ptr, offset: Offset32::new(current_elements_offset), global_type: type_of_vmtable_definition_current_elements(&self.offsets), readonly: false, }); let element_size = match self.table_styles[index] { TableStyle::CallerChecksSignature => u64::from(self.offsets.size_of_vm_funcref()), }; Ok(func.create_table(ir::TableData { base_gv, min_size: Uimm64::new(0), bound_gv, element_size: Uimm64::new(element_size), index_type: I32, })) } fn translate_table_grow( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor<'_>, table_index: TableIndex, _table: ir::Table, delta: ir::Value, init_value: ir::Value, ) -> WasmResult<ir::Value> { let (func_sig, index_arg, func_idx) = self.get_table_grow_func(&mut pos.func, 
table_index); let table_index = pos.ins().iconst(I32, index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let call_inst = pos.ins().call_indirect( func_sig, func_addr, &[vmctx, init_value, delta, table_index], ); Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_table_get( &mut self, builder: &mut FunctionBuilder, table_index: TableIndex, _table: ir::Table, index: ir::Value, ) -> WasmResult<ir::Value> { let mut pos = builder.cursor(); let (func_sig, table_index_arg, func_idx) = self.get_table_get_func(&mut pos.func, table_index); let table_index = pos.ins().iconst(I32, table_index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let call_inst = pos .ins() .call_indirect(func_sig, func_addr, &[vmctx, table_index, index]); Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_table_set( &mut self, builder: &mut FunctionBuilder, table_index: TableIndex, _table: ir::Table, value: ir::Value, index: ir::Value, ) -> WasmResult<()> { let mut pos = builder.cursor(); let (func_sig, table_index_arg, func_idx) = self.get_table_set_func(&mut pos.func, table_index); let table_index = pos.ins().iconst(I32, table_index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins() .call_indirect(func_sig, func_addr, &[vmctx, table_index, index, value]); Ok(()) } fn translate_table_fill( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor<'_>, table_index: TableIndex, dst: ir::Value, val: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, table_index_arg, func_idx) = self.get_table_fill_func(&mut pos.func, table_index); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let table_index_arg = pos.ins().iconst(I32, table_index_arg as i64); pos.ins().call_indirect( func_sig, func_addr, &[vmctx, table_index_arg, dst, val, 
len], ); Ok(()) } fn translate_externref_inc( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor<'_>, externref: ir::Value, ) -> WasmResult<()> { let (func_sig, func_idx) = self.get_externref_inc_func(&mut pos.func); let (_vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect(func_sig, func_addr, &[externref]); Ok(()) } fn translate_externref_dec( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor<'_>, externref: ir::Value, ) -> WasmResult<()> { let (func_sig, func_idx) = self.get_externref_dec_func(&mut pos.func); let (_vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect(func_sig, func_addr, &[externref]); Ok(()) } fn translate_ref_null( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor, ty: Type, ) -> WasmResult<ir::Value> { Ok(match ty { Type::FuncRef => pos.ins().null(self.reference_type()), Type::ExternRef => pos.ins().null(self.reference_type()), _ => { return Err(WasmError::Unsupported( "`ref.null T` that is not a `funcref` or an `externref`".into(), )); } }) } fn translate_ref_is_null( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor, value: ir::Value, ) -> WasmResult<ir::Value> { let bool_is_null = match pos.func.dfg.value_type(value) { // `externref` ty if ty.is_ref() => pos.ins().is_null(value), // `funcref` ty if ty == self.pointer_type() => { pos.ins() .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0) } _ => unreachable!(), }; Ok(pos.ins().bint(ir::types::I32, bool_is_null)) } fn translate_ref_func( &mut self, mut pos: cranelift_codegen::cursor::FuncCursor<'_>, func_index: FunctionIndex, ) -> WasmResult<ir::Value> { // TODO: optimize this by storing a pointer to local func_index funcref metadata // so that local funcref is just (*global + offset) instead of a function call // // Actually we can do the above for both local and imported functions because // all of those are known statically. 
// // prototyping with a function call though let (func_sig, func_index_arg, func_idx) = self.get_func_ref_func(&mut pos.func, func_index); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let func_index_arg = pos.ins().iconst(I32, func_index_arg as i64); let call_inst = pos .ins() .call_indirect(func_sig, func_addr, &[vmctx, func_index_arg]); Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_custom_global_get( &mut self, mut _pos: cranelift_codegen::cursor::FuncCursor<'_>, _index: GlobalIndex, ) -> WasmResult<ir::Value> { unreachable!("we don't make any custom globals") } fn translate_custom_global_set( &mut self, mut _pos: cranelift_codegen::cursor::FuncCursor<'_>, _index: GlobalIndex, _value: ir::Value, ) -> WasmResult<()> { unreachable!("we don't make any custom globals") } fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<ir::Heap> { let pointer_type = self.pointer_type(); let (ptr, base_offset, current_length_offset) = { let vmctx = self.vmctx(func); if let Some(def_index) = self.module.local_memory_index(index) { let base_offset = i32::try_from(self.offsets.vmctx_vmmemory_definition_base(def_index)).unwrap(); let current_length_offset = i32::try_from( self.offsets .vmctx_vmmemory_definition_current_length(def_index), ) .unwrap(); (vmctx, base_offset, current_length_offset) } else { let from_offset = self.offsets.vmctx_vmmemory_import_definition(index); let memory = func.create_global_value(ir::GlobalValueData::Load { base: vmctx, offset: Offset32::new(i32::try_from(from_offset).unwrap()), global_type: pointer_type, readonly: true, }); let base_offset = i32::from(self.offsets.vmmemory_definition_base()); let current_length_offset = i32::from(self.offsets.vmmemory_definition_current_length()); (memory, base_offset, current_length_offset) } }; // If we have a declared maximum, we can make this a "static" heap, which is // allocated up front and never moved. 
let (offset_guard_size, heap_style, readonly_base) = match self.memory_styles[index] { MemoryStyle::Dynamic { offset_guard_size } => { let heap_bound = func.create_global_value(ir::GlobalValueData::Load { base: ptr, offset: Offset32::new(current_length_offset), global_type: pointer_type, readonly: false, }); ( Uimm64::new(offset_guard_size), ir::HeapStyle::Dynamic { bound_gv: heap_bound, }, false, ) } MemoryStyle::Static { bound, offset_guard_size, } => ( Uimm64::new(offset_guard_size), ir::HeapStyle::Static { bound: Uimm64::new(bound.bytes().0 as u64), }, true, ), }; let heap_base = func.create_global_value(ir::GlobalValueData::Load { base: ptr, offset: Offset32::new(base_offset), global_type: pointer_type, readonly: readonly_base, }); Ok(func.create_heap(ir::HeapData { base: heap_base, min_size: 0.into(), offset_guard_size, style: heap_style, index_type: I32, })) } fn make_global( &mut self, func: &mut ir::Function, index: GlobalIndex, ) -> WasmResult<GlobalVariable> { let pointer_type = self.pointer_type(); let (ptr, offset) = { let vmctx = self.vmctx(func); let from_offset = if let Some(def_index) = self.module.local_global_index(index) { self.offsets.vmctx_vmglobal_definition(def_index) } else { self.offsets.vmctx_vmglobal_import_definition(index) }; let global = func.create_global_value(ir::GlobalValueData::Load { base: vmctx, offset: Offset32::new(i32::try_from(from_offset).unwrap()), global_type: pointer_type, readonly: true, }); (global, 0) }; Ok(GlobalVariable::Memory { gv: ptr, offset: offset.into(), ty: type_to_irtype(self.module.globals[index].ty, self.target_config())?, }) } fn make_indirect_sig( &mut self, func: &mut ir::Function, index: SignatureIndex, ) -> WasmResult<ir::SigRef> { Ok(func.import_signature(self.signatures[index].clone())) } fn make_direct_func( &mut self, func: &mut ir::Function, index: FunctionIndex, ) -> WasmResult<ir::FuncRef> { let sigidx = self.module.functions[index]; let signature = 
func.import_signature(self.signatures[sigidx].clone()); let name = get_function_name(index); Ok(func.import_function(ir::ExtFuncData { name, signature, colocated: true, })) } fn translate_call_indirect( &mut self, mut pos: FuncCursor<'_>, table_index: TableIndex, table: ir::Table, sig_index: SignatureIndex, sig_ref: ir::SigRef, callee: ir::Value, call_args: &[ir::Value], ) -> WasmResult<ir::Inst> { let pointer_type = self.pointer_type(); let table_entry_addr = pos.ins().table_addr(pointer_type, table, callee, 0); // Dereference table_entry_addr to get the function address. let mem_flags = ir::MemFlags::trusted(); let table_entry_addr = pos.ins().load( pointer_type, mem_flags, table_entry_addr, i32::from(self.offsets.vm_funcref_anyfunc_ptr()), ); // check if the funcref is null pos.ins() .trapz(table_entry_addr, ir::TrapCode::IndirectCallToNull); let func_addr = pos.ins().load( pointer_type, mem_flags, table_entry_addr, i32::from(self.offsets.vmcaller_checked_anyfunc_func_ptr()), ); // If necessary, check the signature. match self.table_styles[table_index] { TableStyle::CallerChecksSignature => { let sig_id_size = self.offsets.size_of_vmshared_signature_index(); let sig_id_type = ir::Type::int(u16::from(sig_id_size) * 8).unwrap(); let vmctx = self.vmctx(pos.func); let base = pos.ins().global_value(pointer_type, vmctx); let offset = i32::try_from(self.offsets.vmctx_vmshared_signature_id(sig_index)).unwrap(); // Load the caller ID. let mut mem_flags = ir::MemFlags::trusted(); mem_flags.set_readonly(); let caller_sig_id = pos.ins().load(sig_id_type, mem_flags, base, offset); // Load the callee ID. let mem_flags = ir::MemFlags::trusted(); let callee_sig_id = pos.ins().load( sig_id_type, mem_flags, table_entry_addr, i32::from(self.offsets.vmcaller_checked_anyfunc_type_index()), ); // Check that they match. 
let cmp = pos.ins().icmp(IntCC::Equal, callee_sig_id, caller_sig_id); pos.ins().trapz(cmp, ir::TrapCode::BadSignature); } } let mut real_call_args = Vec::with_capacity(call_args.len() + 2); // First append the callee vmctx address. let vmctx = pos.ins().load( pointer_type, mem_flags, table_entry_addr, i32::from(self.offsets.vmcaller_checked_anyfunc_vmctx()), ); real_call_args.push(vmctx); // Then append the regular call arguments. real_call_args.extend_from_slice(call_args); Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args)) } fn translate_call( &mut self, mut pos: FuncCursor<'_>, callee_index: FunctionIndex, callee: ir::FuncRef, call_args: &[ir::Value], ) -> WasmResult<ir::Inst> { let mut real_call_args = Vec::with_capacity(call_args.len() + 2); // Handle direct calls to locally-defined functions. if !self.module.is_imported_function(callee_index) { // Let's get the caller vmctx let caller_vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap(); // First append the callee vmctx address, which is the same as the caller vmctx in // this case. real_call_args.push(caller_vmctx); // Then append the regular call arguments. real_call_args.extend_from_slice(call_args); return Ok(pos.ins().call(callee, &real_call_args)); } // Handle direct calls to imported functions. We use an indirect call // so that we don't have to patch the code at runtime. let pointer_type = self.pointer_type(); let sig_ref = pos.func.dfg.ext_funcs[callee].signature; let vmctx = self.vmctx(&mut pos.func); let base = pos.ins().global_value(pointer_type, vmctx); let mem_flags = ir::MemFlags::trusted(); // Load the callee address. let body_offset = i32::try_from(self.offsets.vmctx_vmfunction_import_body(callee_index)).unwrap(); let func_addr = pos.ins().load(pointer_type, mem_flags, base, body_offset); // First append the callee vmctx address. 
let vmctx_offset = i32::try_from(self.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap(); let vmctx = pos.ins().load(pointer_type, mem_flags, base, vmctx_offset); real_call_args.push(vmctx); // Then append the regular call arguments. real_call_args.extend_from_slice(call_args); Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args)) } fn translate_memory_grow( &mut self, mut pos: FuncCursor<'_>, index: MemoryIndex, _heap: ir::Heap, val: ir::Value, ) -> WasmResult<ir::Value> { let (func_sig, index_arg, func_idx) = self.get_memory_grow_func(&mut pos.func, index); let memory_index = pos.ins().iconst(I32, index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let call_inst = pos .ins() .call_indirect(func_sig, func_addr, &[vmctx, val, memory_index]); Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_memory_size( &mut self, mut pos: FuncCursor<'_>, index: MemoryIndex, _heap: ir::Heap, ) -> WasmResult<ir::Value> { let (func_sig, index_arg, func_idx) = self.get_memory_size_func(&mut pos.func, index); let memory_index = pos.ins().iconst(I32, index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let call_inst = pos .ins() .call_indirect(func_sig, func_addr, &[vmctx, memory_index]); Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_memory_copy( &mut self, mut pos: FuncCursor, src_index: MemoryIndex, _src_heap: ir::Heap, _dst_index: MemoryIndex, _dst_heap: ir::Heap, dst: ir::Value, src: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, src_index, func_idx) = self.get_memory_copy_func(&mut pos.func, src_index); let src_index_arg = pos.ins().iconst(I32, src_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins() .call_indirect(func_sig, func_addr, &[vmctx, src_index_arg, dst, src, len]); Ok(()) } fn translate_memory_fill( &mut 
self, mut pos: FuncCursor, memory_index: MemoryIndex, _heap: ir::Heap, dst: ir::Value, val: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, memory_index, func_idx) = self.get_memory_fill_func(&mut pos.func, memory_index); let memory_index_arg = pos.ins().iconst(I32, memory_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect( func_sig, func_addr, &[vmctx, memory_index_arg, dst, val, len], ); Ok(()) } fn translate_memory_init( &mut self, mut pos: FuncCursor, memory_index: MemoryIndex, _heap: ir::Heap, seg_index: u32, dst: ir::Value, src: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, func_idx) = self.get_memory_init_func(&mut pos.func); let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64); let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect( func_sig, func_addr, &[vmctx, memory_index_arg, seg_index_arg, dst, src, len], ); Ok(()) } fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> { let (func_sig, func_idx) = self.get_data_drop_func(&mut pos.func); let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins() .call_indirect(func_sig, func_addr, &[vmctx, seg_index_arg]); Ok(()) } fn translate_table_size( &mut self, mut pos: FuncCursor, table_index: TableIndex, _table: ir::Table, ) -> WasmResult<ir::Value> { let (func_sig, index_arg, func_idx) = self.get_table_size_func(&mut pos.func, table_index); let table_index = pos.ins().iconst(I32, index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); let call_inst = pos .ins() .call_indirect(func_sig, func_addr, &[vmctx, table_index]); 
Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap()) } fn translate_table_copy( &mut self, mut pos: FuncCursor, dst_table_index: TableIndex, _dst_table: ir::Table, src_table_index: TableIndex, _src_table: ir::Table, dst: ir::Value, src: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, dst_table_index_arg, src_table_index_arg, func_idx) = self.get_table_copy_func(&mut pos.func, dst_table_index, src_table_index); let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64); let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect( func_sig, func_addr, &[ vmctx, dst_table_index_arg, src_table_index_arg, dst, src, len, ], ); Ok(()) } fn translate_table_init( &mut self, mut pos: FuncCursor, seg_index: u32, table_index: TableIndex, _table: ir::Table, dst: ir::Value, src: ir::Value, len: ir::Value, ) -> WasmResult<()> { let (func_sig, table_index_arg, func_idx) = self.get_table_init_func(&mut pos.func, table_index); let table_index_arg = pos.ins().iconst(I32, table_index_arg as i64); let seg_index_arg = pos.ins().iconst(I32, seg_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins().call_indirect( func_sig, func_addr, &[vmctx, table_index_arg, seg_index_arg, dst, src, len], ); Ok(()) } fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> { let (func_sig, func_idx) = self.get_elem_drop_func(&mut pos.func); let elem_index_arg = pos.ins().iconst(I32, elem_index as i64); let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx); pos.ins() .call_indirect(func_sig, func_addr, &[vmctx, elem_index_arg]); Ok(()) } fn translate_atomic_wait( &mut self, _pos: FuncCursor, _index: MemoryIndex, _heap: ir::Heap, _addr: ir::Value, _expected: ir::Value, _timeout: ir::Value, ) -> 
WasmResult<ir::Value> { Err(WasmError::Unsupported( "wasm atomics (fn translate_atomic_wait)".to_string(), )) } fn translate_atomic_notify( &mut self, _pos: FuncCursor, _index: MemoryIndex, _heap: ir::Heap, _addr: ir::Value, _count: ir::Value, ) -> WasmResult<ir::Value> { Err(WasmError::Unsupported( "wasm atomics (fn translate_atomic_notify)".to_string(), )) } fn get_global_type(&self, global_index: GlobalIndex) -> Option<WasmerType> { Some(self.module.globals.get(global_index)?.ty) } fn push_local_decl_on_stack(&mut self, ty: WasmerType) { self.type_stack.push(ty); } fn push_params_on_stack(&mut self, function_index: LocalFunctionIndex) { let func_index = self.module.func_index(function_index); let sig_idx = self.module.functions[func_index]; let signature = &self.module.signatures[sig_idx]; for param in signature.params() { self.type_stack.push(*param); } } fn get_local_type(&self, local_index: u32) -> Option<WasmerType> { self.type_stack.get(local_index as usize).cloned() } fn get_local_types(&self) -> &[WasmerType] { &self.type_stack } fn get_function_type(&self, function_index: FunctionIndex) -> Option<&FunctionType> { let sig_idx = self.module.functions.get(function_index)?; Some(&self.module.signatures[*sig_idx]) } fn get_function_sig(&self, sig_index: SignatureIndex) -> Option<&FunctionType> { self.module.signatures.get(sig_index) } fn translate_drop_locals(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> { // TODO: this allocation can be removed without too much effort but it will require // maneuvering around the borrow checker for (local_index, local_type) in self.type_stack.to_vec().iter().enumerate() { if *local_type == WasmerType::ExternRef { let val = builder.use_var(Variable::with_u32(local_index as _)); self.translate_externref_dec(builder.cursor(), val)?; } } Ok(()) } }
36.080854
99
0.574335
e557953b796b09edb59a261851ebf7eafedbf281
27,703
// This file is lightly modified from project_variables.rs from cargo-generate // source: https://github.com/cargo-generate/cargo-generate // version: 0.9.0 // license: MIT/Apache-2.0 // use crate::generate::{config::Config, ParamMap, TomlMap, PROJECT_NAME_REGEX}; use anyhow::{anyhow, Result}; use regex::Regex; use serde_json::Value; use thiserror::Error; use weld_codegen::render::Renderer; #[derive(Debug)] pub(crate) struct TemplateSlots { pub(crate) var_name: String, pub(crate) var_info: VarInfo, pub(crate) prompt: String, } #[derive(Debug, Clone)] pub(crate) enum VarInfo { Bool { default: Option<bool> }, String { entry: StringEntry }, } #[derive(Debug, Clone)] pub(crate) struct StringEntry { pub(crate) default: Option<String>, pub(crate) choices: Option<Vec<String>>, pub(crate) regex: Option<Regex>, } #[derive(Error, Debug, PartialEq)] pub enum ConversionError { #[error("parameter `{parameter}` of placeholder `{var_name}` should be a `{correct_type}`")] WrongTypeParameter { var_name: String, parameter: String, correct_type: String, }, //#[error("placeholder `{var_name}` should be a table")] //InvalidPlaceholderFormat { var_name: String }, #[error("missing prompt question for `{var_name}`")] MissingPrompt { var_name: String }, #[error("placeholder is missing name {map_dump}")] MissingPlaceholderName { map_dump: String }, #[error("choices array empty for `{var_name}`")] EmptyChoices { var_name: String }, #[error("default is `{default}`, but is not a valid value in choices array `{choices:?}` for `{var_name}`")] InvalidDefault { var_name: String, default: String, choices: Vec<String>, }, #[error( "invalid type for variable `{var_name}`: `{value}` possible values are `bool` and `string`" )] InvalidVariableType { var_name: String, value: String }, #[error("bool type does not support `choices` field")] ChoicesOnBool { var_name: String }, #[error("bool type does not support `regex` field")] RegexOnBool { var_name: String }, //#[error("variable `{var_name}` was missing 
in config file running on silent mode")] //MissingPlaceholderVariable { var_name: String }, #[error("field `{field}` of variable `{var_name}` does not match configured regex")] RegexDoesntMatchField { var_name: String, field: String }, #[error("regex of `{var_name}` is not a valid regex")] InvalidRegex { var_name: String, regex: String }, #[error("placeholder `{var_name}` is not valid as you can't override `project-name`, `crate_name`, `crate_type`, `authors` and `os-arch`")] InvalidPlaceholderName { var_name: String }, #[error("template expansion of value {value} failed: {err}")] TemplateExpansion { value: String, err: String }, } pub(crate) fn validate_project_name(name: &str) -> Result<()> { let exp = regex::Regex::new(PROJECT_NAME_REGEX).unwrap(); match exp.is_match(name) { true => Ok(()), false => Err(anyhow!("Invalid project name. Project name must begin with an ascii letter and contain letters, digits, underscores('_'), or dashes('-').")) } } #[derive(Debug, Clone, PartialEq)] enum SupportedVarValue { Bool(bool), String(String), } #[derive(Debug, Clone, Copy, PartialEq)] enum SupportedVarType { Bool, String, } const RESERVED_NAMES: [&str; 5] = [ "authors", "os-arch", "project-name", "crate_name", "crate_type", ]; /// Determines values of all variables: /// - if map already contains a value, do nothing /// - if silent and prompt has a default, use the prompt's default /// - if !silent, prompt user for value /// Returns list of undefined variables pub(crate) fn fill_project_variables<F>( config: &Config, values: &mut ParamMap, renderer: &weld_codegen::render::Renderer, silent: bool, value_provider: F, ) -> Result<Vec<String>> where F: Fn(&TemplateSlots) -> Result<Value>, { let mut undefined = Vec::new(); for placeholder in config.placeholders.iter() { let mut slot = try_placeholder_into_slot(placeholder, values, renderer)?; let key = slot.var_name.clone(); match values.get(&key) { Some(_) => {} // we already have value for this field None => { // expand string 
default values in case they are templates match slot.var_info { VarInfo::String { entry } if entry.default.is_some() => { slot.var_info = VarInfo::String { entry: expand_default_value(entry, values, renderer)?, }; } _ => {} } match (silent, &slot.var_info) { (false, _) => { let value = value_provider(&slot)?; values.insert(key, value); } (true, VarInfo::Bool { default: Some(b) }) => { values.insert(key, Value::Bool(*b)); } ( true, VarInfo::String { entry: StringEntry { default: Some(s), .. }, }, ) => { values.insert(key, Value::String(s.clone())); } (true, _) => undefined.push(key), } } } } Ok(undefined) } // if default value is a template, expand it fn expand_default_value( entry: StringEntry, values: &ParamMap, renderer: &Renderer, ) -> Result<StringEntry> { if let Some(default) = &entry.default { let new_def = renderer.render_template(default, values)?; Ok(StringEntry { default: Some(new_def), ..entry }) } else { Ok(entry) } } fn try_placeholder_into_slot( table: &TomlMap, values: &ParamMap, renderer: &Renderer, ) -> Result<TemplateSlots, ConversionError> { let key = match table.get("name") { Some(toml::Value::String(key)) => key, _ => { return Err(ConversionError::MissingPlaceholderName { map_dump: format!("{:?}", table), }); } }; if RESERVED_NAMES.contains(&key.as_str()) { return Err(ConversionError::InvalidPlaceholderName { var_name: key.to_string(), }); } let var_type = extract_type(key, table.get("type"))?; let regex = extract_regex(key, var_type, table.get("regex"))?; let prompt = extract_prompt(key, table.get("prompt"))?; let choices = extract_choices(key, var_type, regex.as_ref(), table.get("choices"))?; let default_choice = extract_default( key, var_type, regex.as_ref(), table.get("default"), choices.as_ref(), values, renderer, )?; let var_info = match (var_type, default_choice) { (SupportedVarType::Bool, Some(SupportedVarValue::Bool(value))) => VarInfo::Bool { default: Some(value), }, (SupportedVarType::String, Some(SupportedVarValue::String(value))) 
=> VarInfo::String { entry: StringEntry { default: Some(value), choices, regex, }, }, (SupportedVarType::Bool, None) => VarInfo::Bool { default: None }, (SupportedVarType::String, None) => VarInfo::String { entry: StringEntry { default: None, choices, regex, }, }, _ => unreachable!("It should not have come to this..."), }; Ok(TemplateSlots { var_name: key.to_string(), var_info, prompt, }) } fn extract_regex( var_name: &str, var_type: SupportedVarType, table_entry: Option<&toml::Value>, ) -> Result<Option<Regex>, ConversionError> { match (var_type, table_entry) { (SupportedVarType::Bool, Some(_)) => Err(ConversionError::RegexOnBool { var_name: var_name.into(), }), (SupportedVarType::String, Some(toml::Value::String(value))) => match Regex::new(value) { Ok(regex) => Ok(Some(regex)), Err(_) => Err(ConversionError::InvalidRegex { var_name: var_name.into(), regex: value.clone(), }), }, (SupportedVarType::String, Some(_)) => Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), parameter: "regex".to_string(), correct_type: "String".to_string(), }), (_, None) => Ok(None), } } fn extract_type( var_name: &str, table_entry: Option<&toml::Value>, ) -> Result<SupportedVarType, ConversionError> { match table_entry { None => Ok(SupportedVarType::String), Some(toml::Value::String(value)) if value == "string" => Ok(SupportedVarType::String), Some(toml::Value::String(value)) if value == "bool" => Ok(SupportedVarType::Bool), Some(toml::Value::String(value)) => Err(ConversionError::InvalidVariableType { var_name: var_name.into(), value: value.clone(), }), Some(_) => Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), parameter: "type".to_string(), correct_type: "String".to_string(), }), } } fn extract_prompt( var_name: &str, table_entry: Option<&toml::Value>, ) -> Result<String, ConversionError> { match table_entry { Some(toml::Value::String(value)) => Ok(value.clone()), Some(_) => Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), 
parameter: "prompt".into(), correct_type: "String".into(), }), None => Err(ConversionError::MissingPrompt { var_name: var_name.into(), }), } } fn extract_default( var_name: &str, var_type: SupportedVarType, regex: Option<&Regex>, table_entry: Option<&toml::Value>, choices: Option<&Vec<String>>, values: &ParamMap, renderer: &Renderer, ) -> Result<Option<SupportedVarValue>, ConversionError> { match (table_entry, choices, var_type) { // no default set (None, _, _) => Ok(None), // default set without choices (Some(toml::Value::Boolean(value)), _, SupportedVarType::Bool) => { Ok(Some(SupportedVarValue::Bool(*value))) } (Some(toml::Value::String(value)), None, SupportedVarType::String) => { // perform template expansion on default value let value = renderer.render_template(value, values).map_err(|e| { ConversionError::TemplateExpansion { value: value.clone(), err: e.to_string(), } })?; if let Some(reg) = regex { if !reg.is_match(&value) { return Err(ConversionError::RegexDoesntMatchField { var_name: var_name.into(), field: "default".to_string(), }); } } Ok(Some(SupportedVarValue::String(value))) } // default and choices set // No need to check bool because it always has a choices vec with two values (Some(toml::Value::String(value)), Some(choices), SupportedVarType::String) => { if !choices.contains(value) { Err(ConversionError::InvalidDefault { var_name: var_name.into(), default: value.clone(), choices: choices.clone(), }) } else { // perform template expansion on default value let value = renderer.render_template(value, values).map_err(|e| { ConversionError::TemplateExpansion { value: value.to_string(), err: e.to_string(), } })?; if let Some(reg) = regex { if !reg.is_match(&value) { return Err(ConversionError::RegexDoesntMatchField { var_name: var_name.into(), field: "default".to_string(), }); } } Ok(Some(SupportedVarValue::String(value))) } } // Wrong type of variables (Some(_), _, type_name) => Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), 
parameter: "default".to_string(), correct_type: match type_name { SupportedVarType::Bool => "bool".to_string(), SupportedVarType::String => "string".to_string(), }, }), } } fn extract_choices( var_name: &str, var_type: SupportedVarType, regex: Option<&Regex>, table_entry: Option<&toml::Value>, ) -> Result<Option<Vec<String>>, ConversionError> { match (table_entry, var_type) { (None, SupportedVarType::Bool) => Ok(None), (Some(_), SupportedVarType::Bool) => Err(ConversionError::ChoicesOnBool { var_name: var_name.into(), }), (Some(toml::Value::Array(arr)), SupportedVarType::String) if arr.is_empty() => { Err(ConversionError::EmptyChoices { var_name: var_name.into(), }) } (Some(toml::Value::Array(arr)), SupportedVarType::String) => { // Checks if very entry in the array is a String let converted = arr .iter() .map(|entry| match entry { toml::Value::String(s) => Ok(s.clone()), _ => Err(()), }) .collect::<Vec<_>>(); if converted.iter().any(|v| v.is_err()) { return Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), parameter: "choices".to_string(), correct_type: "String Array".to_string(), }); } let strings = converted .iter() .cloned() .map(|v| v.unwrap()) .collect::<Vec<_>>(); // check if regex matches every choice if let Some(reg) = regex { if strings.iter().any(|v| !reg.is_match(v)) { return Err(ConversionError::RegexDoesntMatchField { var_name: var_name.into(), field: "choices".to_string(), }); } } Ok(Some(strings)) } (Some(_), SupportedVarType::String) => Err(ConversionError::WrongTypeParameter { var_name: var_name.into(), parameter: "choices".to_string(), correct_type: "String Array".to_string(), }), (None, SupportedVarType::String) => Ok(None), } } #[cfg(test)] mod tests { use super::*; const IDENT_REGEX: &str = crate::generate::PROJECT_NAME_REGEX; #[test] fn no_choices_boolean() { let result = extract_choices("foo", SupportedVarType::Bool, None, None); assert_eq!(result, Ok(None)); } #[test] fn boolean_cant_have_choices() { let result = 
extract_choices( "foo", SupportedVarType::Bool, None, Some(&toml::Value::Array(vec![ toml::Value::Boolean(true), toml::Value::Boolean(false), ])), ); assert_eq!( result, Err(ConversionError::ChoicesOnBool { var_name: "foo".into() }) ); } #[test] fn choices_cant_be_an_empty_array() { let result = extract_choices( "foo", SupportedVarType::String, None, Some(&toml::Value::Array(Vec::new())), ); assert_eq!( result, Err(ConversionError::EmptyChoices { var_name: "foo".into() }) ); } #[test] fn choices_array_cant_have_anything_but_strings() { let result = extract_choices( "foo", SupportedVarType::String, None, Some(&toml::Value::Array(vec![ toml::Value::String("bar".into()), toml::Value::Boolean(false), ])), ); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: "choices".into(), correct_type: "String Array".into() }) ); } #[test] fn choices_is_array_string_no_regex_is_fine() { let result = extract_choices( "foo", SupportedVarType::String, None, Some(&toml::Value::Array(vec![ toml::Value::String("bar".into()), toml::Value::String("zoo".into()), ])), ); assert_eq!(result, Ok(Some(vec!["bar".to_string(), "zoo".to_string()]))); } #[test] fn choices_is_array_string_that_doesnt_match_regex_is_error() { let valid_ident = regex::Regex::new(IDENT_REGEX).unwrap(); let result = extract_choices( "foo", SupportedVarType::String, Some(&valid_ident), Some(&toml::Value::Array(vec![ toml::Value::String("0bar".into()), toml::Value::String("zoo".into()), ])), ); assert_eq!( result, Err(ConversionError::RegexDoesntMatchField { var_name: "foo".into(), field: "choices".into() }) ); } #[test] fn choices_is_array_string_that_all_match_regex_is_good() { let valid_ident = regex::Regex::new(IDENT_REGEX).unwrap(); let result = extract_choices( "foo", SupportedVarType::String, Some(&valid_ident), Some(&toml::Value::Array(vec![ toml::Value::String("bar0".into()), toml::Value::String("zoo".into()), ])), ); assert_eq!( result, 
Ok(Some(vec!["bar0".to_string(), "zoo".to_string()])) ); } #[test] fn choices_is_not_array_string_is_error() { let result = extract_choices( "foo", SupportedVarType::String, None, Some(&toml::Value::String("bar".into())), ); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: "choices".into(), correct_type: "String Array".into() }) ); } #[test] fn no_choices_for_type_string() { let result = extract_choices("foo", SupportedVarType::String, None, None); assert_eq!(result, Ok(None)); } #[test] fn empty_default_is_fine() { let result = extract_default( "foo", SupportedVarType::String, None, None, None, &ParamMap::default(), &Renderer::default(), ); assert_eq!(result, Ok(None)); } #[test] fn default_for_boolean_is_fine() { let result = extract_default( "foo", SupportedVarType::Bool, None, Some(&toml::Value::Boolean(true)), None, &ParamMap::default(), &Renderer::default(), ); assert_eq!(result, Ok(Some(SupportedVarValue::Bool(true)))) } #[test] fn default_for_string_with_no_choices_and_no_regex() { let result = extract_default( "foo", SupportedVarType::String, None, Some(&toml::Value::String("bar".to_string())), None, &ParamMap::default(), &Renderer::default(), ); assert_eq!( result, Ok(Some(SupportedVarValue::String("bar".to_string()))) ) } #[test] fn default_for_string_with_no_choices_and_matching_regex() { let valid_ident = regex::Regex::new(IDENT_REGEX).unwrap(); let result = extract_default( "foo", SupportedVarType::String, Some(&valid_ident), Some(&toml::Value::String("bar".to_string())), None, &ParamMap::default(), &Renderer::default(), ); assert_eq!( result, Ok(Some(SupportedVarValue::String("bar".to_string()))) ) } #[test] fn default_for_string_with_no_choices_and_regex_doesnt_match() { let valid_ident = regex::Regex::new(IDENT_REGEX).unwrap(); let result = extract_default( "foo", SupportedVarType::String, Some(&valid_ident), Some(&toml::Value::String("0bar".to_string())), None, &ParamMap::default(), 
&Renderer::default(), ); assert_eq!( result, Err(ConversionError::RegexDoesntMatchField { var_name: "foo".into(), field: "default".into() }) ) } #[test] fn default_for_string_isnt_on_choices() { let result = extract_default( "foo", SupportedVarType::String, None, Some(&toml::Value::String("bar".to_string())), Some(&vec!["zoo".to_string(), "far".to_string()]), &ParamMap::default(), &Renderer::default(), ); assert_eq!( result, Err(ConversionError::InvalidDefault { var_name: "foo".into(), default: "bar".into(), choices: vec!["zoo".to_string(), "far".to_string()] }) ) } #[test] fn default_for_string_is_on_choices() { let result = extract_default( "foo", SupportedVarType::String, None, Some(&toml::Value::String("bar".to_string())), Some(&vec!["zoo".to_string(), "bar".to_string()]), &ParamMap::default(), &Renderer::default(), ); assert_eq!(result, Ok(Some(SupportedVarValue::String("bar".into())))) } #[test] fn default_for_string_is_on_choices_and_matches_regex() { let valid_ident = regex::Regex::new(IDENT_REGEX).unwrap(); let result = extract_default( "foo", SupportedVarType::String, Some(&valid_ident), Some(&toml::Value::String("bar".to_string())), Some(&vec!["zoo".to_string(), "bar".to_string()]), &ParamMap::default(), &Renderer::default(), ); assert_eq!(result, Ok(Some(SupportedVarValue::String("bar".into())))) } #[test] fn default_for_string_only_accepts_strings() { let result = extract_default( "foo", SupportedVarType::String, None, Some(&toml::Value::Integer(0)), None, &ParamMap::default(), &Renderer::default(), ); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: "default".into(), correct_type: "string".into() }) ) } #[test] fn default_for_bool_only_accepts_bool() { let result = extract_default( "foo", SupportedVarType::Bool, None, Some(&toml::Value::Integer(0)), None, &ParamMap::default(), &Renderer::default(), ); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: 
"default".into(), correct_type: "bool".into() }) ) } #[test] fn prompt_cant_be_empty() { let result = extract_prompt("foo", None); assert_eq!( result, Err(ConversionError::MissingPrompt { var_name: "foo".into(), }) ) } #[test] fn prompt_must_be_string() { let result = extract_prompt("foo", Some(&toml::Value::Integer(0))); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: "prompt".into(), correct_type: "String".into() }) ) } #[test] fn prompt_as_string_is_ok() { let result = extract_prompt("foo", Some(&toml::Value::String("hello world".into()))); assert_eq!(result, Ok("hello world".into())) } #[test] fn empty_type_is_string() { let result = extract_type("foo", None); assert_eq!(result, Ok(SupportedVarType::String)); } #[test] fn type_must_be_string_type() { let result = extract_type("foo", Some(&toml::Value::Integer(0))); assert_eq!( result, Err(ConversionError::WrongTypeParameter { var_name: "foo".into(), parameter: "type".into(), correct_type: "String".into() }) ); } #[test] fn type_must_either_be_string_or_bool() { let result_bool = extract_type("foo", Some(&toml::Value::String("bool".into()))); let result_string = extract_type("foo", Some(&toml::Value::String("string".into()))); let result_err = extract_type("foo", Some(&toml::Value::String("bar".into()))); assert_eq!(result_bool, Ok(SupportedVarType::Bool)); assert_eq!(result_string, Ok(SupportedVarType::String)); assert_eq!( result_err, Err(ConversionError::InvalidVariableType { var_name: "foo".into(), value: "bar".into() }) ) } #[test] fn bools_cant_have_regex() { let result = extract_regex( "foo", SupportedVarType::Bool, Some(&toml::Value::String("".into())), ); assert!(result.is_err()) } #[test] fn no_regex_is_ok() { let result_bool = extract_regex("foo", SupportedVarType::Bool, None); let result_string = extract_regex("foo", SupportedVarType::String, None); assert!(result_bool.is_ok()); assert!(result_string.is_ok()) } #[test] fn strings_can_have_regex() { let 
result = extract_regex( "foo", SupportedVarType::String, Some(&toml::Value::String(IDENT_REGEX.into())), ); assert!(result.is_ok()) } #[test] fn invalid_regex_is_err() { let result = extract_regex( "foo", SupportedVarType::String, Some(&toml::Value::String("*".into())), ); assert!(result.is_err()) } }
30.987696
162
0.520954
79f8844295a6c826b0df1e397c3bdd1da9a57228
18,385
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. /// All possible error types for this service. #[non_exhaustive] #[derive(std::fmt::Debug)] pub enum Error { /// <p>This engine is not compatible with the voice that you have designated. /// Choose a new voice that is compatible with the engine or change the engine /// and restart the operation.</p> EngineNotSupportedException(crate::error::EngineNotSupportedException), /// <p>Amazon Polly can't find the specified lexicon. Verify that the lexicon's /// name is spelled correctly, and then try again.</p> InvalidLexiconException(crate::error::InvalidLexiconException), /// <p>The NextToken is invalid. Verify that it's spelled correctly, and /// then try again.</p> InvalidNextTokenException(crate::error::InvalidNextTokenException), /// <p>The provided Amazon S3 bucket name is invalid. Please check your input /// with S3 bucket naming requirements and try again.</p> InvalidS3BucketException(crate::error::InvalidS3BucketException), /// <p>The provided Amazon S3 key prefix is invalid. Please provide a valid /// S3 object key name.</p> InvalidS3KeyException(crate::error::InvalidS3KeyException), /// <p>The specified sample rate is not valid.</p> InvalidSampleRateException(crate::error::InvalidSampleRateException), /// <p>The provided SNS topic ARN is invalid. Please provide a valid SNS /// topic ARN and try again.</p> InvalidSnsTopicArnException(crate::error::InvalidSnsTopicArnException), /// <p>The SSML you provided is invalid. Verify the SSML syntax, spelling /// of tags and values, and then try again.</p> InvalidSsmlException(crate::error::InvalidSsmlException), /// <p>The provided Task ID is not valid. 
Please provide a valid Task ID and /// try again.</p> InvalidTaskIdException(crate::error::InvalidTaskIdException), /// <p>The language specified is not currently supported by Amazon Polly in this /// capacity.</p> LanguageNotSupportedException(crate::error::LanguageNotSupportedException), /// <p>Amazon Polly can't find the specified lexicon. This could be caused by a /// lexicon that is missing, its name is misspelled or specifying a lexicon /// that is in a different region.</p> /// <p>Verify that the lexicon exists, is in the region (see <a>ListLexicons</a>) and that you spelled its name is spelled /// correctly. Then try again.</p> LexiconNotFoundException(crate::error::LexiconNotFoundException), /// <p>The maximum size of the specified lexicon would be exceeded by this /// operation.</p> LexiconSizeExceededException(crate::error::LexiconSizeExceededException), /// <p>Speech marks are not supported for the <code>OutputFormat</code> /// selected. Speech marks are only available for content in <code>json</code> /// format.</p> MarksNotSupportedForFormatException(crate::error::MarksNotSupportedForFormatException), /// <p>The maximum size of the lexeme would be exceeded by this /// operation.</p> MaxLexemeLengthExceededException(crate::error::MaxLexemeLengthExceededException), /// <p>The maximum number of lexicons would be exceeded by this /// operation.</p> MaxLexiconsNumberExceededException(crate::error::MaxLexiconsNumberExceededException), /// <p>An unknown condition has caused a service failure.</p> ServiceFailureException(crate::error::ServiceFailureException), /// <p>SSML speech marks are not supported for plain text-type /// input.</p> SsmlMarksNotSupportedForTextTypeException( crate::error::SsmlMarksNotSupportedForTextTypeException, ), /// <p>The Speech Synthesis task with requested Task ID cannot be /// found.</p> SynthesisTaskNotFoundException(crate::error::SynthesisTaskNotFoundException), /// <p>The value of the "Text" parameter is longer than the 
accepted /// limits. For the <code>SynthesizeSpeech</code> API, the limit for input /// text is a maximum of 6000 characters total, of which no more than 3000 can /// be billed characters. For the <code>StartSpeechSynthesisTask</code> API, /// the maximum is 200,000 characters, of which no more than 100,000 can be /// billed characters. SSML tags are not counted as billed /// characters.</p> TextLengthExceededException(crate::error::TextLengthExceededException), /// <p>The alphabet specified by the lexicon is not a supported alphabet. /// Valid values are <code>x-sampa</code> and <code>ipa</code>.</p> UnsupportedPlsAlphabetException(crate::error::UnsupportedPlsAlphabetException), /// <p>The language specified in the lexicon is unsupported. For a list of /// supported languages, see <a href="https://docs.aws.amazon.com/polly/latest/dg/API_LexiconAttributes.html">Lexicon Attributes</a>.</p> UnsupportedPlsLanguageException(crate::error::UnsupportedPlsLanguageException), /// An unhandled error occurred. 
Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>), } impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Error::EngineNotSupportedException(inner) => inner.fmt(f), Error::InvalidLexiconException(inner) => inner.fmt(f), Error::InvalidNextTokenException(inner) => inner.fmt(f), Error::InvalidS3BucketException(inner) => inner.fmt(f), Error::InvalidS3KeyException(inner) => inner.fmt(f), Error::InvalidSampleRateException(inner) => inner.fmt(f), Error::InvalidSnsTopicArnException(inner) => inner.fmt(f), Error::InvalidSsmlException(inner) => inner.fmt(f), Error::InvalidTaskIdException(inner) => inner.fmt(f), Error::LanguageNotSupportedException(inner) => inner.fmt(f), Error::LexiconNotFoundException(inner) => inner.fmt(f), Error::LexiconSizeExceededException(inner) => inner.fmt(f), Error::MarksNotSupportedForFormatException(inner) => inner.fmt(f), Error::MaxLexemeLengthExceededException(inner) => inner.fmt(f), Error::MaxLexiconsNumberExceededException(inner) => inner.fmt(f), Error::ServiceFailureException(inner) => inner.fmt(f), Error::SsmlMarksNotSupportedForTextTypeException(inner) => inner.fmt(f), Error::SynthesisTaskNotFoundException(inner) => inner.fmt(f), Error::TextLengthExceededException(inner) => inner.fmt(f), Error::UnsupportedPlsAlphabetException(inner) => inner.fmt(f), Error::UnsupportedPlsLanguageException(inner) => inner.fmt(f), Error::Unhandled(inner) => inner.fmt(f), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::DeleteLexiconError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: aws_smithy_http::result::SdkError<crate::error::DeleteLexiconError, R>) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::DeleteLexiconErrorKind::LexiconNotFoundException(inner) => { Error::LexiconNotFoundException(inner) } crate::error::DeleteLexiconErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::DeleteLexiconErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::DescribeVoicesError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: aws_smithy_http::result::SdkError<crate::error::DescribeVoicesError, R>) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::DescribeVoicesErrorKind::InvalidNextTokenException(inner) => { Error::InvalidNextTokenException(inner) } crate::error::DescribeVoicesErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::DescribeVoicesErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetLexiconError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: aws_smithy_http::result::SdkError<crate::error::GetLexiconError, R>) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::GetLexiconErrorKind::LexiconNotFoundException(inner) => { Error::LexiconNotFoundException(inner) } crate::error::GetLexiconErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::GetLexiconErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetSpeechSynthesisTaskError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: aws_smithy_http::result::SdkError<crate::error::GetSpeechSynthesisTaskError, R>, ) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::GetSpeechSynthesisTaskErrorKind::InvalidTaskIdException(inner) => { Error::InvalidTaskIdException(inner) } crate::error::GetSpeechSynthesisTaskErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::GetSpeechSynthesisTaskErrorKind::SynthesisTaskNotFoundException( inner, ) => Error::SynthesisTaskNotFoundException(inner), crate::error::GetSpeechSynthesisTaskErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListLexiconsError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: aws_smithy_http::result::SdkError<crate::error::ListLexiconsError, R>) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::ListLexiconsErrorKind::InvalidNextTokenException(inner) => { Error::InvalidNextTokenException(inner) } crate::error::ListLexiconsErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::ListLexiconsErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::ListSpeechSynthesisTasksError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: aws_smithy_http::result::SdkError<crate::error::ListSpeechSynthesisTasksError, R>, ) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind { crate::error::ListSpeechSynthesisTasksErrorKind::InvalidNextTokenException( inner, ) => Error::InvalidNextTokenException(inner), crate::error::ListSpeechSynthesisTasksErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::ListSpeechSynthesisTasksErrorKind::Unhandled(inner) => { Error::Unhandled(inner) } }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::PutLexiconError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from(err: aws_smithy_http::result::SdkError<crate::error::PutLexiconError, R>) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, .. 
} => match err.kind { crate::error::PutLexiconErrorKind::InvalidLexiconException(inner) => { Error::InvalidLexiconException(inner) } crate::error::PutLexiconErrorKind::LexiconSizeExceededException(inner) => { Error::LexiconSizeExceededException(inner) } crate::error::PutLexiconErrorKind::MaxLexemeLengthExceededException(inner) => { Error::MaxLexemeLengthExceededException(inner) } crate::error::PutLexiconErrorKind::MaxLexiconsNumberExceededException(inner) => { Error::MaxLexiconsNumberExceededException(inner) } crate::error::PutLexiconErrorKind::ServiceFailureException(inner) => { Error::ServiceFailureException(inner) } crate::error::PutLexiconErrorKind::UnsupportedPlsAlphabetException(inner) => { Error::UnsupportedPlsAlphabetException(inner) } crate::error::PutLexiconErrorKind::UnsupportedPlsLanguageException(inner) => { Error::UnsupportedPlsLanguageException(inner) } crate::error::PutLexiconErrorKind::Unhandled(inner) => Error::Unhandled(inner), }, _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::StartSpeechSynthesisTaskError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: aws_smithy_http::result::SdkError<crate::error::StartSpeechSynthesisTaskError, R>, ) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, ..} => match err.kind { crate::error::StartSpeechSynthesisTaskErrorKind::EngineNotSupportedException(inner) => Error::EngineNotSupportedException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::InvalidS3BucketException(inner) => Error::InvalidS3BucketException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::InvalidS3KeyException(inner) => Error::InvalidS3KeyException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::InvalidSampleRateException(inner) => Error::InvalidSampleRateException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::InvalidSnsTopicArnException(inner) => Error::InvalidSnsTopicArnException(inner), 
crate::error::StartSpeechSynthesisTaskErrorKind::InvalidSsmlException(inner) => Error::InvalidSsmlException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::LanguageNotSupportedException(inner) => Error::LanguageNotSupportedException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::LexiconNotFoundException(inner) => Error::LexiconNotFoundException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::MarksNotSupportedForFormatException(inner) => Error::MarksNotSupportedForFormatException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::ServiceFailureException(inner) => Error::ServiceFailureException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::SsmlMarksNotSupportedForTextTypeException(inner) => Error::SsmlMarksNotSupportedForTextTypeException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::TextLengthExceededException(inner) => Error::TextLengthExceededException(inner), crate::error::StartSpeechSynthesisTaskErrorKind::Unhandled(inner) => Error::Unhandled(inner), } _ => Error::Unhandled(err.into()), } } } impl<R> From<aws_smithy_http::result::SdkError<crate::error::SynthesizeSpeechError, R>> for Error where R: Send + Sync + std::fmt::Debug + 'static, { fn from( err: aws_smithy_http::result::SdkError<crate::error::SynthesizeSpeechError, R>, ) -> Self { match err { aws_smithy_http::result::SdkError::ServiceError { err, ..} => match err.kind { crate::error::SynthesizeSpeechErrorKind::EngineNotSupportedException(inner) => Error::EngineNotSupportedException(inner), crate::error::SynthesizeSpeechErrorKind::InvalidSampleRateException(inner) => Error::InvalidSampleRateException(inner), crate::error::SynthesizeSpeechErrorKind::InvalidSsmlException(inner) => Error::InvalidSsmlException(inner), crate::error::SynthesizeSpeechErrorKind::LanguageNotSupportedException(inner) => Error::LanguageNotSupportedException(inner), crate::error::SynthesizeSpeechErrorKind::LexiconNotFoundException(inner) => 
Error::LexiconNotFoundException(inner), crate::error::SynthesizeSpeechErrorKind::MarksNotSupportedForFormatException(inner) => Error::MarksNotSupportedForFormatException(inner), crate::error::SynthesizeSpeechErrorKind::ServiceFailureException(inner) => Error::ServiceFailureException(inner), crate::error::SynthesizeSpeechErrorKind::SsmlMarksNotSupportedForTextTypeException(inner) => Error::SsmlMarksNotSupportedForTextTypeException(inner), crate::error::SynthesizeSpeechErrorKind::TextLengthExceededException(inner) => Error::TextLengthExceededException(inner), crate::error::SynthesizeSpeechErrorKind::Unhandled(inner) => Error::Unhandled(inner), } _ => Error::Unhandled(err.into()), } } } impl std::error::Error for Error {}
56.743827
173
0.659559
1ca4a7379b8ed03e4fe23398f57e708c7597625f
9,864
#![allow(dead_code)] use libc::*; use super::ffi::*; use std::ptr::NonNull; #[repr(transparent)] pub struct Context(NonNull<udev>); impl Context { pub fn new() -> Option<Self> { unsafe { NonNull::new(udev_new()).map(Self) } } pub fn as_ptr(&self) -> *mut udev { self.0.as_ptr() } pub unsafe fn from_ptr(p: *mut udev) -> Option<Self> { NonNull::new(p).map(Self) } pub fn get_userdata(&self) -> *mut libc::c_void { unsafe { udev_get_userdata(self.0.as_ptr()) } } pub fn set_userdata(&mut self, d: *mut libc::c_void) { unsafe { udev_set_userdata(self.0.as_ptr(), d); } } } impl Drop for Context { fn drop(&mut self) { unsafe { udev_unref(self.0.as_ptr()); } } } impl Clone for Context { fn clone(&self) -> Self { unsafe { Self(NonNull::new_unchecked(udev_ref(self.0.as_ptr()))) } } } #[repr(transparent)] pub struct ListEntry(NonNull<udev_list_entry>); impl ListEntry { pub unsafe fn from_ptr(p: *mut udev_list_entry) -> Option<Self> { NonNull::new(p).map(Self) } pub fn next(&self) -> Option<Self> { unsafe { Self::from_ptr(udev_list_entry_get_next(self.0.as_ptr())) } } pub fn get_by_name(&self, name: &std::ffi::CStr) -> Option<Self> { unsafe { NonNull::new(udev_list_entry_get_by_name(self.0.as_ptr(), name.as_ptr())).map(Self) } } pub fn name(&self) -> Option<&std::ffi::CStr> { unsafe { let p = udev_list_entry_get_name(self.0.as_ptr()); if p.is_null() { None } else { Some(std::ffi::CStr::from_ptr(p)) } } } pub fn value(&self) -> Option<&std::ffi::CStr> { unsafe { let p = udev_list_entry_get_value(self.0.as_ptr()); if p.is_null() { None } else { Some(std::ffi::CStr::from_ptr(p)) } } } } #[repr(transparent)] pub struct ListIterator(Option<ListEntry>); impl From<ListEntry> for ListIterator { fn from(v: ListEntry) -> Self { Self(Some(v)) } } impl From<Option<ListEntry>> for ListIterator { fn from(v: Option<ListEntry>) -> Self { Self(v) } } impl Iterator for ListIterator { type Item = ListEntry; fn next(&mut self) -> Option<ListEntry> { let c = self.0.take(); self.0 = 
c.as_ref().and_then(|c| c.next()); c } } #[repr(transparent)] pub struct Monitor(NonNull<udev_monitor>); impl Monitor { pub unsafe fn from_ptr(p: *mut udev_monitor) -> Option<Self> { NonNull::new(p).map(Self) } pub fn as_ptr(&self) -> *mut udev_monitor { self.0.as_ptr() } pub fn from_netlink(context: &Context, name: &std::ffi::CStr) -> Option<Self> { unsafe { Self::from_ptr(udev_monitor_new_from_netlink(context.as_ptr(), name.as_ptr())) } } pub fn udev(&self) -> Context { unsafe { Context::from_ptr(udev_monitor_get_udev(self.as_ptr())).expect("no context?") } } pub fn enable_receiving(&self) -> Result<(), c_int> { let r = unsafe { udev_monitor_enable_receiving(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn set_receive_buffer_size(&self, size: c_int) -> Result<(), c_int> { let r = unsafe { udev_monitor_set_receive_buffer_size(self.as_ptr(), size) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn fd(&self) -> Result<c_int, c_int> { let r = unsafe { udev_monitor_get_fd(self.as_ptr()) }; if r >= 0 { Ok(r) } else { Err(r) } } pub fn receive_device(&self) -> Option<super::Device> { unsafe { super::Device::from_ptr(udev_monitor_receive_device(self.as_ptr())) } } pub fn filter_add_match_subsystem_devtype( &self, subsystem: &std::ffi::CStr, devtype: Option<&std::ffi::CStr> ) -> Result<(), c_int> { let r = unsafe { udev_monitor_filter_add_match_subsystem_devtype( self.as_ptr(), subsystem.as_ptr(), devtype.map_or_else(std::ptr::null, |s| s.as_ptr()) ) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn filter_add_match_tag(&self, tag: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_monitor_filter_add_match_tag(self.as_ptr(), tag.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn filter_update(&self) -> Result<(), c_int> { let r = unsafe { udev_monitor_filter_update(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn filter_remove(&self) -> Result<(), c_int> { let r = unsafe { udev_monitor_filter_remove(self.as_ptr()) }; if r >= 0 
{ Ok(()) } else { Err(r) } } } impl Drop for Monitor { fn drop(&mut self) { unsafe { udev_monitor_unref(self.as_ptr()); } } } impl Clone for Monitor { fn clone(&self) -> Self { unsafe { Self::from_ptr(udev_monitor_ref(self.as_ptr())).expect("clone failed") } } } #[repr(transparent)] pub struct Enumerate(NonNull<udev_enumerate>); impl Enumerate { pub unsafe fn from_ptr(p: *mut udev_enumerate) -> Option<Self> { NonNull::new(p).map(Self) } pub fn as_ptr(&self) -> *mut udev_enumerate { self.0.as_ptr() } pub fn new(context: &Context) -> Option<Self> { unsafe { Self::from_ptr(udev_enumerate_new(context.as_ptr())) } } pub fn udev(&self) -> Context { unsafe { Context::from_ptr(udev_enumerate_get_udev(self.as_ptr())).expect("no context?") } } pub fn add_match_subsystem(&self, subsystem: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_subsystem(self.as_ptr(), subsystem.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_nomatch_subsystem(&self, subsystem: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_nomatch_subsystem(self.as_ptr(), subsystem.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_sysattr(&self, sysattr: &std::ffi::CStr, value: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_sysattr(self.as_ptr(), sysattr.as_ptr(), value.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_nomatch_sysattr(&self, sysattr: &std::ffi::CStr, value: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_nomatch_sysattr(self.as_ptr(), sysattr.as_ptr(), value.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_property(&self, property: &std::ffi::CStr, value: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_property(self.as_ptr(), property.as_ptr(), value.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_sysname(&self, sysname: &std::ffi::CStr) -> Result<(), 
c_int> { let r = unsafe { udev_enumerate_add_match_sysname(self.as_ptr(), sysname.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_tag(&self, tag: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_tag(self.as_ptr(), tag.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_parent(&self, parent: &super::Device) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_parent(self.as_ptr(), parent.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_match_is_initialized(&self) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_match_is_initialized(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn add_syspath(&self, syspath: &std::ffi::CStr) -> Result<(), c_int> { let r = unsafe { udev_enumerate_add_syspath(self.as_ptr(), syspath.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn scan_devices(&self) -> Result<(), c_int> { let r = unsafe { udev_enumerate_scan_devices(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn scan_subsystems(&self) -> Result<(), c_int> { let r = unsafe { udev_enumerate_scan_subsystems(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } pub fn iter(&self) -> ListIterator { unsafe { ListIterator::from(ListEntry::from_ptr(udev_enumerate_get_list_entry(self.as_ptr()))) } } } impl Drop for Enumerate { fn drop(&mut self) { unsafe { udev_enumerate_unref(self.as_ptr()); } } } impl Clone for Enumerate { fn clone(&self) -> Self { unsafe { Self::from_ptr(udev_enumerate_ref(self.as_ptr())).expect("clone failed") } } } #[repr(transparent)] pub struct Queue(NonNull<udev_queue>); impl Queue { pub unsafe fn from_ptr(p: *mut udev_queue) -> Option<Self> { NonNull::new(p).map(Self) } pub fn as_ptr(&self) -> *mut udev_queue { self.0.as_ptr() } pub fn new(context: &Context) -> Option<Self> { unsafe { Self::from_ptr(udev_queue_new(context.as_ptr())) } } pub fn udev(&self) -> Context { unsafe { 
Context::from_ptr(udev_queue_get_udev(self.as_ptr())).expect("no context?") } } pub fn udev_is_active(&self) -> bool { unsafe { udev_queue_get_udev_is_active(self.as_ptr()) == 1 } } pub fn is_empty(&self) -> bool { unsafe { udev_queue_get_queue_is_empty(self.as_ptr()) == 1 } } pub fn fd(&self) -> Result<c_int, c_int> { let r = unsafe { udev_queue_get_fd(self.as_ptr()) }; if r >= 0 { Ok(r) } else { Err(r) } } pub fn flush(&self) -> Result<(), c_int> { let r = unsafe { udev_queue_flush(self.as_ptr()) }; if r >= 0 { Ok(()) } else { Err(r) } } } impl Drop for Queue { fn drop(&mut self) { unsafe { udev_queue_unref(self.as_ptr()); } } } impl Clone for Queue { fn clone(&self) -> Self { unsafe { Self::from_ptr(udev_queue_ref(self.as_ptr())).expect("clone failed") } } } #[repr(transparent)] pub struct HardwareDatabase(NonNull<udev_hwdb>); impl HardwareDatabase { pub unsafe fn from_ptr(p: *mut udev_hwdb) -> Option<Self> { NonNull::new(p).map(Self) } pub fn as_ptr(&self) -> *mut udev_hwdb { self.0.as_ptr() } pub fn new(context: &Context) -> Option<Self> { unsafe { Self::from_ptr(udev_hwdb_new(context.as_ptr())) } } pub fn iter_properties(&self, modalias: &std::ffi::CStr, flags: c_uint) -> ListIterator { unsafe { ListIterator::from( ListEntry::from_ptr(udev_hwdb_get_properties_list_entry(self.as_ptr(), modalias.as_ptr(), flags)) ) } } } impl Drop for HardwareDatabase { fn drop(&mut self) { unsafe { udev_hwdb_unref(self.as_ptr()); } } } impl Clone for HardwareDatabase { fn clone(&self) -> Self { unsafe { Self::from_ptr(udev_hwdb_ref(self.as_ptr())).expect("clone failed") } } }
34.25
107
0.649635
2f95adb8a31200c0061d974355231f887dbebea0
277
use unicode_truncate::Alignment; use unicode_truncate::UnicodeTruncateStr; #[test] fn main() { let (rv, w) = "你好吗".unicode_truncate(5); assert_eq!(rv, "你好"); assert_eq!(w, 4); let rv = "你好吗".unicode_pad(5, Alignment::Left, true); assert_eq!(rv, "你好 "); }
21.307692
57
0.631769
7681e20fe4ab42547a3dd2ae39e45199e336732b
7,339
//! Parks the runtime. //! //! A combination of the various resource driver park handles. use crate::loom::sync::{Arc, Mutex, Condvar}; use crate::loom::sync::atomic::AtomicUsize; use crate::park::{Park, Unpark}; use crate::runtime::time; use crate::util::TryLock; use std::sync::atomic::Ordering::SeqCst; use std::time::Duration; pub(crate) struct Parker { inner: Arc<Inner>, } pub(crate) struct Unparker { inner: Arc<Inner>, } struct Inner { /// Avoids entering the park if possible state: AtomicUsize, /// Used to coordinate access to the driver / condvar mutex: Mutex<()>, /// Condvar to block on if the driver is unavailable. condvar: Condvar, /// Resource (I/O, time, ...) driver shared: Arc<Shared>, } const EMPTY: usize = 0; const PARKED_CONDVAR: usize = 1; const PARKED_DRIVER: usize = 2; const NOTIFIED: usize = 3; /// Shared across multiple Parker handles struct Shared { /// Shared driver. Only one thread at a time can use this driver: TryLock<time::Driver>, /// Unpark handle handle: <time::Driver as Park>::Unpark, } impl Parker { pub(crate) fn new(driver: time::Driver) -> Parker { let handle = driver.unpark(); Parker { inner: Arc::new(Inner { state: AtomicUsize::new(EMPTY), mutex: Mutex::new(()), condvar: Condvar::new(), shared: Arc::new(Shared { driver: TryLock::new(driver), handle, }), }), } } } impl Clone for Parker { fn clone(&self) -> Parker { Parker { inner: Arc::new(Inner { state: AtomicUsize::new(EMPTY), mutex: Mutex::new(()), condvar: Condvar::new(), shared: self.inner.shared.clone(), }), } } } impl Park for Parker { type Unpark = Unparker; type Error = (); fn unpark(&self) -> Unparker { Unparker { inner: self.inner.clone() } } fn park(&mut self) -> Result<(), Self::Error> { self.inner.park(); Ok(()) } fn park_timeout(&mut self, duration: Duration) -> Result<(), Self::Error> { // Only parking with zero is supported... 
assert_eq!(duration, Duration::from_millis(0)); if let Some(mut driver) = self.inner.shared.driver.try_lock() { driver.park_timeout(duration) .map_err(|_| ()) } else { Ok(()) } } } impl Unpark for Unparker { fn unpark(&self) { self.inner.unpark(); } } impl Inner { /// Park the current thread for at most `dur`. fn park(&self) { // If we were previously notified then we consume this notification and // return quickly. if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { return; } if let Some(mut driver) = self.shared.driver.try_lock() { self.park_driver(&mut driver); } else { self.park_condvar(); } } fn park_condvar(&self) { // Otherwise we need to coordinate going to sleep let mut m = self.mutex.lock().unwrap(); match self.state.compare_exchange(EMPTY, PARKED_CONDVAR, SeqCst, SeqCst) { Ok(_) => {} Err(NOTIFIED) => { // We must read here, even though we know it will be `NOTIFIED`. // This is because `unpark` may have been called again since we read // `NOTIFIED` in the `compare_exchange` above. We must perform an // acquire operation that synchronizes with that `unpark` to observe // any writes it made before the call to unpark. To do that we must // read from the write it made to `state`. let old = self.state.swap(EMPTY, SeqCst); debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); return; } Err(actual) => panic!("inconsistent park state; actual = {}", actual), } loop { m = self.condvar.wait(m).unwrap(); if self.state.compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst).is_ok() { // got a notification return; } // spurious wakeup, go back to sleep } } fn park_driver(&self, driver: &mut time::Driver) { match self.state.compare_exchange(EMPTY, PARKED_DRIVER, SeqCst, SeqCst) { Ok(_) => {} Err(NOTIFIED) => { // We must read here, even though we know it will be `NOTIFIED`. // This is because `unpark` may have been called again since we read // `NOTIFIED` in the `compare_exchange` above. 
We must perform an // acquire operation that synchronizes with that `unpark` to observe // any writes it made before the call to unpark. To do that we must // read from the write it made to `state`. let old = self.state.swap(EMPTY, SeqCst); debug_assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); return; } Err(actual) => panic!("inconsistent park state; actual = {}", actual), } // TODO: don't unwrap driver.park().unwrap(); match self.state.swap(EMPTY, SeqCst) { NOTIFIED => {} // got a notification, hurray! PARKED_DRIVER => {} // no notification, alas n => panic!("inconsistent park_timeout state: {}", n), } } fn unpark(&self) { // To ensure the unparked thread will observe any writes we made before // this call, we must perform a release operation that `park` can // synchronize with. To do that we must write `NOTIFIED` even if `state` // is already `NOTIFIED`. That is why this must be a swap rather than a // compare-and-swap that returns if it reads `NOTIFIED` on failure. match self.state.swap(NOTIFIED, SeqCst) { EMPTY => {}, // no one was waiting NOTIFIED => {}, // already unparked PARKED_CONDVAR => self.unpark_condvar(), PARKED_DRIVER => self.unpark_driver(), actual => panic!("inconsistent state in unpark; actual = {}", actual), } } fn unpark_condvar(&self) { // There is a period between when the parked thread sets `state` to // `PARKED` (or last checked `state` in the case of a spurious wake // up) and when it actually waits on `cvar`. If we were to notify // during this period it would be ignored and then when the parked // thread went to sleep it would never wake up. Fortunately, it has // `lock` locked at this stage so we can acquire `lock` to wait until // it is ready to receive the notification. // // Releasing `lock` before the call to `notify_one` means that when the // parked thread wakes it doesn't get woken only to have to wait for us // to release `lock`. 
drop(self.mutex.lock().unwrap()); self.condvar.notify_one() } fn unpark_driver(&self) { self.shared.handle.unpark(); } }
32.473451
85
0.562474
8733f7de244385f6bdb550c87ae27b3b2a40e70a
255
use super::*; use crate::Hash256; impl TestRandom for Hash256 { fn random_for_test(rng: &mut impl RngCore) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); Hash256::from_slice(&key_bytes[..]) } }
23.181818
56
0.615686
def49253b9102865c1b2e311d043e51c2ea7669d
4,063
use svm_types::{BytesPrimitive, Sections, TemplateAddr}; use crate::{GlobalState, StorageResult}; /// A [`GlobalState`] wrapper, enriched with utility methods to access and /// modify [`Template`](svm_types::Template) data. pub struct TemplateStorage { /// The internal [`GlobalState`] instance used to access the database layer. pub gs: GlobalState, /// The [`TemplateAddr`] of `self`. pub addr: TemplateAddr, } impl TemplateStorage { /// Loads a [`TemplateStorage`] from the given address and /// [`GlobalState`] instance. pub fn load(gs: GlobalState, template_addr: &TemplateAddr) -> StorageResult<Self> { Ok(Self { gs, addr: template_addr.clone(), }) } /// Saves a [`TemplateStorage`] at the given address and /// on the given [`GlobalState`] instance. pub fn create( mut gs: GlobalState, template_addr: &TemplateAddr, core_sections: Sections, noncore_sections: Sections, ) -> StorageResult<Self> { gs.encode_and_write(&core_sections, &key_core(&template_addr)); gs.encode_and_write(&noncore_sections, &key_noncore(&template_addr)); Ok(Self { gs, addr: template_addr.clone(), }) } /// Reads, decodes and finally returns all [`Sections`] of `self`. pub fn sections(&self) -> StorageResult<Sections> { read_sections(&self.gs, &self.addr) } /// Overwrites the "core" (mandatory) [`Sections`] associated with /// `self`. pub fn set_core(&mut self, sections: &Sections) -> StorageResult<()> { let key = key_core(&self.addr); self.gs.encode_and_write(sections, &key); Ok(()) } /// Overwrites the "non-core" (optional) [`Sections`] associated with /// `self`. 
pub fn set_noncore(&mut self, sections: &Sections) -> StorageResult<()> { let key = key_noncore(&self.addr); self.gs.encode_and_write(sections, &key); Ok(()) } } fn key_core(template_addr: &TemplateAddr) -> String { format!("templates:{}:core", template_addr.to_string()) } fn key_noncore(template_addr: &TemplateAddr) -> String { format!("templates:{}:noncore", template_addr.to_string()) } fn read_sections(gs: &GlobalState, addr: &TemplateAddr) -> StorageResult<Sections> { let mut sections = gs.read_and_decode::<Sections>(&key_core(addr))?; let noncore = gs.read_and_decode::<Sections>(&key_noncore(addr))?; for s in noncore.iter().cloned() { sections.insert(s); } Ok(sections) } #[cfg(test)] mod test { use svm_layout::{FixedLayout, Layout}; use svm_types::{CodeSection, CtorsSection, DataSection, SectionKind, Sections, Template}; use super::*; fn fixed_layout() -> FixedLayout { FixedLayout::from_byte_sizes(0, &[10, 20, 4, 30, 64, 31, 100, 4, 8, 8]) } fn new_template(gs: &GlobalState) -> TemplateAddr { let template_addr = TemplateAddr::repeat(0x80); let code_section = CodeSection::new( svm_types::CodeKind::Wasm, vec![], 0, svm_types::GasMode::Fixed, 0, ); let data_section = DataSection::with_layout(Layout::Fixed(fixed_layout())); let ctors_section = CtorsSection::new(vec![]); let core_sections = Template::new(code_section, data_section, ctors_section) .sections() .clone(); let noncore_sections = Sections::with_capacity(0); TemplateStorage::create(gs.clone(), &template_addr, core_sections, noncore_sections) .unwrap(); template_addr } #[test] fn create_then_load() { let gs = GlobalState::in_memory(); let template_addr = new_template(&gs); let template_storage = TemplateStorage::load(gs, &template_addr).unwrap(); assert!(template_storage .sections() .unwrap() .get(SectionKind::Code) .as_code() .code() .is_empty()); } }
29.875
93
0.610879
38d21c25b43696421edd92d2f54fccb7b0a2f6d1
6,534
use crate::Error; use algebra::{bytes::ToBytes, to_bytes, ToConstraintField}; use r1cs_core::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError}; use crypto_primitives::{CommitmentScheme, FixedLengthCRH}; use crate::{ constraints::{plain_dpc::execute_proof_check_gadget, Assignment}, dpc::plain_dpc::{CommAndCRHPublicParameters, PlainDPCComponents, PrivatePredInput}, }; #[derive(Derivative)] #[derivative(Clone(bound = "C: PlainDPCComponents"))] pub struct ProofCheckVerifierInput<C: PlainDPCComponents> { pub comm_and_crh_pp: CommAndCRHPublicParameters<C>, pub predicate_comm: <C::PredVkComm as CommitmentScheme>::Output, pub local_data_comm: <C::LocalDataComm as CommitmentScheme>::Output, } impl<C: PlainDPCComponents> ToConstraintField<C::ProofCheckF> for ProofCheckVerifierInput<C> where <C::PredVkComm as CommitmentScheme>::Parameters: ToConstraintField<C::ProofCheckF>, <C::PredVkComm as CommitmentScheme>::Output: ToConstraintField<C::ProofCheckF>, <C::PredVkH as FixedLengthCRH>::Parameters: ToConstraintField<C::ProofCheckF>, <C::LocalDataComm as CommitmentScheme>::Parameters: ToConstraintField<C::CoreCheckF>, <C::LocalDataComm as CommitmentScheme>::Output: ToConstraintField<C::CoreCheckF>, { fn to_field_elements(&self) -> Result<Vec<C::ProofCheckF>, Error> { let mut v = Vec::new(); v.extend_from_slice(&self.comm_and_crh_pp.pred_vk_comm_pp.to_field_elements()?); v.extend_from_slice(&self.comm_and_crh_pp.pred_vk_crh_pp.to_field_elements()?); let local_data_comm_pp_fe = ToConstraintField::<C::CoreCheckF>::to_field_elements( &self.comm_and_crh_pp.local_data_comm_pp, ) .map_err(|_| SynthesisError::AssignmentMissing)?; let local_data_comm_fe = ToConstraintField::<C::CoreCheckF>::to_field_elements(&self.local_data_comm) .map_err(|_| SynthesisError::AssignmentMissing)?; // Then we convert these field elements into bytes let pred_input = [ to_bytes![local_data_comm_pp_fe].map_err(|_| SynthesisError::AssignmentMissing)?, to_bytes![local_data_comm_fe].map_err(|_| 
SynthesisError::AssignmentMissing)?, ]; // Then we convert them into `C::ProofCheckF::Fr` elements. v.extend_from_slice(&ToConstraintField::<C::ProofCheckF>::to_field_elements( pred_input[0].as_slice(), )?); v.extend_from_slice(&ToConstraintField::<C::ProofCheckF>::to_field_elements( pred_input[1].as_slice(), )?); v.extend_from_slice(&self.predicate_comm.to_field_elements()?); Ok(v) } } #[derive(Derivative)] #[derivative(Clone(bound = "C: PlainDPCComponents"))] pub struct ProofCheckCircuit<C: PlainDPCComponents> { comm_and_crh_parameters: Option<CommAndCRHPublicParameters<C>>, old_private_pred_inputs: Option<Vec<PrivatePredInput<C>>>, new_private_pred_inputs: Option<Vec<PrivatePredInput<C>>>, predicate_comm: Option<<C::PredVkComm as CommitmentScheme>::Output>, predicate_rand: Option<<C::PredVkComm as CommitmentScheme>::Randomness>, local_data_comm: Option<<C::LocalDataComm as CommitmentScheme>::Output>, } impl<C: PlainDPCComponents> ProofCheckCircuit<C> { pub fn blank( comm_and_crh_parameters: &CommAndCRHPublicParameters<C>, predicate_nizk_vk_and_proof: &PrivatePredInput<C>, ) -> Self { let num_input_records = C::NUM_INPUT_RECORDS; let num_output_records = C::NUM_OUTPUT_RECORDS; let old_private_pred_inputs = Some(vec![predicate_nizk_vk_and_proof.clone(); num_input_records]); let new_private_pred_inputs = Some(vec![ predicate_nizk_vk_and_proof.clone(); num_output_records ]); let predicate_comm = Some(<C::PredVkComm as CommitmentScheme>::Output::default()); let predicate_rand = Some(<C::PredVkComm as CommitmentScheme>::Randomness::default()); let local_data_comm = Some(<C::LocalDataComm as CommitmentScheme>::Output::default()); Self { comm_and_crh_parameters: Some(comm_and_crh_parameters.clone()), old_private_pred_inputs, new_private_pred_inputs, predicate_comm, predicate_rand, local_data_comm, } } pub fn new( comm_and_crh_parameters: &CommAndCRHPublicParameters<C>, // Private pred input = Verification key and input // Commitment contains commitment to hash of death 
predicate vk. old_private_pred_inputs: &[PrivatePredInput<C>], // Private pred input = Verification key and input // Commitment contains commitment to hash of birth predicate vk. new_private_pred_inputs: &[PrivatePredInput<C>], predicate_comm: &<C::PredVkComm as CommitmentScheme>::Output, predicate_rand: &<C::PredVkComm as CommitmentScheme>::Randomness, local_data_comm: &<C::LocalDataComm as CommitmentScheme>::Output, ) -> Self { let num_input_records = C::NUM_INPUT_RECORDS; let num_output_records = C::NUM_OUTPUT_RECORDS; assert_eq!(num_input_records, old_private_pred_inputs.len()); assert_eq!(num_output_records, new_private_pred_inputs.len()); Self { comm_and_crh_parameters: Some(comm_and_crh_parameters.clone()), old_private_pred_inputs: Some(old_private_pred_inputs.to_vec()), new_private_pred_inputs: Some(new_private_pred_inputs.to_vec()), predicate_comm: Some(predicate_comm.clone()), predicate_rand: Some(predicate_rand.clone()), local_data_comm: Some(local_data_comm.clone()), } } } impl<C: PlainDPCComponents> ConstraintSynthesizer<C::ProofCheckF> for ProofCheckCircuit<C> where <C::LocalDataComm as CommitmentScheme>::Output: ToConstraintField<C::CoreCheckF>, <C::LocalDataComm as CommitmentScheme>::Parameters: ToConstraintField<C::CoreCheckF>, { fn generate_constraints( self, cs: ConstraintSystemRef<C::ProofCheckF>, ) -> Result<(), SynthesisError> { execute_proof_check_gadget::<C>( cs, self.comm_and_crh_parameters.get()?, self.old_private_pred_inputs.get()?.as_slice(), self.new_private_pred_inputs.get()?.as_slice(), self.predicate_comm.get()?, self.predicate_rand.get()?, self.local_data_comm.get()?, )?; Ok(()) } }
39.6
94
0.690695
f48bf523fea357fd1b845e5e1ab1ca7dbdfd2e61
1,286
// Autogenerated from KST: please remove this line if doing any edits by hand! extern crate kaitai_struct; extern crate rust; use kaitai_struct::KaitaiStruct; use rust::DefaultEndianExprInherited; #[test] fn test_default_endian_expr_inherited() { if let Ok(r) = DefaultEndianExprInherited::from_file("src/endian_expr.bin") { assert_eq!(r.docs[0].indicator, vec!([0x49, 0x49])); assert_eq!(r.docs[0].main.insides.some_int, 66); assert_eq!(r.docs[0].main.insides.more.some_int1, 16896); assert_eq!(r.docs[0].main.insides.more.some_int2, 66); assert_eq!(r.docs[0].main.insides.more.some_inst, 66); assert_eq!(r.docs[1].indicator, vec!([0x4d, 0x4d])); assert_eq!(r.docs[1].main.insides.some_int, 66); assert_eq!(r.docs[1].main.insides.more.some_int1, 66); assert_eq!(r.docs[1].main.insides.more.some_int2, 16896); assert_eq!(r.docs[1].main.insides.more.some_inst, 1107296256); assert_eq!(r.docs[2].indicator, vec!([0x58, 0x58])); assert_eq!(r.docs[2].main.insides.some_int, 66); assert_eq!(r.docs[2].main.insides.more.some_int1, 66); assert_eq!(r.docs[2].main.insides.more.some_int2, 16896); assert_eq!(r.docs[2].main.insides.more.some_inst, 1107296256); } }
44.344828
81
0.674961
5d51fc3983415547f312c85d4dc9c9e56c3953a8
12,534
use std::ops::{Add, Sub}; use std::time::{Duration, SystemTime}; use std::default::Default; use std::thread; use redis; use rand::{thread_rng, Rng}; use scripts::{LOCK, UNLOCK, EXTEND}; use errors::{RedlockResult, RedlockError}; use util; #[derive(Debug)] enum RequestInfo<'a> { Lock, Extend { resource_value: &'a str }, } // Lock represents a acquired lock for specified resource. #[derive(Debug)] pub struct Lock<'a> { redlock: &'a Redlock, resource_name: String, value: String, expiration: SystemTime, } impl<'a> Lock<'a> { // Release the acquired lock. pub fn unlock(&self) -> RedlockResult<()> { self.redlock.unlock(&self.resource_name, &self.value) } // Extend the TTL of acquired lock. pub fn extend(&self, ttl: Duration) -> RedlockResult<Lock> { if self.expiration < SystemTime::now() { return Err(RedlockError::LockExpired); } Ok(self.redlock.extend(&self.resource_name, &self.value, ttl)?) } } // Configuration of Redlock pub struct Config<T> where T: redis::IntoConnectionInfo { pub addrs: Vec<T>, pub retry_count: u32, pub retry_delay: Duration, pub retry_jitter: u32, pub drift_factor: f32, } impl Default for Config<&'static str> { fn default() -> Self { Config { addrs: vec!["redis://127.0.0.1"], retry_count: 10, retry_delay: Duration::from_millis(400), retry_jitter: 400, drift_factor: 0.01, } } } #[derive(Debug)] pub struct Redlock { clients: Vec<redis::Client>, retry_count: u32, retry_delay: Duration, retry_jitter: u32, drift_factor: f32, quorum: usize, } impl Redlock { // Create a new redlock instance. pub fn new<T: redis::IntoConnectionInfo>(config: Config<T>) -> RedlockResult<Redlock> { if config.addrs.is_empty() { return Err(RedlockError::NoServerError); } let mut clients = Vec::with_capacity(config.addrs.len()); for addr in config.addrs { clients.push(redis::Client::open(addr)?) 
} let quorum = (clients.len() as f64 / 2_f64).floor() as usize + 1; Ok(Redlock { clients, retry_count: config.retry_count, retry_delay: config.retry_delay, retry_jitter: config.retry_jitter, drift_factor: config.drift_factor, quorum, }) } // Locks the given resource using the Redlock algorithm. pub fn lock(&self, resource_name: &str, ttl: Duration) -> RedlockResult<Lock> { self.request(RequestInfo::Lock, resource_name, ttl) } fn extend(&self, resource_name: &str, value: &str, ttl: Duration) -> RedlockResult<Lock> { self.request(RequestInfo::Extend { resource_value: value }, resource_name, ttl) } fn request(&self, info: RequestInfo, resource_name: &str, ttl: Duration) -> RedlockResult<(Lock)> { let mut attempts = 0; let drift = Duration::from_millis((self.drift_factor as f64 * util::num_milliseconds(&ttl) as f64) .round() as u64 + 2); 'attempts: while attempts < self.retry_count { attempts += 1; // Start time of this attempt let start = SystemTime::now(); let mut waitings = self.clients.len(); let mut votes = 0; let mut errors = 0; let value: String = match info { RequestInfo::Lock => util::get_random_string(32), RequestInfo::Extend { resource_value } => String::from(resource_value), }; for client in &self.clients { let request_result = match info { RequestInfo::Lock => lock(client, resource_name, &value, &ttl), RequestInfo::Extend { .. 
} => extend(client, resource_name, &value, &ttl), }; let lock = Lock { redlock: self, resource_name: String::from(resource_name), value: value.clone(), expiration: start + ttl - drift, }; match request_result { Ok(success) => { waitings -= 1; if !success { continue; } votes += 1; if waitings > 0 { continue; } // suceess: aquire the lock if votes >= self.quorum && lock.expiration > SystemTime::now() { return Ok(lock); } // fail: releases all aquired locks and retry lock.unlock().is_ok(); // Just ingore the result thread::sleep(self.get_retry_timeout()); continue 'attempts; } Err(_) => { errors += 1; // This attempt is doomed to fail, will retry after // the timeout if errors > self.quorum { lock.unlock().is_ok(); // Just ingore the result thread::sleep(self.get_retry_timeout()); continue 'attempts; } } } } } // Exceed the retry count, return the error match info { RequestInfo::Lock => Err(RedlockError::UnableToLock), RequestInfo::Extend { .. } => Err(RedlockError::UnableToExtend), } } fn unlock(&self, resource_name: &str, value: &str) -> RedlockResult<()> { let mut attempts = 0; 'attempts: while attempts < self.retry_count { attempts += 1; let mut waitings = self.clients.len(); let mut votes = 0; let mut errors = 0; for client in &self.clients { match unlock(client, resource_name, value) { Ok(success) => { waitings -= 1; if !success { continue; } votes += 1; if waitings > 0 { continue; } if votes >= self.quorum { return Ok(()); } } Err(_) => { errors += 1; // This attempt is doomed to fail, will retry after // the timeout if errors >= self.quorum { thread::sleep(self.get_retry_timeout()); continue 'attempts; } } } } } // Exceed the retry count, return the error Err(RedlockError::UnableToUnlock) } fn get_retry_timeout(&self) -> Duration { let jitter = self.retry_jitter as i32 * thread_rng().gen_range(-1, 1); if jitter >= 0 { self.retry_delay.add(Duration::from_millis(jitter as u64)) } else { self.retry_delay.sub(Duration::from_millis(-jitter as u64)) } } } fn 
lock(client: &redis::Client, resource_name: &str, value: &str, ttl: &Duration) -> RedlockResult<bool> { match LOCK.key(String::from(resource_name)) .arg(String::from(value)) .arg(util::num_milliseconds(ttl)) .invoke::<Option<()>>(&client.get_connection()?)? { Some(_) => Ok(true), _ => Ok(false), } } fn unlock(client: &redis::Client, resource_name: &str, value: &str) -> RedlockResult<bool> { match UNLOCK .key(resource_name) .arg(value) .invoke::<i32>(&client.get_connection()?)? { 1 => Ok(true), _ => Ok(false), } } fn extend(client: &redis::Client, resource_name: &str, value: &str, ttl: &Duration) -> RedlockResult<bool> { match EXTEND .key(resource_name) .arg(value) .arg(util::num_milliseconds(ttl)) .invoke::<i32>(&client.get_connection()?)? { 1 => Ok(true), _ => Ok(false), } } #[cfg(test)] mod tests { use super::*; use redis::Commands; lazy_static! { static ref REDLOCK: Redlock = Redlock::new::<&str>(Config { addrs: vec!["redis://127.0.0.1"], retry_count: 10, retry_delay: Duration::from_millis(400), retry_jitter: 400, drift_factor: 0.01, }).unwrap(); static ref REDIS_CLI: redis::Client = redis::Client::open("redis://127.0.0.1").unwrap(); } #[test] fn test_config_default() { let default_config = Config::default(); assert_eq!(default_config.addrs, vec!["redis://127.0.0.1"]); assert_eq!(default_config.retry_count, 10); assert_eq!(default_config.retry_delay, Duration::from_millis(400)); assert_eq!(default_config.retry_jitter, 400); assert_eq!(default_config.drift_factor, 0.01); } #[test] #[should_panic] fn test_new_with_no_server() { Redlock::new::<&str>(Config { addrs: vec![], retry_count: 10, retry_delay: Duration::from_millis(400), retry_jitter: 400, drift_factor: 0.01, }) .unwrap(); } #[test] fn test_new() { let redlock = Redlock::new(Config::default()).unwrap(); assert_eq!(redlock.clients.len(), 1); assert_eq!(redlock.retry_count, 10); assert_eq!(redlock.retry_delay, Duration::from_millis(400)); } #[test] fn test_lock() { let resource_name = "test_lock"; let 
one_second = Duration::from_millis(1000); let lock = REDLOCK.lock(resource_name, one_second).unwrap(); assert!(lock.expiration < SystemTime::now().add(one_second)); } #[test] fn test_lock_twice() { let resource_name = "test_lock_twice"; let one_second = Duration::from_millis(1000); let start = SystemTime::now(); let lock = REDLOCK.lock(resource_name, one_second).unwrap(); assert!(lock.expiration > start); assert!(lock.expiration < start.add(one_second)); assert!(REDLOCK.lock(resource_name, one_second).is_err()); thread::sleep(one_second); assert!(REDLOCK.lock(resource_name, one_second).is_ok()); } #[test] fn test_unlock() { let resource_name = "test_unlock"; let lock = REDLOCK .lock(resource_name, Duration::from_millis(2000)) .unwrap(); let value: String = REDIS_CLI .get_connection() .unwrap() .get(resource_name) .unwrap(); assert_eq!(value.len(), 32); lock.unlock().unwrap(); let res: Option<String> = REDIS_CLI .get_connection() .unwrap() .get(resource_name) .unwrap(); assert!(res.is_none()); } #[test] fn test_extend() { let resource_name = "test_extend"; let lock = REDLOCK .lock(resource_name, Duration::from_millis(2000)) .unwrap(); let lock_extended = lock.extend(Duration::from_millis(2000)).unwrap(); assert!(lock_extended.expiration < SystemTime::now().add(Duration::from_millis(2000))); } #[test] fn test_extend_expired_resource() { let one_second = Duration::from_millis(1000); let resource_name = "test_extend_expired_resource"; let lock = REDLOCK.lock(resource_name, one_second).unwrap(); thread::sleep(one_second * 2); assert!(lock.extend(one_second).is_err()); } }
31.492462
96
0.50008
c1c62854e945e0bfeada21ea9c60cbd0a9450a3d
1,388
extern crate weathergov; use std::{thread, time}; use std::env; fn main() { if let Some(station) = env::args().nth(1) { println!("{}", current_weather(&station)); } else { println!("Please specify station."); } } pub fn current_weather(station: &str) -> String { // Gets the current temperature and finds an icon appropriate to describe the conditions let mut error_message = String::new(); for _ in 0..30 { // Retrieve the weather and format it as an icon and a temperature in celsius. // If an error occurs, retry every 30 seconds for up to 15 minutes. match weathergov::get_current_observation(station) { Ok(data) => { let temperature = match data.temp_c { Some(d) => format!("{}°C", d.round() as i64), None => "N/A".to_owned() }; let weather = match data.weather { Some(d) => d.to_string(), None => "N/A".to_owned() }; return format!("{} {}", weather, temperature); } Err(e) => { error_message = format!("{:?}", e); let thirty_seconds = time::Duration::from_secs(30); thread::sleep(thirty_seconds); } } } format!("Error: {}", error_message) }
32.27907
92
0.512248
89edee13a255f35247bb3b309cc282cc6b09401a
1,748
use crate::service_actor::{ServiceActor, ServiceActorContext}; use crate::{RoomActor, Server}; use actix::dev::channel::channel; use actix::{Addr, Arbiter, Context}; use actix_web::Result; use stateroom::{StateroomService, StateroomServiceFactory}; const MAILBOX_SIZE: usize = 16; pub struct ServerState { pub room_addr: Addr<RoomActor>, pub settings: Server, } impl ServerState { pub fn new<J>( service_factory: impl StateroomServiceFactory<ServiceActorContext, Service = J>, settings: Server, ) -> Result<Self> where J: StateroomService, { let arbiter = Arbiter::new(); let (room_tx, room_rx) = channel(MAILBOX_SIZE); let (service_tx, service_rx) = channel(MAILBOX_SIZE); let room_addr = Addr::new(room_tx); let service_addr = Addr::new(service_tx); { let room_addr = room_addr.clone(); arbiter.spawn_fn(move || { let room_ctx = Context::with_receiver(room_rx); let service_ctx = Context::with_receiver(service_rx); let service_actor = ServiceActor::<J>::new( &service_ctx, service_factory, room_addr.clone().recipient(), ); let room_actor = RoomActor::new(service_addr.recipient()); room_ctx.run(room_actor); if let Some(service_actor) = service_actor { service_ctx.run(service_actor); } else { tracing::error!("Could not create service actor for room"); } }); } Ok(ServerState { settings, room_addr, }) } }
29.627119
88
0.567506
f491e685f5a6002d114b5daf02b31133731fcaae
5,300
use ethane::rpc::{self, Rpc}; use ethane::types::{Bytes, PrivateKey, TransactionRequest, H160, H256, U256}; use rand::Rng; use serde::de::DeserializeOwned; use serde_json::Value; use std::fmt::Debug; use std::path::Path; use std::process::Command; use std::str::FromStr; use tiny_keccak::{Hasher, Keccak}; mod spin_up; pub use spin_up::{ConnectorNodeBundle, ConnectorWrapper, NodeProcess}; mod fixtures; pub use fixtures::*; pub fn wait_for_transaction(client: &mut ConnectorWrapper, tx_hash: H256) { loop { let transaction = client .call(rpc::eth_get_transaction_by_hash(tx_hash)) .unwrap(); if transaction.block_hash.is_some() { break; } } } pub fn create_secret() -> H256 { const HEX_CHARSET: &[u8] = b"abcdef0123456789"; const PK_LEN: usize = 64; let mut rng = rand::thread_rng(); let secret: String = (0..PK_LEN) .map(|_| { let idx = rng.gen_range(0..HEX_CHARSET.len()); HEX_CHARSET[idx] as char }) .collect(); H256::from_str(&secret).unwrap() } pub fn import_account(client: &mut ConnectorWrapper, secret: H256) -> H160 { client .call(rpc::personal_import_raw_key( PrivateKey::ZeroXPrefixed(secret), String::from(ACCOUNTS_PASSWORD), )) .unwrap() } pub fn unlock_account(client: &mut ConnectorWrapper, address: H160) -> bool { client .call(rpc::personal_unlock_account( address, String::from(ACCOUNTS_PASSWORD), None, )) .unwrap() } pub fn prefund_account(client: &mut ConnectorWrapper, address: H160) -> H256 { let coinbase = client.call(rpc::eth_coinbase()).unwrap(); let tx = TransactionRequest { from: coinbase, to: Some(address), value: Some(U256::exp10(20)), ..Default::default() }; let tx_hash = client.call(rpc::eth_send_transaction(tx)).unwrap(); wait_for_transaction(client, tx_hash); tx_hash } pub fn create_account(client: &mut ConnectorWrapper) -> (H256, H160) { let secret = create_secret(); let address = import_account(client, secret); unlock_account(client, address); prefund_account(client, address); (secret, address) } pub fn compile_contract(path: &Path, contract_name: 
&str) -> Value { let path_as_str = path.to_str().unwrap(); let output = Command::new("solc") .args(&[path_as_str, "--optimize", "--combined-json", "abi,bin"]) .output() .expect("Failed to compile contract. Is Solidity compiler solc installed?") .stdout; let output: Value = serde_json::from_slice(output.as_slice()).expect("Failed to deserialize compiled contract"); output["contracts"][String::from(path_as_str) + ":" + contract_name].clone() } pub fn deploy_contract( client: &mut ConnectorWrapper, address: H160, path: &Path, contract_name: &str, ) -> (H160, Value) { let raw_contract = compile_contract(path, contract_name); let bin = bin(raw_contract.clone()); let abi = abi(raw_contract); let contract_bytes = Bytes::from_str(&bin).unwrap(); let transaction = TransactionRequest { from: address, data: Some(contract_bytes), gas: Some(U256::from(1000000 as u64)), ..Default::default() }; let transaction_hash = client.call(rpc::eth_send_transaction(transaction)).unwrap(); wait_for_transaction(client, transaction_hash); let receipt = client .call(rpc::eth_get_transaction_receipt(transaction_hash)) .unwrap().unwrap(); let contract_address = receipt.contract_address.unwrap(); (contract_address, abi) } pub fn simulate_transaction(client: &mut ConnectorWrapper, from: H160, to: &str, value: U256) -> H256 { let transaction = TransactionRequest { from: from, to: Some(to.parse().unwrap()), value: Some(value), ..Default::default() }; let tx_hash = client.call(rpc::eth_send_transaction(transaction)).unwrap(); wait_for_transaction(client, tx_hash); tx_hash } pub fn bin(contract_input: Value) -> String { contract_input["bin"].as_str().unwrap().to_string() } pub fn abi(contract_input: Value) -> Value { contract_input["abi"].clone() } pub fn keccak(input: &[u8]) -> [u8; 32] { let mut hasher = Keccak::v256(); hasher.update(input); let mut out = [0u8; 32]; hasher.finalize(&mut out); out } pub fn rpc_call_test_expected<T: DeserializeOwned + Debug + PartialEq>( client: &mut ConnectorWrapper, 
rpc: Rpc<T>, expected: T, ) { match client.call(rpc) { Ok(res) => { assert_eq!(res, expected); } Err(err) => panic!("{:?}", err), } } pub fn rpc_call_test_some<T: DeserializeOwned + Debug + PartialEq>( client: &mut ConnectorWrapper, rpc: Rpc<T>, ) { match client.call(rpc) { Ok(res) => { println!("{:?}", res); } Err(err) => panic!("{:?}", err), } } pub fn rpc_call_with_return<T: DeserializeOwned + Debug + PartialEq>( client: &mut ConnectorWrapper, rpc: Rpc<T>, ) -> T { match client.call(rpc) { Ok(res) => { res } Err(err) => panic!("{:?}", err), } }
28.494624
103
0.620189
8f560e35ad8ee8dba85bf222b38043d999d89599
644
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { fidl_fuchsia_io2::{self as fio2}, lazy_static::lazy_static, }; // TODO(https://fxbug.dev/71901): remove aliases once the routing lib has a stable API. pub type Rights = ::routing::rights::Rights; lazy_static! { /// All rights corresponding to r*. pub static ref READ_RIGHTS: fio2::Operations = *::routing::rights::READ_RIGHTS; /// All rights corresponding to w*. pub static ref WRITE_RIGHTS: fio2::Operations = *::routing::rights::WRITE_RIGHTS; }
33.894737
87
0.709627
878faf2072bf6006df143a166088b2158f260a76
4,629
// Copyright 2018 Developers of the Rand project. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Math helper functions use crate::ziggurat_tables; use rand::distributions::hidden_export::IntoFloat; use rand::Rng; /// Calculates ln(gamma(x)) (natural logarithm of the gamma /// function) using the Lanczos approximation. /// /// The approximation expresses the gamma function as: /// `gamma(z+1) = sqrt(2*pi)*(z+g+0.5)^(z+0.5)*exp(-z-g-0.5)*Ag(z)` /// `g` is an arbitrary constant; we use the approximation with `g=5`. /// /// Noting that `gamma(z+1) = z*gamma(z)` and applying `ln` to both sides: /// `ln(gamma(z)) = (z+0.5)*ln(z+g+0.5)-(z+g+0.5) + ln(sqrt(2*pi)*Ag(z)/z)` /// /// `Ag(z)` is an infinite series with coefficients that can be calculated /// ahead of time - we use just the first 6 terms, which is good enough /// for most purposes. pub(crate) fn log_gamma<F: num_traits::Float>(x: F) -> F { // precalculated 6 coefficients for the first 6 terms of the series let coefficients: [F; 6] = [ F::from(76.18009172947146).unwrap(), F::from(-86.50532032941677).unwrap(), F::from(24.01409824083091).unwrap(), F::from(-1.231739572450155).unwrap(), F::from(0.1208650973866179e-2).unwrap(), F::from(-0.5395239384953e-5).unwrap(), ]; // (x+0.5)*ln(x+g+0.5)-(x+g+0.5) let tmp = x + F::from(5.5).unwrap(); let log = (x + F::from(0.5).unwrap()) * tmp.ln() - tmp; // the first few terms of the series for Ag(x) let mut a = F::from(1.000000000190015).unwrap(); let mut denom = x; for &coeff in &coefficients { denom = denom + F::one(); a = a + (coeff / denom); } // get everything together // a is Ag(x) // 2.5066... 
is sqrt(2pi) log + (F::from(2.5066282746310005).unwrap() * a / x).ln() } /// Sample a random number using the Ziggurat method (specifically the /// ZIGNOR variant from Doornik 2005). Most of the arguments are /// directly from the paper: /// /// * `rng`: source of randomness /// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0. /// * `X`: the $x_i$ abscissae. /// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$) /// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$ /// * `pdf`: the probability density function /// * `zero_case`: manual sampling from the tail when we chose the /// bottom box (i.e. i == 0) // the perf improvement (25-50%) is definitely worth the extra code // size from force-inlining. #[inline(always)] pub(crate) fn ziggurat<R: Rng + ?Sized, P, Z>( rng: &mut R, symmetric: bool, x_tab: ziggurat_tables::ZigTable, f_tab: ziggurat_tables::ZigTable, mut pdf: P, mut zero_case: Z ) -> f64 where P: FnMut(f64) -> f64, Z: FnMut(&mut R, f64) -> f64, { loop { // As an optimisation we re-implement the conversion to a f64. // From the remaining 12 most significant bits we use 8 to construct `i`. // This saves us generating a whole extra random number, while the added // precision of using 64 bits for f64 does not buy us much. let bits = rng.next_u64(); let i = bits as usize & 0xff; let u = if symmetric { // Convert to a value in the range [2,4) and substract to get [-1,1) // We can't convert to an open range directly, that would require // substracting `3.0 - EPSILON`, which is not representable. // It is possible with an extra step, but an open range does not // seem neccesary for the ziggurat algorithm anyway. 
(bits >> 12).into_float_with_exponent(1) - 3.0 } else { // Convert to a value in the range [1,2) and substract to get (0,1) (bits >> 12).into_float_with_exponent(0) - (1.0 - core::f64::EPSILON / 2.0) }; let x = u * x_tab[i]; let test_x = if symmetric { x.abs() } else { x }; // algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i]) if test_x < x_tab[i + 1] { return x; } if i == 0 { return zero_case(rng, u); } // algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1 if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) { return x; } } }
38.256198
93
0.600562
c118515f1f04ff205f315cf5110b2afbcb410680
22,091
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Contains `ArrayData`, a generic representation of Arrow array data which encapsulates //! common attributes and operations for Arrow array. use std::mem; use std::sync::Arc; use crate::datatypes::{DataType, IntervalUnit}; use crate::{bitmap::Bitmap, datatypes::ArrowNativeType}; use crate::{ buffer::{Buffer, MutableBuffer}, util::bit_util, }; use super::equal::equal; #[inline] pub(crate) fn count_nulls( null_bit_buffer: Option<&Buffer>, offset: usize, len: usize, ) -> usize { if let Some(buf) = null_bit_buffer { len.checked_sub(buf.count_set_bits_offset(offset, len)) .unwrap() } else { 0 } } /// creates 2 [`MutableBuffer`]s with a given `capacity` (in slots). 
#[inline] pub(crate) fn new_buffers(data_type: &DataType, capacity: usize) -> [MutableBuffer; 2] { let empty_buffer = MutableBuffer::new(0); match data_type { DataType::Null => [empty_buffer, MutableBuffer::new(0)], DataType::Boolean => { let bytes = bit_util::ceil(capacity, 8); let buffer = MutableBuffer::new(bytes); [buffer, empty_buffer] } DataType::UInt8 => [ MutableBuffer::new(capacity * mem::size_of::<u8>()), empty_buffer, ], DataType::UInt16 => [ MutableBuffer::new(capacity * mem::size_of::<u16>()), empty_buffer, ], DataType::UInt32 => [ MutableBuffer::new(capacity * mem::size_of::<u32>()), empty_buffer, ], DataType::UInt64 => [ MutableBuffer::new(capacity * mem::size_of::<u64>()), empty_buffer, ], DataType::Int8 => [ MutableBuffer::new(capacity * mem::size_of::<i8>()), empty_buffer, ], DataType::Int16 => [ MutableBuffer::new(capacity * mem::size_of::<i16>()), empty_buffer, ], DataType::Int32 => [ MutableBuffer::new(capacity * mem::size_of::<i32>()), empty_buffer, ], DataType::Int64 => [ MutableBuffer::new(capacity * mem::size_of::<i64>()), empty_buffer, ], DataType::Float32 => [ MutableBuffer::new(capacity * mem::size_of::<f32>()), empty_buffer, ], DataType::Float64 => [ MutableBuffer::new(capacity * mem::size_of::<f64>()), empty_buffer, ], DataType::Date32 | DataType::Time32(_) => [ MutableBuffer::new(capacity * mem::size_of::<i32>()), empty_buffer, ], DataType::Date64 | DataType::Time64(_) | DataType::Duration(_) | DataType::Timestamp(_, _) => [ MutableBuffer::new(capacity * mem::size_of::<i64>()), empty_buffer, ], DataType::Interval(IntervalUnit::YearMonth) => [ MutableBuffer::new(capacity * mem::size_of::<i32>()), empty_buffer, ], DataType::Interval(IntervalUnit::DayTime) => [ MutableBuffer::new(capacity * mem::size_of::<i64>()), empty_buffer, ], DataType::Utf8 | DataType::Binary => { let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i32>()); // safety: `unsafe` code assumes that this buffer is initialized with one element 
buffer.push(0i32); [buffer, MutableBuffer::new(capacity * mem::size_of::<u8>())] } DataType::LargeUtf8 | DataType::LargeBinary => { let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i64>()); // safety: `unsafe` code assumes that this buffer is initialized with one element buffer.push(0i64); [buffer, MutableBuffer::new(capacity * mem::size_of::<u8>())] } DataType::List(_) => { // offset buffer always starts with a zero let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i32>()); buffer.push(0i32); [buffer, empty_buffer] } DataType::LargeList(_) => { // offset buffer always starts with a zero let mut buffer = MutableBuffer::new((1 + capacity) * mem::size_of::<i64>()); buffer.push(0i64); [buffer, empty_buffer] } DataType::FixedSizeBinary(size) => { [MutableBuffer::new(capacity * *size as usize), empty_buffer] } DataType::Dictionary(child_data_type, _) => match child_data_type.as_ref() { DataType::UInt8 => [ MutableBuffer::new(capacity * mem::size_of::<u8>()), empty_buffer, ], DataType::UInt16 => [ MutableBuffer::new(capacity * mem::size_of::<u16>()), empty_buffer, ], DataType::UInt32 => [ MutableBuffer::new(capacity * mem::size_of::<u32>()), empty_buffer, ], DataType::UInt64 => [ MutableBuffer::new(capacity * mem::size_of::<u64>()), empty_buffer, ], DataType::Int8 => [ MutableBuffer::new(capacity * mem::size_of::<i8>()), empty_buffer, ], DataType::Int16 => [ MutableBuffer::new(capacity * mem::size_of::<i16>()), empty_buffer, ], DataType::Int32 => [ MutableBuffer::new(capacity * mem::size_of::<i32>()), empty_buffer, ], DataType::Int64 => [ MutableBuffer::new(capacity * mem::size_of::<i64>()), empty_buffer, ], _ => unreachable!(), }, DataType::Float16 => unreachable!(), DataType::FixedSizeList(_, _) | DataType::Struct(_) => { [empty_buffer, MutableBuffer::new(0)] } DataType::Decimal(_, _) => [ MutableBuffer::new(capacity * mem::size_of::<u8>()), empty_buffer, ], DataType::Union(_) => unimplemented!(), } } /// Maps 2 [`MutableBuffer`]s 
into a vector of [Buffer]s whose size depends on `data_type`. #[inline] pub(crate) fn into_buffers( data_type: &DataType, buffer1: MutableBuffer, buffer2: MutableBuffer, ) -> Vec<Buffer> { match data_type { DataType::Null | DataType::Struct(_) => vec![], DataType::Utf8 | DataType::Binary | DataType::LargeUtf8 | DataType::LargeBinary => vec![buffer1.into(), buffer2.into()], _ => vec![buffer1.into()], } } /// An generic representation of Arrow array data which encapsulates common attributes and /// operations for Arrow array. Specific operations for different arrays types (e.g., /// primitive, list, struct) are implemented in `Array`. #[derive(Debug, Clone)] pub struct ArrayData { /// The data type for this array data data_type: DataType, /// The number of elements in this array data len: usize, /// The number of null elements in this array data null_count: usize, /// The offset into this array data, in number of items offset: usize, /// The buffers for this array data. Note that depending on the array types, this /// could hold different kinds of buffers (e.g., value buffer, value offset buffer) /// at different positions. buffers: Vec<Buffer>, /// The child(ren) of this array. Only non-empty for nested types, currently /// `ListArray` and `StructArray`. child_data: Vec<ArrayDataRef>, /// The null bitmap. A `None` value for this indicates all values are non-null in /// this array. null_bitmap: Option<Bitmap>, } pub type ArrayDataRef = Arc<ArrayData>; impl ArrayData { pub fn new( data_type: DataType, len: usize, null_count: Option<usize>, null_bit_buffer: Option<Buffer>, offset: usize, buffers: Vec<Buffer>, child_data: Vec<ArrayDataRef>, ) -> Self { let null_count = match null_count { None => count_nulls(null_bit_buffer.as_ref(), offset, len), Some(null_count) => null_count, }; let null_bitmap = null_bit_buffer.map(Bitmap::from); Self { data_type, len, null_count, offset, buffers, child_data, null_bitmap, } } /// Returns a builder to construct a `ArrayData` instance. 
#[inline] pub const fn builder(data_type: DataType) -> ArrayDataBuilder { ArrayDataBuilder::new(data_type) } /// Returns a reference to the data type of this array data #[inline] pub const fn data_type(&self) -> &DataType { &self.data_type } /// Returns a slice of buffers for this array data pub fn buffers(&self) -> &[Buffer] { &self.buffers[..] } /// Returns a slice of children data arrays pub fn child_data(&self) -> &[ArrayDataRef] { &self.child_data[..] } /// Returns whether the element at index `i` is null pub fn is_null(&self, i: usize) -> bool { if let Some(ref b) = self.null_bitmap { return !b.is_set(self.offset + i); } false } /// Returns a reference to the null bitmap of this array data #[inline] pub const fn null_bitmap(&self) -> &Option<Bitmap> { &self.null_bitmap } /// Returns a reference to the null buffer of this array data. pub fn null_buffer(&self) -> Option<&Buffer> { self.null_bitmap().as_ref().map(|b| b.buffer_ref()) } /// Returns whether the element at index `i` is not null pub fn is_valid(&self, i: usize) -> bool { if let Some(ref b) = self.null_bitmap { return b.is_set(self.offset + i); } true } /// Returns the length (i.e., number of elements) of this array #[inline] pub const fn len(&self) -> usize { self.len } // Returns whether array data is empty #[inline] pub const fn is_empty(&self) -> bool { self.len == 0 } /// Returns the offset of this array #[inline] pub const fn offset(&self) -> usize { self.offset } /// Returns the total number of nulls in this array #[inline] pub const fn null_count(&self) -> usize { self.null_count } /// Returns the total number of bytes of memory occupied by the buffers owned by this [ArrayData]. 
pub fn get_buffer_memory_size(&self) -> usize { let mut size = 0; for buffer in &self.buffers { size += buffer.capacity(); } if let Some(bitmap) = &self.null_bitmap { size += bitmap.get_buffer_memory_size() } for child in &self.child_data { size += child.get_buffer_memory_size(); } size } /// Returns the total number of bytes of memory occupied physically by this [ArrayData]. pub fn get_array_memory_size(&self) -> usize { let mut size = 0; // Calculate size of the fields that don't have [get_array_memory_size] method internally. size += mem::size_of_val(self) - mem::size_of_val(&self.buffers) - mem::size_of_val(&self.null_bitmap) - mem::size_of_val(&self.child_data); // Calculate rest of the fields top down which contain actual data for buffer in &self.buffers { size += mem::size_of_val(&buffer); size += buffer.capacity(); } if let Some(bitmap) = &self.null_bitmap { size += bitmap.get_array_memory_size() } for child in &self.child_data { size += child.get_array_memory_size(); } size } /// Creates a zero-copy slice of itself. This creates a new [ArrayData] /// with a different offset, len and a shifted null bitmap. /// /// # Panics /// /// Panics if `offset + length > self.len()`. 
pub fn slice(&self, offset: usize, length: usize) -> ArrayData { assert!((offset + length) <= self.len()); let mut new_data = self.clone(); new_data.len = length; new_data.offset = offset + self.offset; new_data.null_count = count_nulls(new_data.null_buffer(), new_data.offset, new_data.len); new_data } /// Returns the `buffer` as a slice of type `T` starting at self.offset /// # Panics /// This function panics if: /// * the buffer is not byte-aligned with type T, or /// * the datatype is `Boolean` (it corresponds to a bit-packed buffer where the offset is not applicable) #[inline] pub(super) fn buffer<T: ArrowNativeType>(&self, buffer: usize) -> &[T] { let values = unsafe { self.buffers[buffer].as_slice().align_to::<T>() }; if !values.0.is_empty() || !values.2.is_empty() { panic!("The buffer is not byte-aligned with its interpretation") }; assert_ne!(self.data_type, DataType::Boolean); &values.1[self.offset..] } /// Returns a new empty [ArrayData] valid for `data_type`. pub(super) fn new_empty(data_type: &DataType) -> Self { let buffers = new_buffers(data_type, 0); let [buffer1, buffer2] = buffers; let buffers = into_buffers(data_type, buffer1, buffer2); let child_data = match data_type { DataType::Null | DataType::Boolean | DataType::UInt8 | DataType::UInt16 | DataType::UInt32 | DataType::UInt64 | DataType::Int8 | DataType::Int16 | DataType::Int32 | DataType::Int64 | DataType::Float32 | DataType::Float64 | DataType::Date32 | DataType::Date64 | DataType::Time32(_) | DataType::Time64(_) | DataType::Duration(_) | DataType::Timestamp(_, _) | DataType::Utf8 | DataType::Binary | DataType::LargeUtf8 | DataType::LargeBinary | DataType::Interval(_) | DataType::FixedSizeBinary(_) | DataType::Decimal(_, _) => vec![], DataType::List(field) => { vec![Arc::new(Self::new_empty(field.data_type()))] } DataType::FixedSizeList(field, _) => { vec![Arc::new(Self::new_empty(field.data_type()))] } DataType::LargeList(field) => { vec![Arc::new(Self::new_empty(field.data_type()))] } 
DataType::Struct(fields) => fields .iter() .map(|field| Arc::new(Self::new_empty(field.data_type()))) .collect(), DataType::Union(_) => unimplemented!(), DataType::Dictionary(_, data_type) => { vec![Arc::new(Self::new_empty(data_type))] } DataType::Float16 => unreachable!(), }; Self::new(data_type.clone(), 0, Some(0), None, 0, buffers, child_data) } } impl PartialEq for ArrayData { fn eq(&self, other: &Self) -> bool { equal(self, other) } } /// Builder for `ArrayData` type #[derive(Debug)] pub struct ArrayDataBuilder { data_type: DataType, len: usize, null_count: Option<usize>, null_bit_buffer: Option<Buffer>, offset: usize, buffers: Vec<Buffer>, child_data: Vec<ArrayDataRef>, } impl ArrayDataBuilder { #[inline] pub const fn new(data_type: DataType) -> Self { Self { data_type, len: 0, null_count: None, null_bit_buffer: None, offset: 0, buffers: vec![], child_data: vec![], } } #[inline] pub const fn len(mut self, n: usize) -> Self { self.len = n; self } pub fn null_bit_buffer(mut self, buf: Buffer) -> Self { self.null_bit_buffer = Some(buf); self } #[inline] pub const fn offset(mut self, n: usize) -> Self { self.offset = n; self } pub fn buffers(mut self, v: Vec<Buffer>) -> Self { self.buffers = v; self } pub fn add_buffer(mut self, b: Buffer) -> Self { self.buffers.push(b); self } pub fn child_data(mut self, v: Vec<ArrayDataRef>) -> Self { self.child_data = v; self } pub fn add_child_data(mut self, r: ArrayDataRef) -> Self { self.child_data.push(r); self } pub fn build(self) -> ArrayDataRef { let data = ArrayData::new( self.data_type, self.len, self.null_count, self.null_bit_buffer, self.offset, self.buffers, self.child_data, ); Arc::new(data) } } #[cfg(test)] mod tests { use super::*; use std::sync::Arc; use crate::buffer::Buffer; use crate::util::bit_util; #[test] fn test_new() { let arr_data = ArrayData::new(DataType::Boolean, 10, Some(1), None, 2, vec![], vec![]); assert_eq!(10, arr_data.len()); assert_eq!(1, arr_data.null_count()); assert_eq!(2, 
arr_data.offset()); assert_eq!(0, arr_data.buffers().len()); assert_eq!(0, arr_data.child_data().len()); } #[test] fn test_builder() { let child_arr_data = Arc::new(ArrayData::new( DataType::Int32, 5, Some(0), None, 0, vec![Buffer::from_slice_ref(&[1i32, 2, 3, 4, 5])], vec![], )); let v = vec![0, 1, 2, 3]; let b1 = Buffer::from(&v[..]); let arr_data = ArrayData::builder(DataType::Int32) .len(20) .offset(5) .add_buffer(b1) .null_bit_buffer(Buffer::from(vec![ 0b01011111, 0b10110101, 0b01100011, 0b00011110, ])) .add_child_data(child_arr_data.clone()) .build(); assert_eq!(20, arr_data.len()); assert_eq!(10, arr_data.null_count()); assert_eq!(5, arr_data.offset()); assert_eq!(1, arr_data.buffers().len()); assert_eq!(&[0, 1, 2, 3], arr_data.buffers()[0].as_slice()); assert_eq!(1, arr_data.child_data().len()); assert_eq!(child_arr_data, arr_data.child_data()[0]); } #[test] fn test_null_count() { let mut bit_v: [u8; 2] = [0; 2]; bit_util::set_bit(&mut bit_v, 0); bit_util::set_bit(&mut bit_v, 3); bit_util::set_bit(&mut bit_v, 10); let arr_data = ArrayData::builder(DataType::Int32) .len(16) .null_bit_buffer(Buffer::from(bit_v)) .build(); assert_eq!(13, arr_data.null_count()); // Test with offset let mut bit_v: [u8; 2] = [0; 2]; bit_util::set_bit(&mut bit_v, 0); bit_util::set_bit(&mut bit_v, 3); bit_util::set_bit(&mut bit_v, 10); let arr_data = ArrayData::builder(DataType::Int32) .len(12) .offset(2) .null_bit_buffer(Buffer::from(bit_v)) .build(); assert_eq!(10, arr_data.null_count()); } #[test] fn test_null_buffer_ref() { let mut bit_v: [u8; 2] = [0; 2]; bit_util::set_bit(&mut bit_v, 0); bit_util::set_bit(&mut bit_v, 3); bit_util::set_bit(&mut bit_v, 10); let arr_data = ArrayData::builder(DataType::Int32) .len(16) .null_bit_buffer(Buffer::from(bit_v)) .build(); assert!(arr_data.null_buffer().is_some()); assert_eq!(&bit_v, arr_data.null_buffer().unwrap().as_slice()); } #[test] fn test_slice() { let mut bit_v: [u8; 2] = [0; 2]; bit_util::set_bit(&mut bit_v, 0); 
bit_util::set_bit(&mut bit_v, 3); bit_util::set_bit(&mut bit_v, 10); let data = ArrayData::builder(DataType::Int32) .len(16) .null_bit_buffer(Buffer::from(bit_v)) .build(); let data = data.as_ref(); let new_data = data.slice(1, 15); assert_eq!(data.len() - 1, new_data.len()); assert_eq!(1, new_data.offset()); assert_eq!(data.null_count(), new_data.null_count()); // slice of a slice (removes one null) let new_data = new_data.slice(1, 14); assert_eq!(data.len() - 2, new_data.len()); assert_eq!(2, new_data.offset()); assert_eq!(data.null_count() - 1, new_data.null_count()); } #[test] fn test_equality() { let int_data = ArrayData::builder(DataType::Int32).build(); let float_data = ArrayData::builder(DataType::Float32).build(); assert_ne!(int_data, float_data); } #[test] fn test_count_nulls() { let null_buffer = Some(Buffer::from(vec![0b00010110, 0b10011111])); let count = count_nulls(null_buffer.as_ref(), 0, 16); assert_eq!(count, 7); let count = count_nulls(null_buffer.as_ref(), 4, 8); assert_eq!(count, 3); } }
32.296784
110
0.553845
db2567a889e19844e2004866424e8edfd8b9e6e4
1,214
use std::io::{self, Read}; use clap::{Arg, App}; mod day1; mod day2; mod day3; mod day4; mod util; fn main() -> io::Result<()> { let matches = App::new("aoc2020") .arg(Arg::with_name("day") .long("day") .required(true) .takes_value(true)) .arg(Arg::with_name("part") .long("part") .required(true) .takes_value(true)) .get_matches(); let day = matches.value_of("day").unwrap(); let part = matches.value_of("part").unwrap(); let mut buffer = String::new(); io::stdin().read_to_string(&mut buffer)?; let result = match (day, part) { ("1", "1") => day1::part1::day1part1(&buffer), ("1", "2") => day1::part2::day1part2(&buffer), ("2", "1") => day2::part1::day2part1(&buffer), ("2", "2") => day2::part2::day2part2(&buffer), ("3", "1") => day3::part1::day3part1(&buffer), ("3", "2") => day3::part2::day3part2(&buffer), ("4", "1") => day4::part1::day4part1(&buffer), _ => None, }; std::process::exit(match result { Some(x) => { print!("{}", x); 0 }, None => 1, }) }
25.829787
54
0.478583
eb0a8ddc0bdea55b65be6c4e918c4670987f718f
2,665
//! //! The transaction data, which is sent between Zandbox, Zargo, and front-ends. //! pub mod error; pub mod msg; use serde::Deserialize; use serde::Serialize; use zksync_types::tx::PackedEthSignature; use zksync_types::tx::ZkSyncTx; use zksync_types::TokenLike; use self::error::Error; use self::msg::Msg; use crate::num_compat_backward; /// /// The transaction, understandable by zkSync, front-end, Zandbox, and Zargo. /// #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Transaction { /// The transaction itself. pub tx: ZkSyncTx, /// The Ethereum signature of the transaction. pub ethereum_signature: Option<EthereumSignature>, } impl Transaction { /// /// A shortcut constructor. /// pub fn new(tx: ZkSyncTx, signature: Option<PackedEthSignature>) -> Self { Self { tx, ethereum_signature: signature.map(EthereumSignature::new), } } /// /// Converts the transaction into an intrinsic `zksync::msg` variable representation. /// pub fn try_to_msg( &self, wallet: &zksync::Wallet<zksync_eth_signer::PrivateKeySigner, zksync::RpcProvider>, ) -> Result<Msg, Error> { match self.tx { ZkSyncTx::Transfer(ref transfer) => { let token = wallet .tokens .resolve(TokenLike::Id(transfer.token)) .ok_or(Error::UnsupportedToken(transfer.token))?; Ok(Msg::new( transfer.from, transfer.to, token.address, num_compat_backward(zksync::utils::closest_packable_token_amount(&transfer.amount)), )) } ZkSyncTx::Withdraw(..) => Err(Error::UnsupportedTransaction("Withdraw")), ZkSyncTx::Close(..) => Err(Error::UnsupportedTransaction("Close")), ZkSyncTx::ChangePubKey(..) => Err(Error::UnsupportedTransaction("ChangePubKey")), ZkSyncTx::ForcedExit(..) => Err(Error::UnsupportedTransaction("ForcedExit")), } } } /// /// The transaction Ethereum signature. /// #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EthereumSignature { /// The default signature type. pub r#type: String, /// The signature as a hex string. 
pub signature: PackedEthSignature, } impl EthereumSignature { /// /// A shortcut constructor. /// pub fn new(signature: PackedEthSignature) -> Self { Self { r#type: "EthereumSignature".to_owned(), signature, } } }
28.351064
104
0.604128
14ef07c8c1ab1711becdabbdd549362ad1d19766
129
extern crate log; extern crate naia_derive; pub mod behavior; pub mod protocol; mod shared; pub use shared::get_shared_config;
14.333333
34
0.790698
8a1ab6c2f50569fe9ed43f62cef042b38430d3be
3,254
#![no_std] extern crate atsamd_hal as hal; pub use hal::atsamd21g18a::*; use hal::prelude::*; pub use hal::*; use gpio::{Floating, Input, Output, Port, PushPull}; use hal::clock::GenericClockController; use hal::sercom::{I2CMaster5, PadPin, SPIMaster3}; use hal::time::Hertz; define_pins!( /// Maps the pins to their arduino names and /// the numbers printed on the board. struct Pins, target_device: atsamd21g18a, /// Pin 0, rx. Also analog input (A6) pin rx = b9, /// Pin 1, tx. Also analog input (A7) pin tx = b8, /// Pin 4, button A. pin d4 = a28, /// Pin 5, button B. pin d5 = a14, /// Pin 7, slide switch. pin d7 = a15, /// Pin 11, speaker enable. pin d11 = a30, /// Digital pin number 13, which is also attached to the red LED. PWM capable. pin d13 = a17, /// The I2C SDA. Also D2 and A5. pin sda = b2, /// The I2C SCL. Also D3 and A4 pin scl = b3, /// The data line attached to the neopixel. Also D8. pin neopixel = b23, /// The line attached to the speaker. Also D12 and A0. pin speaker = a2, /// The SPI SCK. Also D6 and A1 pin sck = a5, /// The SPI MOSI. Also D10 and A3 pin mosi = a7, /// The SPI MISO. Also D9 and A2 pin miso = a6, /// The SCK pin attached to the on-board SPI flash pin flash_sck = a21, /// The MOSI pin attached to the on-board SPI flash pin flash_mosi = a20, /// The MISO pin attached to the on-board SPI flash pin flash_miso = a16, /// The CS pin attached to the on-board SPI flash pin flash_cs = b22, ); /// Convenience for accessing the on-board SPI Flash device. /// This powers up SERCOM5 and configures it for use as an /// SPI Master. 
pub fn flash_spi_master( clocks: &mut GenericClockController, sercom3: SERCOM3, pm: &mut PM, sck: gpio::Pa21<Input<Floating>>, mosi: gpio::Pa20<Input<Floating>>, miso: gpio::Pa16<Input<Floating>>, cs: gpio::Pb22<Input<Floating>>, port: &mut Port, ) -> (SPIMaster3, gpio::Pb22<Output<PushPull>>) { let gclk0 = clocks.gclk0(); let flash = SPIMaster3::new( &clocks.sercom3_core(&gclk0).unwrap(), 48.mhz(), hal::hal::spi::Mode { phase: hal::hal::spi::Phase::CaptureOnFirstTransition, polarity: hal::hal::spi::Polarity::IdleLow, }, sercom3, pm, hal::sercom::SPI3Pinout::Dipo0Dopo1 { miso: miso.into_pad(port), mosi: mosi.into_pad(port), sck: sck.into_pad(port), }, ); let mut cs = cs.into_push_pull_output(port); cs.set_high(); (flash, cs) } /// Convenience for setting up the labelled SDA, SCL pins to /// operate as an I2C master running at the specified frequency. pub fn i2c_master<F: Into<Hertz>>( clocks: &mut GenericClockController, bus_speed: F, sercom5: SERCOM5, pm: &mut PM, sda: gpio::Pb2<Input<Floating>>, scl: gpio::Pb3<Input<Floating>>, port: &mut Port, ) -> I2CMaster5 { let gclk0 = clocks.gclk0(); I2CMaster5::new( &clocks.sercom5_core(&gclk0).unwrap(), bus_speed.into(), sercom5, pm, sda.into_pad(port), scl.into_pad(port), ) }
27.576271
82
0.603565
118ba1711191b3f086b9fe5dc73efb62c9d92ba4
2,170
// force-host #![feature(plugin_registrar, rustc_private)] #![feature(box_syntax)] #[macro_use] extern crate rustc; #[macro_use] extern crate rustc_session; extern crate rustc_driver; extern crate syntax; use rustc::lint::{LateContext, LintContext, LintPass, LateLintPass}; use rustc_driver::plugin::Registry; use rustc::hir; use syntax::attr; use syntax::symbol::Symbol; macro_rules! fake_lint_pass { ($struct:ident, $($attr:expr),*) => { struct $struct; impl LintPass for $struct { fn name(&self) -> &'static str { stringify!($struct) } } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for $struct { fn check_crate(&mut self, cx: &LateContext, krate: &hir::Crate) { $( if !attr::contains_name(&krate.attrs, $attr) { cx.span_lint(CRATE_NOT_OKAY, krate.span, &format!("crate is not marked with #![{}]", $attr)); } )* } } } } declare_lint!(CRATE_NOT_OKAY, Warn, "crate not marked with #![crate_okay]"); declare_lint!(CRATE_NOT_RED, Warn, "crate not marked with #![crate_red]"); declare_lint!(CRATE_NOT_BLUE, Warn, "crate not marked with #![crate_blue]"); declare_lint!(CRATE_NOT_GREY, Warn, "crate not marked with #![crate_grey]"); declare_lint!(CRATE_NOT_GREEN, Warn, "crate not marked with #![crate_green]"); fake_lint_pass! { PassOkay, Symbol::intern("rustc_crate_okay") } fake_lint_pass! { PassRedBlue, Symbol::intern("rustc_crate_red"), Symbol::intern("rustc_crate_blue") } fake_lint_pass! { PassGreyGreen, Symbol::intern("rustc_crate_grey"), Symbol::intern("rustc_crate_green") } #[plugin_registrar] pub fn plugin_registrar(reg: &mut Registry) { reg.lint_store.register_lints(&[ &CRATE_NOT_OKAY, &CRATE_NOT_RED, &CRATE_NOT_BLUE, &CRATE_NOT_GREY, &CRATE_NOT_GREEN, ]); reg.lint_store.register_late_pass(|| box PassOkay); reg.lint_store.register_late_pass(|| box PassRedBlue); reg.lint_store.register_late_pass(|| box PassGreyGreen); }
29.324324
89
0.627189
1c9ed126251e930c49a751f7ccdb229ec56ca897
3,866
use std::sync::{Arc, Mutex}; use cubesql::CubeError; #[cfg(build = "debug")] use log::trace; use neon::prelude::*; use tokio::sync::oneshot; use crate::utils::bind_method; type JsAsyncChannelCallback = Box<dyn Fn(Result<String, CubeError>) + Send>; pub struct JsAsyncChannel { callback: JsAsyncChannelCallback, } impl Finalize for JsAsyncChannel {} fn js_async_channel_resolve(mut cx: FunctionContext) -> JsResult<JsUndefined> { #[cfg(build = "debug")] trace!("JsAsyncChannel.resolved"); let this = cx .this() .downcast_or_throw::<JsBox<JsAsyncChannel>, _>(&mut cx)?; let result = cx.argument::<JsString>(0)?; this.resolve(result.value(&mut cx)); Ok(cx.undefined()) } fn js_async_channel_reject(mut cx: FunctionContext) -> JsResult<JsUndefined> { #[cfg(build = "debug")] trace!("JsAsyncChannel.reject"); let this = cx .this() .downcast_or_throw::<JsBox<JsAsyncChannel>, _>(&mut cx)?; let error = cx.argument::<JsString>(0)?; this.reject(error.value(&mut cx)); Ok(cx.undefined()) } impl JsAsyncChannel { pub fn new(callback: JsAsyncChannelCallback) -> Self { Self { callback } } #[allow(clippy::wrong_self_convention)] fn to_object<'a, C: Context<'a>>(self, cx: &mut C) -> JsResult<'a, JsObject> { let obj = cx.empty_object(); // Pass JsAsyncChannel as this, because JsFunction cannot use closure (fn with move) let obj_this = cx.boxed(self).upcast::<JsValue>(); let resolve_fn = JsFunction::new(cx, js_async_channel_resolve)?; let resolve = bind_method(cx, resolve_fn, obj_this)?; obj.set(cx, "resolve", resolve)?; let reject_fn = JsFunction::new(cx, js_async_channel_reject)?; let reject = bind_method(cx, reject_fn, obj_this)?; obj.set(cx, "reject", reject)?; Ok(obj) } fn resolve(&self, result: String) { let callback = &self.callback; callback(Ok(result)); } fn reject(&self, error: String) { let callback = &self.callback; callback(Err(CubeError::internal(error))); } } pub async fn call_js_with_channel_as_callback<R>( channel: Arc<Channel>, js_method: Arc<Root<JsFunction>>, query: 
Option<String>, ) -> Result<R, CubeError> where R: 'static + serde::de::DeserializeOwned + Send + std::fmt::Debug, { let channel = channel.clone(); let (tx, rx) = oneshot::channel::<Result<R, CubeError>>(); let tx_mutex = Arc::new(Mutex::new(Some(tx))); let async_channel = JsAsyncChannel::new(Box::new(move |result| { let to_channel = match result { // @todo Optimize? Into? Ok(buffer_as_str) => match serde_json::from_str::<R>(&buffer_as_str) { Ok(json) => Ok(json), Err(err) => Err(CubeError::internal(err.to_string())), }, Err(err) => Err(CubeError::internal(err.to_string())), }; if let Some(tx) = tx_mutex.lock().unwrap().take() { tx.send(to_channel).unwrap(); } else { panic!("Resolve/Reject was called on AsyncChannel that was already resolved"); } })); channel.send(move |mut cx| { // https://github.com/neon-bindings/neon/issues/672 let method = match Arc::try_unwrap(js_method) { Ok(v) => v.into_inner(&mut cx), Err(v) => v.as_ref().to_inner(&mut cx), }; let this = cx.undefined(); let args: Vec<Handle<JsValue>> = vec![ if let Some(q) = query { cx.string(q).upcast::<JsValue>() } else { cx.null().upcast::<JsValue>() }, async_channel.to_object(&mut cx)?.upcast::<JsValue>(), ]; method.call(&mut cx, this, args)?; Ok(()) }); rx.await? }
28.850746
92
0.589498
5b2e998c451a6f20578b9225a6c57491a31ade8d
18,656
use alloc::boxed::Box; use alloc::format; use alloc::string::String; use alloc::vec::Vec; use core::any::Any; use core::cmp::min; use arrayref::array_ref; use crate::math::vector::Vector; use crate::rsp::rsp::RSP; use crate::rsp::rsp_assembler::{E, GPR, RSPAssembler, VR}; use crate::rsp::spmem::SPMEM; use crate::tests::{Level, Test}; use crate::tests::soft_asserts::{soft_assert_eq, soft_assert_eq2, soft_assert_eq_vector}; // Load some data via a load instruction and store it back via SQV. Then verify the result // Findings (for loads): // - The element specifier specifies the starting element. If there isn't enough room after e, // there is no wrap-around but the number of bytes load is reduced // - Apart from the "enough room after e" restriction above, the following number of bytes is being loaded: // - LQV: Loads until the end of the current 16 byte region // - LDV: 8 bytes // - LLV: 4 bytes // - LSV: 2 bytes // - LBV: 1 byte // - LPV loads 1 bytes into the higher half of a word. It has some weird element-handling wrt. overflow // - LUV is identical to LPV, but the result is shifted down by 1 bit // - LHV is close to LUV, but the source index is multiplied by 2. Also the offset is shifted by 4 instead of 3 // - LFV is complicated // - LWV doesn't exist - it does nothing // - LTV loads data into multiple vectors (touching always a quarter of the vectors at a time) // Only three instructions can overflow (meaning: read from the end and the beginning of DMEM): LSV, LLV, LDB. // All the others use alignment to stay with 16b. fn test<F: Fn(&mut RSPAssembler, E), F2: Fn(&mut [u8; 16], &[u8; 256], E, u32)>(base_offset: usize, load_emitter: F, calculate_expected: F2) -> Result<(), String> { // Alignment and element specifiers to test. 
If we pass these, we'll probably pass everything const TEST_MISALIGNMENTS: [u32; 11] = [0, 1, 6, 7, 8, 10, 11, 12, 13, 14, 15]; const TEST_ELEMENTS: [E; 9] = [E::_0, E::_1, E::_4, E::_5, E::_6, E::_8, E::_12, E::_14, E::_15]; const OUTPUT_MEMORY_START: u32 = 0x100; // Prepare input data: Each byte will simply be its index let mut test_data: [u8; 256] = [0; 256]; for i in 0..test_data.len() { test_data[i] = i as u8; } let clear_vector: [u8; 16] = *array_ref![test_data, 0, 16]; // Write into DMEM SPMEM::write_vector8_into_dmem(base_offset, &test_data); // Assemble RSP program let mut assembler = RSPAssembler::new(0); // Guard V0 and V1 by clearing them and verifying them below as well assembler.write_li(GPR::AT, base_offset as u32); assembler.write_lqv(VR::V0, E::_0, 0, GPR::AT); assembler.write_lqv(VR::V2, E::_0, 0, GPR::AT); // AT: base_offset (e.g. 0 or 0xFF0 for overflow tests) // A0: Offset in memory to read from // A1: Address to write result to assembler.write_li(GPR::A1, OUTPUT_MEMORY_START); for misalignment in TEST_MISALIGNMENTS { assembler.write_li(GPR::A0, base_offset as u32 + misalignment); for e in TEST_ELEMENTS { // clear assembler.write_lqv(VR::V1, E::_0, 0x000, GPR::AT); // load load_emitter(&mut assembler, e); // save result assembler.write_sqv(VR::V1, E::_0, 0x000, GPR::A1); assembler.write_addiu(GPR::A1, GPR::A1, 0x10); } } // Emulators might write out-of-bounds and accidentally modify the next register. 
Verify by writing back V1 assembler.write_sqv(VR::V0, E::_0, 0x030, GPR::R0); assembler.write_sqv(VR::V2, E::_0, 0x040, GPR::R0); assembler.write_break(); RSP::run_and_wait(0); soft_assert_eq(SPMEM::read_vector8_from_dmem(0x030), clear_vector, "V0 was modified eventhough it wasn't being written to")?; soft_assert_eq(SPMEM::read_vector8_from_dmem(0x040), clear_vector, "V2 was modified eventhough it wasn't being written to")?; let mut memory_address = OUTPUT_MEMORY_START as usize; for offset in TEST_MISALIGNMENTS { assembler.write_li(GPR::A0, offset); for e in TEST_ELEMENTS { let mut expected = clear_vector; calculate_expected(&mut expected, &test_data, e, offset); soft_assert_eq2(SPMEM::read_vector8_from_dmem(memory_address), expected, || format!("Load with e={:?} from memory location {:x}", e, base_offset as u32 + offset))?; memory_address += 0x10; } } Ok(()) } fn test_simple<F: Fn(&mut RSPAssembler, E), F2: Fn(u32) -> u32>(base_offset: usize, load_emitter: F, maximum_bytes_from_offset: F2) -> Result<(), String> { test(base_offset, load_emitter, |expected, test_data, e, offset| { let remaining_in_vector = 16 - (e as u32); let remaining_bytes_from_offset = maximum_bytes_from_offset(offset); for i in 0..min(remaining_in_vector, remaining_bytes_from_offset) { expected[(e as usize) + i as usize] = test_data[(offset + i) as usize + 0x20]; } }) } fn test_unpack<F: Fn(&mut RSPAssembler, E), const SHIFT: u32, const IFACTOR: u32>(base_offset: usize, load_emitter: F) -> Result<(), String> { test(base_offset, load_emitter, |expected, test_data, e, offset| { let misalignment = offset & 7; let aligned_offset = offset & !7; for i in 0..8 { let element_offset = (16 - (e as u32) + ((i as u32) * IFACTOR) + misalignment) & 0xF; let address = 0x20 + aligned_offset + element_offset; let data = ((test_data[address as usize]) as u16) << SHIFT; expected[i * 2] = (data >> 8) as u8; expected[i * 2 + 1] = data as u8; } }) } pub struct LBV {} impl Test for LBV { fn name(&self) -> &str { "RSP 
LBV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0x1000, |assembler, e| assembler.write_lbv(VR::V1, e, 0x020, GPR::A0), |_offset| 1) } } pub struct LSV {} impl Test for LSV { fn name(&self) -> &str { "RSP LSV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0x1000, |assembler, e| assembler.write_lsv(VR::V1, e, 0x020, GPR::A0), |_offset| 2) } } pub struct LSVOverflow {} impl Test for LSVOverflow { fn name(&self) -> &str { "RSP LSV (overflow)" } fn level(&self) -> Level { Level::RarelyUsed } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0xFD0, |assembler, e| assembler.write_lsv(VR::V1, e, 0x020, GPR::A0), |_offset| 2) } } pub struct LLV {} impl Test for LLV { fn name(&self) -> &str { "RSP LLV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0x1000, |assembler, e| assembler.write_llv(VR::V1, e, 0x020, GPR::A0), |_offset| 4) } } pub struct LLVOverflow {} impl Test for LLVOverflow { fn name(&self) -> &str { "RSP LLV (overflow)" } fn level(&self) -> Level { Level::RarelyUsed } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0xFD0, |assembler, e| assembler.write_llv(VR::V1, e, 0x020, GPR::A0), |_offset| 4) } } pub struct LDV {} impl Test for LDV { fn name(&self) -> &str { "RSP LDV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0x1000, |assembler, e| assembler.write_ldv(VR::V1, e, 
0x020, GPR::A0), |_offset| 8) } } pub struct LDVOverflow {} impl Test for LDVOverflow { fn name(&self) -> &str { "RSP LDV (overflow)" } fn level(&self) -> Level { Level::RarelyUsed } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0xFD0, |assembler, e| assembler.write_ldv(VR::V1, e, 0x020, GPR::A0), |_offset| 8) } } pub struct LQV {} impl Test for LQV { fn name(&self) -> &str { "RSP LQV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0x1000, |assembler, e| assembler.write_lqv(VR::V1, e, 0x020, GPR::A0), |offset| 16 - (offset & 0xF)) } } // LQV can not overflow, but we can test the behavior at the very end pub struct LQVEndOfDMEM {} impl Test for LQVEndOfDMEM { fn name(&self) -> &str { "RSP LQV (end of DMEM)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_simple(0xFD0, |assembler, e| assembler.write_lqv(VR::V1, e, 0x020, GPR::A0), |offset| 16 - (offset & 0xF)) } } pub struct LRV {} impl Test for LRV { fn name(&self) -> &str { "RSP LRV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test(0x1000, |assembler, e| assembler.write_lrv(VR::V1, e, 0x020, GPR::A0), |expected, test_data, e, offset| { let bytes_from_offset = offset & 0xF; for i in (16 - bytes_from_offset)..16 { if (e as u32) + i > 15 { break; } expected[(e as usize) + i as usize] = test_data[(16 + offset + i) as usize]; } }) } } // LRV can not overflow, but we can test the behavior at the very start of DMEM pub struct LRVStartOfDMEM {} impl Test for LRVStartOfDMEM { fn name(&self) -> &str { "RSP LRV (start of DMEM)" } fn level(&self) -> Level { 
Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test(0xFE0, |assembler, e| assembler.write_lrv(VR::V1, e, 0x020, GPR::A0), |expected, test_data, e, offset| { let bytes_from_offset = offset & 0xF; for i in (16 - bytes_from_offset)..16 { if (e as u32) + i > 15 { break; } expected[(e as usize) + i as usize] = test_data[(16 + offset + i) as usize]; } }) } } pub struct LPV {} impl Test for LPV { fn name(&self) -> &str { "RSP LPV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 8, 1>(0x1000, |assembler, e| assembler.write_lpv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LPVEndOfDMEM {} impl Test for LPVEndOfDMEM { fn name(&self) -> &str { "RSP LPV (end of DMEM)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 8, 1>(0xFD0, |assembler, e| assembler.write_lpv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LUV {} impl Test for LUV { fn name(&self) -> &str { "RSP LUV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 7, 1>(0x1000, |assembler, e| assembler.write_luv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LUVEndOfDMEM {} impl Test for LUVEndOfDMEM { fn name(&self) -> &str { "RSP LUV (end of DMEM)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 7, 1>(0xFD0, |assembler, e| assembler.write_luv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LHV {} impl Test for LHV { fn name(&self) -> &str { "RSP LHV" } fn level(&self) -> Level { 
Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 7, 2>(0x1000, |assembler, e| assembler.write_lhv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LHVEndOfDMEM {} impl Test for LHVEndOfDMEM { fn name(&self) -> &str { "RSP LHV (end of DMEM)" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test_unpack::<_, 7, 2>(0xFD0, |assembler, e| assembler.write_lhv(VR::V1, e, 0x020, GPR::A0)) } } pub struct LFV {} impl Test for LFV { fn name(&self) -> &str { "RSP LFV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test(0, |assembler, e| assembler.write_lfv(VR::V1, e, 0x020, GPR::A0), |expected, test_data, e, offset| { let mut temp = Vector::new(); let address = 0x20 + offset as usize; let aligned_address = address & !0x7; let misalignment = address & 0x7; let e_ = e as usize; temp.set16(0, (test_data[aligned_address + ((misalignment + e_) & 0xF)] as u16) << 7); temp.set16(1, (test_data[aligned_address + ((misalignment + 4 - e_) & 0xF)] as u16) << 7); temp.set16(2, (test_data[aligned_address + ((misalignment + 8 - e_) & 0xF)] as u16) << 7); temp.set16(3, (test_data[aligned_address + ((misalignment + 12 - e_) & 0xF)] as u16) << 7); temp.set16(4, (test_data[aligned_address + ((misalignment + 8 - e_) & 0xF)] as u16) << 7); temp.set16(5, (test_data[aligned_address + ((misalignment + 12 - e_) & 0xF)] as u16) << 7); temp.set16(6, (test_data[aligned_address + ((misalignment - e_) & 0xF)] as u16) << 7); temp.set16(7, (test_data[aligned_address + ((misalignment + 4 - e_) & 0xF)] as u16) << 7); let length = min(8, 16 - e_); for i in e_..length + e_ { expected[i] = temp.get8(i); } }) } } pub struct LWV {} impl Test for LWV { fn name(&self) -> 
&str { "RSP LWV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { test(0, |assembler, e| assembler.write_lwv(VR::V1, e, 0x020, GPR::A0), |_, _, _, _| { // This doesn't seem to exist - the register will be unchanged }) } } pub struct LTV {} impl Test for LTV { fn name(&self) -> &str { "RSP LTV" } fn level(&self) -> Level { Level::BasicFunctionality } fn values(&self) -> Vec<Box<dyn Any>> { Vec::new() } fn run(&self, _value: &Box<dyn Any>) -> Result<(), String> { // There are 16 values for E, 32 target registers and 16 possible misalignments, resulting in 8192 tests. // Let's cut this down somewhat to 576 const TEST_ELEMENT: [E; 8] = [E::_0, E::_1, E::_2, E::_7, E::_8, E::_9, E::_14, E::_15]; const TEST_OFFSETS: [u32; 8] = [0, 1, 2, 7, 8, 14, 15, 16]; const TEST_VT: [VR; 9] = [VR::V0, VR::V1, VR::V2, VR::V7, VR::V8, VR::V9, VR::V18, VR::V25, VR::V31]; let mut test_data: [u8; 256] = [0; 256]; for i in 0..test_data.len() { test_data[i] = i as u8; } let clear_vector= Vector::from_u8(*array_ref![test_data, 0, 16]); SPMEM::write_vector8_into_dmem(0, &test_data); for offset in TEST_OFFSETS { for vt in TEST_VT { for e in TEST_ELEMENT { let mut assembler = RSPAssembler::new(0); // Clear all registers with 0, 1, 2, 3, 4, 5, 6, 7, 8 for vr in VR::V0..=VR::V31 { assembler.write_lqv(vr, E::_0, 0x000, GPR::R0); } // Load register via LTV assembler.write_li(GPR::A0, offset); assembler.write_ltv(vt, e, 0x20, GPR::A0); // Write back all registers for vr in VR::V0..=VR::V31 { assembler.write_sqv(vr, E::_0, (0x100 + vr.index() * 0x10) as i32, GPR::R0); } assembler.write_break(); RSP::start_running(0); // Simulate on the CPU let mut expected: [Vector; 32] = [clear_vector; 32]; let vt_base_index = vt.index() & !7; // Every other vector is rotated by 8 bytes let odd_offset = if (offset & 8) != 0 { 8 } else { 0 }; let base_address = 0x20 + ((offset as usize) & !7); for 
i in 0..8 { let reg_offset = ((e.index() >> 1) + i) & 0x7; expected[vt_base_index + reg_offset].set8(i * 2, test_data[base_address + ((odd_offset + e.index() + i * 2) & 0xF)]); expected[vt_base_index + reg_offset].set8(i * 2 + 1, test_data[base_address + ((odd_offset + e.index() + i * 2 + 1) & 0xF)]); } RSP::wait_until_rsp_is_halted(); // Verify results for i in 0..31 { soft_assert_eq_vector(SPMEM::read_vector_from_dmem(0x100 + i * 0x10), expected[i], || format!("Register[{}] after LTV {:?}[{}] from 0x{:x}", i, vt, e.index(), 0x20 + offset))?; } } } } Ok(()) } }
36.155039
200
0.560838
ffbf6b93cca441e256e4fd540222d4515ad1b1ad
1,348
use disn::CONFIG; use std::time::Duration; use tokio::time::sleep; mod helpers; #[tokio::test] async fn did_api_works() { let endpoint = helpers::spawn_app().await; sleep(Duration::from_millis(1000)).await; let client = reqwest::Client::new(); // Act let response = client .post(format!("{}/did/create", endpoint)) .header("x-api-key", &CONFIG.did.api_key) .send() .await .expect("Failed to execute request."); // Assert assert!(response.status().is_success()); let did = response.text().await.unwrap(); println!("created did:{}", did); let response = client .get(format!("{}/did/resolve/{}", endpoint, did)) .header("x-api-key", &CONFIG.did.api_key) .send() .await .expect("Failed to execute request."); assert!(response.status().is_success()); println!( "did resolve content length:{:?}", response.content_length().unwrap() ); println!("did document:{}", response.text().await.unwrap()); // incorrect api key should fail let response = client .post(format!("{}/did/create", endpoint)) .header("x-api-key", " ".to_string()) .send() .await .expect("Failed to execute request."); // Assert assert!(!response.status().is_success()); }
26.96
64
0.579377
012e0ec9e3167f0249cab827a174bd230933167d
3,386
use rafx::render_feature_extract_job_predule::*; use super::{ ExtractedSpriteData, SpritePrepareJob, SpriteRenderNode, SpriteRenderNodeSet, SpriteStaticResources, }; use rafx::assets::AssetManagerRenderResource; use rafx::base::slab::RawSlabKey; pub struct SpriteExtractJob {} impl SpriteExtractJob { pub fn new() -> Self { Self {} } } impl ExtractJob for SpriteExtractJob { fn extract( self: Box<Self>, extract_context: &RenderJobExtractContext, frame_packet: &FramePacket, _views: &[RenderView], ) -> Box<dyn PrepareJob> { profiling::scope!(super::EXTRACT_SCOPE_NAME); let asset_manager = extract_context .render_resources .fetch::<AssetManagerRenderResource>(); // Update the mesh render nodes. This could be done earlier as part of a system let mut sprite_render_nodes = extract_context .extract_resources .fetch_mut::<SpriteRenderNodeSet>(); sprite_render_nodes.update(); let mut extracted_frame_node_sprite_data = Vec::<Option<ExtractedSpriteData>>::with_capacity( frame_packet.frame_node_count(self.feature_index()) as usize, ); { profiling::scope!("per frame node"); for frame_node in frame_packet.frame_nodes(self.feature_index()) { let render_node_index = frame_node.render_node_index(); let render_node_handle = RawSlabKey::<SpriteRenderNode>::new(render_node_index); let sprite_render_node = sprite_render_nodes .sprites .get_raw(render_node_handle) .unwrap(); let image_asset = asset_manager.committed_asset(&sprite_render_node.image); let extracted_frame_node = image_asset.and_then(|image_asset| { let texture_extents = image_asset.image.get_raw().image.texture_def().extents; Some(ExtractedSpriteData { position: sprite_render_node.position, texture_size: glam::Vec2::new( texture_extents.width as f32, texture_extents.height as f32, ), scale: sprite_render_node.scale, rotation: sprite_render_node.rotation, color: sprite_render_node.tint.extend(sprite_render_node.alpha), image_view: image_asset.image_view.clone(), }) }); 
extracted_frame_node_sprite_data.push(extracted_frame_node); } } let static_resources = extract_context .render_resources .fetch::<SpriteStaticResources>(); let sprite_material = asset_manager .committed_asset(&static_resources.sprite_material) .unwrap() .get_single_material_pass() .unwrap(); let prepare_impl = SpritePrepareJob::new(extracted_frame_node_sprite_data, sprite_material); Box::new(prepare_impl) } fn feature_debug_name(&self) -> &'static str { super::render_feature_debug_name() } fn feature_index(&self) -> RenderFeatureIndex { super::render_feature_index() } }
34.55102
100
0.601595
f9dc0fce09fc0550502c0ac86971b4d9e59ed8b3
13,412
#[doc = "Reader of register LSR"] pub type R = crate::R<u32, super::LSR>; #[doc = "Receiver Data Ready. LSR\\[0\\] is set when the RBR holds an unread character and is cleared when the UART1 RBR FIFO is empty.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RDR_A { #[doc = "0: The UART1 receiver FIFO is empty."] EMPTY = 0, #[doc = "1: The UART1 receiver FIFO is not empty."] NOTEMPTY = 1, } impl From<RDR_A> for bool { #[inline(always)] fn from(variant: RDR_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `RDR`"] pub type RDR_R = crate::R<bool, RDR_A>; impl RDR_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RDR_A { match self.bits { false => RDR_A::EMPTY, true => RDR_A::NOTEMPTY, } } #[doc = "Checks if the value of the field is `EMPTY`"] #[inline(always)] pub fn is_empty(&self) -> bool { *self == RDR_A::EMPTY } #[doc = "Checks if the value of the field is `NOTEMPTY`"] #[inline(always)] pub fn is_notempty(&self) -> bool { *self == RDR_A::NOTEMPTY } } #[doc = "Overrun Error. The overrun error condition is set as soon as it occurs. An LSR read clears LSR\\[1\\]. LSR\\[1\\] is set when UART1 RSR has a new character assembled and the UART1 RBR FIFO is full. 
In this case, the UART1 RBR FIFO will not be overwritten and the character in the UART1 RSR will be lost.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum OE_A { #[doc = "0: Overrun error status is inactive."] INACTIVE = 0, #[doc = "1: Overrun error status is active."] ACTIVE = 1, } impl From<OE_A> for bool { #[inline(always)] fn from(variant: OE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `OE`"] pub type OE_R = crate::R<bool, OE_A>; impl OE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> OE_A { match self.bits { false => OE_A::INACTIVE, true => OE_A::ACTIVE, } } #[doc = "Checks if the value of the field is `INACTIVE`"] #[inline(always)] pub fn is_inactive(&self) -> bool { *self == OE_A::INACTIVE } #[doc = "Checks if the value of the field is `ACTIVE`"] #[inline(always)] pub fn is_active(&self) -> bool { *self == OE_A::ACTIVE } } #[doc = "Parity Error. When the parity bit of a received character is in the wrong state, a parity error occurs. An LSR read clears LSR\\[2\\]. Time of parity error detection is dependent on FCR\\[0\\]. 
Note: A parity error is associated with the character at the top of the UART1 RBR FIFO.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum PE_A { #[doc = "0: Parity error status is inactive."] INACTIVE = 0, #[doc = "1: Parity error status is active."] ACTIVE = 1, } impl From<PE_A> for bool { #[inline(always)] fn from(variant: PE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `PE`"] pub type PE_R = crate::R<bool, PE_A>; impl PE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PE_A { match self.bits { false => PE_A::INACTIVE, true => PE_A::ACTIVE, } } #[doc = "Checks if the value of the field is `INACTIVE`"] #[inline(always)] pub fn is_inactive(&self) -> bool { *self == PE_A::INACTIVE } #[doc = "Checks if the value of the field is `ACTIVE`"] #[inline(always)] pub fn is_active(&self) -> bool { *self == PE_A::ACTIVE } } #[doc = "Framing Error. When the stop bit of a received character is a logic 0, a framing error occurs. An LSR read clears LSR\\[3\\]. The time of the framing error detection is dependent on FCR0. Upon detection of a framing error, the RX will attempt to resynchronize to the data and assume that the bad stop bit is actually an early start bit. However, it cannot be assumed that the next received byte will be correct even if there is no Framing Error. 
Note: A framing error is associated with the character at the top of the UART1 RBR FIFO.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum FE_A { #[doc = "0: Framing error status is inactive."] INACTIVE = 0, #[doc = "1: Framing error status is active."] ACTIVE = 1, } impl From<FE_A> for bool { #[inline(always)] fn from(variant: FE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `FE`"] pub type FE_R = crate::R<bool, FE_A>; impl FE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> FE_A { match self.bits { false => FE_A::INACTIVE, true => FE_A::ACTIVE, } } #[doc = "Checks if the value of the field is `INACTIVE`"] #[inline(always)] pub fn is_inactive(&self) -> bool { *self == FE_A::INACTIVE } #[doc = "Checks if the value of the field is `ACTIVE`"] #[inline(always)] pub fn is_active(&self) -> bool { *self == FE_A::ACTIVE } } #[doc = "Break Interrupt. When RXD1 is held in the spacing state (all zeroes) for one full character transmission (start, data, parity, stop), a break interrupt occurs. Once the break condition has been detected, the receiver goes idle until RXD1 goes to marking state (all ones). An LSR read clears this status bit. The time of break detection is dependent on FCR\\[0\\]. 
Note: The break interrupt is associated with the character at the top of the UART1 RBR FIFO.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BI_A { #[doc = "0: Break interrupt status is inactive."] INACTIVE = 0, #[doc = "1: Break interrupt status is active."] ACTIVE = 1, } impl From<BI_A> for bool { #[inline(always)] fn from(variant: BI_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `BI`"] pub type BI_R = crate::R<bool, BI_A>; impl BI_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BI_A { match self.bits { false => BI_A::INACTIVE, true => BI_A::ACTIVE, } } #[doc = "Checks if the value of the field is `INACTIVE`"] #[inline(always)] pub fn is_inactive(&self) -> bool { *self == BI_A::INACTIVE } #[doc = "Checks if the value of the field is `ACTIVE`"] #[inline(always)] pub fn is_active(&self) -> bool { *self == BI_A::ACTIVE } } #[doc = "Transmitter Holding Register Empty. THRE is set immediately upon detection of an empty UART1 THR and is cleared on a THR write.\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum THRE_A { #[doc = "0: THR contains valid data."] VALID = 0, #[doc = "1: THR is empty."] THR_IS_EMPTY_ = 1, } impl From<THRE_A> for bool { #[inline(always)] fn from(variant: THRE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `THRE`"] pub type THRE_R = crate::R<bool, THRE_A>; impl THRE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> THRE_A { match self.bits { false => THRE_A::VALID, true => THRE_A::THR_IS_EMPTY_, } } #[doc = "Checks if the value of the field is `VALID`"] #[inline(always)] pub fn is_valid(&self) -> bool { *self == THRE_A::VALID } #[doc = "Checks if the value of the field is `THR_IS_EMPTY_`"] #[inline(always)] pub fn is_thr_is_empty_(&self) -> bool { *self == THRE_A::THR_IS_EMPTY_ } } #[doc = "Transmitter Empty. 
TEMT is set when both THR and TSR are empty; TEMT is cleared when either the TSR or the THR contain valid data.\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TEMT_A { #[doc = "0: THR and/or the TSR contains valid data."] VALID = 0, #[doc = "1: THR and the TSR are empty."] EMPTY = 1, } impl From<TEMT_A> for bool { #[inline(always)] fn from(variant: TEMT_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `TEMT`"] pub type TEMT_R = crate::R<bool, TEMT_A>; impl TEMT_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> TEMT_A { match self.bits { false => TEMT_A::VALID, true => TEMT_A::EMPTY, } } #[doc = "Checks if the value of the field is `VALID`"] #[inline(always)] pub fn is_valid(&self) -> bool { *self == TEMT_A::VALID } #[doc = "Checks if the value of the field is `EMPTY`"] #[inline(always)] pub fn is_empty(&self) -> bool { *self == TEMT_A::EMPTY } } #[doc = "Error in RX FIFO. LSR\\[7\\] is set when a character with a RX error such as framing error, parity error or break interrupt, is loaded into the RBR. 
This bit is cleared when the LSR register is read and there are no subsequent errors in the UART1 FIFO.\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum RXFE_A { #[doc = "0: RBR contains no UART1 RX errors or FCR\\[0\\]=0."] NOERROR = 0, #[doc = "1: UART1 RBR contains at least one UART1 RX error."] ERRORS = 1, } impl From<RXFE_A> for bool { #[inline(always)] fn from(variant: RXFE_A) -> Self { variant as u8 != 0 } } #[doc = "Reader of field `RXFE`"] pub type RXFE_R = crate::R<bool, RXFE_A>; impl RXFE_R { #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> RXFE_A { match self.bits { false => RXFE_A::NOERROR, true => RXFE_A::ERRORS, } } #[doc = "Checks if the value of the field is `NOERROR`"] #[inline(always)] pub fn is_noerror(&self) -> bool { *self == RXFE_A::NOERROR } #[doc = "Checks if the value of the field is `ERRORS`"] #[inline(always)] pub fn is_errors(&self) -> bool { *self == RXFE_A::ERRORS } } impl R { #[doc = "Bit 0 - Receiver Data Ready. LSR\\[0\\] is set when the RBR holds an unread character and is cleared when the UART1 RBR FIFO is empty."] #[inline(always)] pub fn rdr(&self) -> RDR_R { RDR_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Overrun Error. The overrun error condition is set as soon as it occurs. An LSR read clears LSR\\[1\\]. LSR\\[1\\] is set when UART1 RSR has a new character assembled and the UART1 RBR FIFO is full. In this case, the UART1 RBR FIFO will not be overwritten and the character in the UART1 RSR will be lost."] #[inline(always)] pub fn oe(&self) -> OE_R { OE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Parity Error. When the parity bit of a received character is in the wrong state, a parity error occurs. An LSR read clears LSR\\[2\\]. Time of parity error detection is dependent on FCR\\[0\\]. 
Note: A parity error is associated with the character at the top of the UART1 RBR FIFO."] #[inline(always)] pub fn pe(&self) -> PE_R { PE_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Framing Error. When the stop bit of a received character is a logic 0, a framing error occurs. An LSR read clears LSR\\[3\\]. The time of the framing error detection is dependent on FCR0. Upon detection of a framing error, the RX will attempt to resynchronize to the data and assume that the bad stop bit is actually an early start bit. However, it cannot be assumed that the next received byte will be correct even if there is no Framing Error. Note: A framing error is associated with the character at the top of the UART1 RBR FIFO."] #[inline(always)] pub fn fe(&self) -> FE_R { FE_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Break Interrupt. When RXD1 is held in the spacing state (all zeroes) for one full character transmission (start, data, parity, stop), a break interrupt occurs. Once the break condition has been detected, the receiver goes idle until RXD1 goes to marking state (all ones). An LSR read clears this status bit. The time of break detection is dependent on FCR\\[0\\]. Note: The break interrupt is associated with the character at the top of the UART1 RBR FIFO."] #[inline(always)] pub fn bi(&self) -> BI_R { BI_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Transmitter Holding Register Empty. THRE is set immediately upon detection of an empty UART1 THR and is cleared on a THR write."] #[inline(always)] pub fn thre(&self) -> THRE_R { THRE_R::new(((self.bits >> 5) & 0x01) != 0) } #[doc = "Bit 6 - Transmitter Empty. TEMT is set when both THR and TSR are empty; TEMT is cleared when either the TSR or the THR contain valid data."] #[inline(always)] pub fn temt(&self) -> TEMT_R { TEMT_R::new(((self.bits >> 6) & 0x01) != 0) } #[doc = "Bit 7 - Error in RX FIFO. 
LSR\\[7\\] is set when a character with a RX error such as framing error, parity error or break interrupt, is loaded into the RBR. This bit is cleared when the LSR register is read and there are no subsequent errors in the UART1 FIFO."] #[inline(always)] pub fn rxfe(&self) -> RXFE_R { RXFE_R::new(((self.bits >> 7) & 0x01) != 0) } }
39.563422
566
0.616836
1ad3a8a15c6522be632d0a59fbc8920ac4f4a3ff
3,668
use super::driver::*; use super::XlsxError; use quick_xml::events::Event; use quick_xml::Reader; use std::{io, result}; use structs::DefinedName; use structs::Spreadsheet; use structs::WorkbookView; use structs::Worksheet; const FILE_PATH: &str = "xl/workbook.xml"; pub(crate) fn read<R: io::Read + io::Seek>( arv: &mut zip::read::ZipArchive<R>, ) -> result::Result<Spreadsheet, XlsxError> { let r = io::BufReader::new(arv.by_name(FILE_PATH)?); let mut reader = Reader::from_reader(r); reader.trim_text(true); let mut buf = Vec::new(); let mut spreadsheet = Spreadsheet::default(); let mut defined_name_value = String::from(""); let mut is_local_only = false; let mut string_value = String::from(""); let mut defined_names: Vec<DefinedName> = Vec::new(); loop { match reader.read_event(&mut buf) { Ok(Event::Empty(ref e)) => match e.name() { b"workbookView" => { let mut obj = WorkbookView::default(); obj.set_attributes(&mut reader, e); spreadsheet.set_workbook_view(obj); } b"sheet" => { let name_value = get_attribute(e, b"name").unwrap(); let sheet_id_value = get_attribute(e, b"sheetId").unwrap(); let r_id_value = get_attribute(e, b"r:id").unwrap(); let mut worksheet = Worksheet::default(); worksheet.set_name(name_value); worksheet.set_sheet_id(sheet_id_value); worksheet.set_r_id(r_id_value); let _ = spreadsheet.add_sheet(worksheet); } b"pivotCache" => { let cache_id = get_attribute(e, b"cacheId").unwrap(); let r_id = get_attribute(e, b"r:id").unwrap(); spreadsheet.add_pivot_caches((r_id, cache_id, String::from(""))); } _ => (), }, Ok(Event::Start(ref e)) => match e.name() { b"definedName" => { defined_name_value = get_attribute(e, b"name").unwrap(); is_local_only = match get_attribute(e, b"localSheetId") { Some(_) => true, None => false, }; } _ => (), }, Ok(Event::Text(e)) => string_value = e.unescape_and_decode(&reader).unwrap(), Ok(Event::End(ref e)) => match e.name() { b"definedName" => { let mut defined_name = DefinedName::default(); 
defined_name.set_name(defined_name_value); defined_name.set_address(string_value); defined_name.set_is_local_only(is_local_only); defined_names.push(defined_name); defined_name_value = String::from(""); string_value = String::from(""); is_local_only = false; } _ => (), }, Ok(Event::Eof) => break, Err(e) => panic!("Error at position {}: {:?}", reader.buffer_position(), e), _ => (), } buf.clear(); } for sheet in spreadsheet.get_sheet_collection_mut() { for defined_name in &defined_names { let def_sheet_name = defined_name.get_address_obj().get_sheet_name(); if sheet.get_name() == def_sheet_name { sheet.add_defined_names(defined_name.clone()); } } } Ok(spreadsheet) }
39.021277
89
0.512268
1d371453c1c9707f2bb48be05933c5622e114b0c
1,975
//! Progress-Bar implementation. /// Progress-bar structure. pub struct ProgressBar { /// Graphics. pb: indicatif::ProgressBar, /// Current value. count: usize, /// Total target value. total: usize, } impl ProgressBar { /// Construct a new instance. #[inline] #[must_use] pub fn new(msg: &'static str, total: usize) -> Self { debug_assert!(total > 0); let pb = indicatif::ProgressBar::new(total as u64); pb.set_style( indicatif::ProgressStyle::default_bar() .template("{spinner:.green} [{elapsed_precise}] [{bar:40.green/red}] [{pos}/{len}] {percent}% ({eta}) {msg}") .progress_chars("\\/") ); pb.set_message(msg); Self { pb, count: 0, total, } } /// Tick the bar forward a single increment. #[inline] pub fn tick(&mut self) { self.count += 1; self.pb.inc(1); } /// Request a block of values to work on. /// Return the requested block if available. /// If there is not enough, return the remaining block. /// If there are none at all, return None. #[inline] pub fn block(&mut self, size: usize) -> Option<(usize, usize)> { debug_assert!(size > 0); if self.count >= self.total { None } else { let remaining = self.total - self.count; let alloc = size.min(remaining); let start = self.count; let end = start + alloc; self.count += alloc; self.pb.inc(alloc as u64); Some((start, end)) } } /// Check if the progress bar is complete. #[inline] #[must_use] pub const fn is_done(&self) -> bool { self.count >= self.total } /// Finish with a message. #[inline] pub fn finish_with_message(&mut self, msg: &'static str) { self.pb.finish_with_message(msg); } }
24.6875
121
0.53519
2238861f13b14d60b5c2f784e377cce15aa0e0cc
3,982
// Copyright (c) The Diem Core Contributors // SPDX-License-Identifier: Apache-2.0 use move_package::BuildConfig; pub mod base; pub mod experimental; pub mod package; pub mod sandbox; /// Default directory where saved Move resources live pub const DEFAULT_STORAGE_DIR: &str = "storage"; /// Default directory for build output pub const DEFAULT_BUILD_DIR: &str = "."; /// Extension for resource and event files, which are in BCS format const BCS_EXTENSION: &str = "bcs"; use anyhow::Result; use move_core_types::{ account_address::AccountAddress, errmap::ErrorMapping, gas_schedule::CostTable, identifier::Identifier, }; use move_vm_runtime::native_functions::NativeFunction; use std::path::PathBuf; use structopt::StructOpt; type NativeFunctionRecord = (AccountAddress, Identifier, Identifier, NativeFunction); #[derive(StructOpt)] #[structopt( name = "move", about = "CLI frontend for Move compiler and VM", rename_all = "kebab-case" )] pub struct Move { /// Path to a package which the command should be run with respect to. #[structopt( long = "path", short = "p", global = true, parse(from_os_str), default_value = "." )] package_path: PathBuf, /// Print additional diagnostics if available. #[structopt(short = "v", global = true)] verbose: bool, /// Package build options #[structopt(flatten)] build_config: BuildConfig, } /// MoveCLI is the CLI that will be executed by the `move-cli` command /// The `cmd` argument is added here rather than in `Move` to make it /// easier for other crates to extend `move-cli` #[derive(StructOpt)] pub struct MoveCLI { #[structopt(flatten)] move_args: Move, #[structopt(subcommand)] cmd: Command, } #[derive(StructOpt)] pub enum Command { /// Execute a package command. Executed in the current directory or the closest containing Move /// package. #[structopt(name = "package")] Package { #[structopt(subcommand)] cmd: package::cli::PackageCommand, }, /// Execute a sandbox command. 
#[structopt(name = "sandbox")] Sandbox { /// Directory storing Move resources, events, and module bytecodes produced by module publishing /// and script execution. #[structopt(long, default_value = DEFAULT_STORAGE_DIR, parse(from_os_str))] storage_dir: PathBuf, #[structopt(subcommand)] cmd: sandbox::cli::SandboxCommand, }, /// (Experimental) Run static analyses on Move source or bytecode. #[structopt(name = "experimental")] Experimental { /// Directory storing Move resources, events, and module bytecodes produced by module publishing /// and script execution. #[structopt(long, default_value = DEFAULT_STORAGE_DIR, parse(from_os_str))] storage_dir: PathBuf, #[structopt(subcommand)] cmd: experimental::cli::ExperimentalCommand, }, } pub fn run_cli( natives: Vec<NativeFunctionRecord>, cost_table: &CostTable, error_descriptions: &ErrorMapping, move_args: &Move, cmd: &Command, ) -> Result<()> { match cmd { Command::Sandbox { storage_dir, cmd } => cmd.handle_command( natives, cost_table, error_descriptions, move_args, storage_dir, ), Command::Experimental { storage_dir, cmd } => cmd.handle_command(move_args, storage_dir), Command::Package { cmd } => package::cli::handle_package_commands( &move_args.package_path, move_args.build_config.clone(), cmd, natives, ), } } pub fn move_cli( natives: Vec<NativeFunctionRecord>, cost_table: &CostTable, error_descriptions: &ErrorMapping, ) -> Result<()> { let args = MoveCLI::from_args(); run_cli( natives, cost_table, error_descriptions, &args.move_args, &args.cmd, ) }
28.647482
104
0.650427
08bf83096c9fc9d04bb22a2e33fbf322bbb7a639
492
use rlua::prelude::*; pub mod git; pub mod log; pub mod markdown; pub mod tera; pub mod diff; #[cfg(feature = "tantivy_bindings")] pub mod tantivy; // Dummy modules #[cfg(not(feature = "tantivy_bindings"))] pub mod tantivy { pub fn init(_: &rlua::Lua) -> rlua::Result<()> { Ok(()) } } pub fn init(lua: &Lua) -> LuaResult<()> { git::init(&lua)?; log::init(&lua)?; markdown::init(&lua)?; tantivy::init(&lua)?; tera::init(&lua)?; diff::init(&lua)?; Ok(()) }
17.571429
61
0.579268
28bdfc362a99d2e59c2744528f9c1b60b530b306
2,039
#![allow(non_snake_case)] #![allow(dead_code)] #![allow(unused_macros)] use lexer::Lexer; use stmt::Stmt; use expr::Expr; use token::*; #[derive(Debug)] pub struct Parser { lex: Lexer } impl Parser { pub fn new(lex: Lexer) -> Parser { Parser { lex } } fn skipNewLine(&mut self) { let isNL = if let Some(t) = self.lex.peek() { if let TokenType::NewLine = t.ttype { true } else { false } } else { false }; if isNL { self.lex.next(); } } pub fn parseStmt(&mut self) -> Result<Stmt, String> { match self.lex.next() { Some(token) => { match token.ttype { TokenType::Kprint => self.parsePrintStmt(&token), _ => Err(format!("({}:{}) Failed to parse statement: unexpected token {:?}", token.line, token.column, token.ttype)) } }, None => Err(format!("Failed to parse statement: reached end of file")) } } fn parsePrintStmt(&mut self, token: &Token) -> Result<Stmt, String> { self.skipNewLine(); match self.parseExpr() { Ok(expr) => Ok(Stmt::Print(expr)), Err(err) => Err(format!("({}:{}) Failed to parse print statement: {}", token.line, token.column, err)) } } pub fn parseExpr(&mut self) -> Result<Expr, String> { self.parsePrimary() } pub fn parsePrimary(&mut self) -> Result<Expr, String> { use TokenType::*; match self.lex.next() { Some(token) => { match token.ttype { IntLiteral(i) => Ok(Expr::Int(i)), _ => Err(format!("({}:{}) Failed to parse primary expression: unexpected token {:?}", token.line, token.column, token.ttype)) } }, Option::None => { Err(format!("Failed to parse primary expression: reached end of file")) } } } }
27.931507
145
0.495341
71cfb170bf6aa37adcf426d280deec067e6ba0c3
850
// run-pass // compile-flags: -Zdrop-tracking // Based on addassign-yield.rs, but with drop tracking enabled. Originally we did not implement // the fake_read callback on ExprUseVisitor which caused this case to break. #![feature(generators)] fn foo() { let _y = static || { let x = &mut 0; *{ yield; x } += match String::new() { _ => 0, }; }; // Please don't ever actually write something like this let _z = static || { let x = &mut 0; *{ let inner = &mut 1; *{ yield (); inner } += match String::new() { _ => 1, }; yield; x } += match String::new() { _ => 2, }; }; } fn main() { foo() }
20.238095
95
0.429412
38972aad8bef285d05af6373776d6ddaed05fe52
237
//TODO While in debug mode allow for a snapshot of the program to be created? //TODO Should there be a separate debugging runtime? //TODO For now the compiler will have a hta binary creator and the runtime will have a hta binary reader.
59.25
105
0.780591
8a96a577c501f99638521624ea51aef2a20b9f26
7,279
/* * Copyright 2020 Fluence Labs Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ use log::LevelFilter; use std::collections::HashMap; /// A logger filter. /// /// This struct can be used to determine whether or not /// a log record should be written to the output. #[derive(Clone, Default, Eq, PartialEq, Debug)] pub(crate) struct LoggerFilter<'env_string> { default_log_level: Option<LevelFilter>, module_levels: HashMap<&'env_string str, LevelFilter>, } impl<'env_string> LoggerFilter<'env_string> { /// Parses a content of supplied variable in form of "module_name_1=log_level,module_name_2". 
pub(crate) fn from_env_string(env: &'env_string str) -> Self { let mut module_levels = HashMap::new(); let mut default_log_level: Option<LevelFilter> = None; for module_log in env.split(',') { if module_log.is_empty() { continue; } let mut module_log_parts = module_log.split('='); let part_0 = module_log_parts.next(); let part_1 = module_log_parts.next().map(|s| s.trim()); if let Some(part_3) = module_log_parts.next() { eprintln!( "logger warning: invalid directive '{}', ignoring it", part_3 ); continue; } let (module_name, module_log_level) = match (part_0, part_1) { // "info" // "1" (Some(part), None) => match part.parse() { Ok(num) => (None, num), Err(_) => (Some(part), LevelFilter::max()), }, // "module_name=" (Some(module_name), Some("")) => (Some(module_name), LevelFilter::max()), // "module_name=info" (Some(module_name), Some(log_level)) => match log_level.parse() { Ok(log_level) => (Some(module_name), log_level), Err(e) => { eprintln!( "logger warning: invalid directive '{}', error '{}', ignoring it", log_level, e ); continue; } }, d => { eprintln!("logger warning: invalid directive '{:?}', ignoring it", d); continue; } }; match (module_name, &mut default_log_level) { (Some(module_name), _) => { module_levels.insert(module_name, module_log_level); } (None, Some(_)) => { eprintln!( "logger warning: can't set default level twice, '{}' ignored", module_log_level ); } (None, w) => *w = Some(module_log_level), } } Self { default_log_level, module_levels, } } pub(crate) fn module_level(&self, module_name: &str) -> Option<LevelFilter> { self.module_levels .get(module_name) .map_or_else(|| self.default_log_level, |l| Some(*l)) } } #[cfg(test)] mod tests { use super::LoggerFilter; use log::LevelFilter; fn test_one_level_filter(unparsed_level: &str, expected_level: LevelFilter) { let logger_filter = LoggerFilter::from_env_string(unparsed_level); let actual_level = logger_filter .module_level("some_module_name") .expect("global option should work"); 
assert_eq!(actual_level, expected_level); } #[test] fn one_default_filter() { use LevelFilter::*; test_one_level_filter("off", Off); test_one_level_filter("error", Error); test_one_level_filter("warn", Warn); test_one_level_filter("info", Info); test_one_level_filter("debug", Debug); test_one_level_filter("trace", Trace); } #[test] fn module_levels() { use LevelFilter::*; let logger_filter = LoggerFilter::from_env_string( "module_1=off,module_2=error,module_3=warn,module_4=info,module_5=debug,module_6=trace", ); let actual_level = logger_filter .module_level("module_1") .expect("module option should work"); assert_eq!(actual_level, Off); let actual_level = logger_filter .module_level("module_2") .expect("module option should work"); assert_eq!(actual_level, Error); let actual_level = logger_filter .module_level("module_3") .expect("module option should work"); assert_eq!(actual_level, Warn); let actual_level = logger_filter .module_level("module_4") .expect("module option should work"); assert_eq!(actual_level, Info); let actual_level = logger_filter .module_level("module_5") .expect("module option should work"); assert_eq!(actual_level, Debug); let actual_level = logger_filter .module_level("module_6") .expect("module option should work"); assert_eq!(actual_level, Trace); } #[test] fn mixed_default_and_module_levels() { use LevelFilter::*; let logger_filter = LoggerFilter::from_env_string("module_1=off,module_2=error,module_3=warn,module_4=info,module_5=debug,module_6=trace,off"); let actual_level = logger_filter .module_level("module_1") .expect("module option should work"); assert_eq!(actual_level, Off); let actual_level = logger_filter .module_level("module_2") .expect("module option should work"); assert_eq!(actual_level, Error); let actual_level = logger_filter .module_level("module_3") .expect("module option should work"); assert_eq!(actual_level, Warn); let actual_level = logger_filter .module_level("module_4") .expect("module option should work"); 
assert_eq!(actual_level, Info); let actual_level = logger_filter .module_level("module_5") .expect("module option should work"); assert_eq!(actual_level, Debug); let actual_level = logger_filter .module_level("module_6") .expect("module option should work"); assert_eq!(actual_level, Trace); let actual_level = logger_filter .module_level("some_module_name") .expect("global option should work"); assert_eq!(actual_level, Off); } }
34.827751
151
0.566699
2291316118bee2131c79b302f929f08ba481ff35
5,372
use std::collections::HashMap; use std::collections::VecDeque; fn main() { let prog = std::fs::read_to_string("input.txt") .unwrap() .trim() .split(',') .map(|x| x.parse::<i64>().unwrap()) .collect::<Vec<i64>>(); let mut computers : Vec<Computer> = Vec::new(); let mut queues : Vec<VecDeque<i64>> = Vec::new(); for i in 0..50 { computers.push(Computer::new(&prog)); queues.push(VecDeque::new()); queues[i].push_back(i as i64); } let mut iteration : i64 = 0; let mut mx : i64 = 0; let mut my : i64 = 0; let mut ly : i64 = 0; let mut has_nat = false; loop { let mut is_idle = true; for i in 0..50 { let addr = computers[i].run(&mut queues[i]); if addr < 0 { continue; } is_idle = false; let x = computers[i].run(&mut queues[i]); let y = computers[i].run(&mut queues[i]); if addr == 255 { mx = x; my = y; has_nat = true; continue; } queues[addr as usize].push_back(x); queues[addr as usize].push_back(y); } if is_idle && has_nat { if ly == my { println!("{}", my); return; } println!("pushed {} <{}, {}>", iteration, mx, my); queues[0].push_back(mx); queues[0].push_back(my); ly = my; has_nat = false; } iteration += 1; } } struct Computer { index: usize, base: i64, mem: HashMap<usize, i64>, } impl Computer { fn new(prog: &Vec<i64>) -> Computer { let mut computer = Computer { index: 0, base: 0, mem: HashMap::new(), }; for (index, v) in prog.iter().enumerate() { computer.mem.insert(index, *v); } return computer; } fn fetch(&self, address: usize) -> i64 { if let Some(v) = self.mem.get(&address) { return *v; } return 0i64; } fn decode(&self) -> (i64, usize, usize, usize) { let opv = self.fetch(self.index); let c = (opv / 100) % 10; let d = (opv / 1000) % 10; let e = (opv / 10000) % 10; let op = opv % 100; let la = if c == 1 { self.index + 1 } else if c == 0 { self.fetch(self.index + 1) as usize } else { (self.base + self.fetch(self.index + 1)) as usize }; let ra = if d == 1 { self.index + 2 } else if d == 0 { self.fetch(self.index + 2) as usize } else { (self.base + self.fetch(self.index 
+ 2)) as usize }; let ta = if e == 1 { self.index + 3 } else if e == 0 { self.fetch(self.index + 3) as usize } else { (self.base + self.fetch(self.index + 3)) as usize }; return (op, la, ra, ta); } fn run(&mut self, inputs : &mut VecDeque<i64>) -> i64 { loop { let (op, la, ra, ta) = self.decode(); match op { 1 | 2 => { if op == 1 { self.mem.insert(ta, self.fetch(la) + self.fetch(ra)); } else { self.mem.insert(ta, self.fetch(la) * self.fetch(ra)); } self.index += 4; } 3 => { self.index += 2; if let Some(v) = inputs.pop_front() { self.mem.insert(la, v); } else { self.mem.insert(la, -1); return -1; } } 4 => { let out = self.fetch(la); self.index += 2; return out; } 5 | 6 => { self.index = match (op == 5, self.fetch(la) != 0) { (true, true) => self.fetch(ra) as usize, (true, false) => self.index + 3, (false, true) => self.index + 3, (false, false) => self.fetch(ra) as usize, } } 7 | 8 => { if op == 7 { let v: i64 = if self.fetch(la) < self.fetch(ra) { 1 } else { 0 }; self.mem.insert(ta, v); } else { let v: i64 = if self.fetch(la) == self.fetch(ra) { 1 } else { 0 }; self.mem.insert(ta, v); } self.index += 4; } 9 => { self.base += self.fetch(la); self.index += 2; } 99 => break, _ => panic!("Unexpected op code"), } } return -1; } }
28.273684
77
0.364296
647cdbc8667434d147d736d3cb690210aaabc36c
97
use crate::compound_state::*; use std::rc::Rc; pub type Connection = (Rc<CompoundState>, char);
19.4
48
0.701031
bb87d7e0c2ab4f264471f3b555f29df268f439af
6,486
use dominator::{clone, html, Dom}; use std::rc::Rc; use utils::prelude::*; use super::state::*; use crate::{ hebrew_buttons::HebrewButtons, overlay::handle::OverlayHandle, tooltip::{ callbacks::TooltipErrorCallbacks, state::{ Anchor, ContentAnchor, MoveStrategy, State as TooltipState, TooltipData, TooltipError, TooltipTarget, }, }, }; use futures_signals::{map_ref, signal::SignalExt, signal_vec::SignalVecExt}; const STR_DELETE_TITLE: &str = "Warning"; const STR_DELETE_CONTENT: &str = "Are you sure you want to delete this list?"; const STR_DELETE_CONFIRM: &str = "Yes, go ahead!"; const STR_DELETE_CANCEL: &str = "No, keep this list"; pub fn render(state: Rc<State>) -> Dom { html!("sidebar-widget-single-list", { .children(&mut [ HebrewButtons::full().render(Some("hebrew-buttons")), html!("button-rect", { .property("slot", "clear") .property("kind", "text") .property("color", "blue") .text(super::strings::STR_CLEAR) .event(clone!(state => move |_evt:events::Click| { state.confirm_clear.set_neq(true); })) }), html!("button-rect", { .property_signal("disabled", state.is_valid_signal().map(|valid| !valid)) .property("size", "small") .property("iconAfter", "done") .property("slot", "done-btn") .text(super::strings::STR_DONE) .event(clone!(state => move |_evt:events::Click| { match state.derive_list() { Some(list) => { (state.callbacks.replace_list) (list); }, None => { (state.callbacks.set_tooltip_error) (Some( Rc::new(TooltipState::new( TooltipTarget::Element( state.error_element_ref.borrow().as_ref().unwrap_ji().clone(), MoveStrategy::None ), TooltipData::Error(Rc::new(TooltipError { max_width: Some(185.0), target_anchor: Anchor::MiddleRight, content_anchor: ContentAnchor::OppositeH, body: super::strings::error::STR_NUM_WORDS.to_string(), callbacks: TooltipErrorCallbacks::new( Some(clone!(state => move || { (state.callbacks.set_tooltip_error) (None); })) ) })) )) )); } } })) }) ]) .children_signal_vec( state.list.signal_vec_cloned() .enumerate() .map(clone!(state => move 
|(index, value)| { let index = index.get().unwrap_or_default(); html!("sidebar-widget-single-list-input", { .property_signal("value", { clone!(state => map_ref! { let value = value.signal_cloned(), let is_placeholder = state.is_placeholder.signal() => move { if *is_placeholder { (state.callbacks.get_placeholder) (index) .unwrap_or_else(|| "".to_string()) } else { value.clone() } } }) }) .property("constrain", state.callbacks.constrain.as_ref()) .property_signal("placeholder", state.is_placeholder.signal()) .event(clone!(state => move |_evt:events::Focus| { //log::info!("got focus!"); state.is_placeholder.set_neq(false); })) .event(move |evt:events::CustomInput| { value.set_neq(evt.value()); }) .after_inserted(clone!(index, state => move |elem| { if index == 2 { *state.error_element_ref.borrow_mut() = Some(elem); } })) }) })) ) .child_signal(state.confirm_clear.signal_cloned().map(clone!(state => move |confirm_clear| { if confirm_clear { Some(html!("empty-fragment", { .style("display", "none") .apply(OverlayHandle::lifecycle(clone!(state => move || { html!("modal-confirm", { .property("dangerous", true) .property("title", STR_DELETE_TITLE) .property("content", STR_DELETE_CONTENT) .property("cancel_text", STR_DELETE_CANCEL) .property("confirm_text", STR_DELETE_CONFIRM) .event(clone!(state => move |_evt: events::CustomCancel| state.confirm_clear.set_neq(false))) .event(clone!(state => move |_evt: events::CustomConfirm| { state.confirm_clear.set_neq(false); state.clear(); })) }) }))) })) } else { None } }))) }) }
46.661871
121
0.388375
21703e00a23a13b7a3b32d29afdbdae4bdda4528
65,603
//! A module for working with processes. //! //! This module is mostly concerned with spawning and interacting with child //! processes, but it also provides [`abort`] and [`exit`] for terminating the //! current process. //! //! # Spawning a process //! //! The [`Command`] struct is used to configure and spawn processes: //! //! ```no_run //! use std::process::Command; //! //! let output = Command::new("echo") //! .arg("Hello world") //! .output() //! .expect("Failed to execute command"); //! //! assert_eq!(b"Hello world\n", output.stdout.as_slice()); //! ``` //! //! Several methods on [`Command`], such as [`spawn`] or [`output`], can be used //! to spawn a process. In particular, [`output`] spawns the child process and //! waits until the process terminates, while [`spawn`] will return a [`Child`] //! that represents the spawned child process. //! //! # Handling I/O //! //! The [`stdout`], [`stdin`], and [`stderr`] of a child process can be //! configured by passing an [`Stdio`] to the corresponding method on //! [`Command`]. Once spawned, they can be accessed from the [`Child`]. For //! example, piping output from one command into another command can be done //! like so: //! //! ```no_run //! use std::process::{Command, Stdio}; //! //! // stdout must be configured with `Stdio::piped` in order to use //! // `echo_child.stdout` //! let echo_child = Command::new("echo") //! .arg("Oh no, a tpyo!") //! .stdout(Stdio::piped()) //! .spawn() //! .expect("Failed to start echo process"); //! //! // Note that `echo_child` is moved here, but we won't be needing //! // `echo_child` anymore //! let echo_out = echo_child.stdout.expect("Failed to open echo stdout"); //! //! let mut sed_child = Command::new("sed") //! .arg("s/tpyo/typo/") //! .stdin(Stdio::from(echo_out)) //! .stdout(Stdio::piped()) //! .spawn() //! .expect("Failed to start sed process"); //! //! let output = sed_child.wait_with_output().expect("Failed to wait on sed"); //! 
assert_eq!(b"Oh no, a typo!\n", output.stdout.as_slice()); //! ``` //! //! Note that [`ChildStderr`] and [`ChildStdout`] implement [`Read`] and //! [`ChildStdin`] implements [`Write`]: //! //! ```no_run //! use std::process::{Command, Stdio}; //! use std::io::Write; //! //! let mut child = Command::new("/bin/cat") //! .stdin(Stdio::piped()) //! .stdout(Stdio::piped()) //! .spawn() //! .expect("failed to execute child"); //! //! // If the child process fills its stdout buffer, it may end up //! // waiting until the parent reads the stdout, and not be able to //! // read stdin in the meantime, causing a deadlock. //! // Writing from another thread ensures that stdout is being read //! // at the same time, avoiding the problem. //! let mut stdin = child.stdin.take().expect("failed to get stdin"); //! std::thread::spawn(move || { //! stdin.write_all(b"test").expect("failed to write to stdin"); //! }); //! //! let output = child //! .wait_with_output() //! .expect("failed to wait on child"); //! //! assert_eq!(b"test", output.stdout.as_slice()); //! ``` //! //! [`spawn`]: Command::spawn //! [`output`]: Command::output //! //! [`stdout`]: Command::stdout //! [`stdin`]: Command::stdin //! [`stderr`]: Command::stderr //! //! [`Write`]: io::Write //! [`Read`]: io::Read #![stable(feature = "process", since = "1.0.0")] #![deny(unsafe_op_in_unsafe_fn)] #[cfg(all(test, not(any(target_os = "emscripten", target_env = "sgx"))))] mod tests; use crate::io::prelude::*; use crate::ffi::OsStr; use crate::fmt; use crate::fs; use crate::io::{self, Initializer, IoSlice, IoSliceMut}; use crate::num::NonZeroI32; use crate::path::Path; use crate::str; use crate::sys::pipe::{read2, AnonPipe}; use crate::sys::process as imp; #[unstable(feature = "command_access", issue = "44434")] pub use crate::sys_common::process::CommandEnvs; use crate::sys_common::{AsInner, AsInnerMut, FromInner, IntoInner}; /// Representation of a running or exited child process. 
///
/// This structure is used to represent and manage child processes. A child
/// process is created via the [`Command`] struct, which configures the
/// spawning process and can itself be constructed using a builder-style
/// interface.
///
/// There is no implementation of [`Drop`] for child processes,
/// so if you do not ensure the `Child` has exited then it will continue to
/// run, even after the `Child` handle to the child process has gone out of
/// scope.
///
/// Calling [`wait`] (or other functions that wrap around it) will make
/// the parent process wait until the child has actually exited before
/// continuing.
///
/// # Warning
///
/// On some systems, calling [`wait`] or similar is necessary for the OS to
/// release resources. A process that terminated but has not been waited on is
/// still around as a "zombie". Leaving too many zombies around may exhaust
/// global resources (for example process IDs).
///
/// The standard library does *not* automatically wait on child processes (not
/// even if the `Child` is dropped), it is up to the application developer to do
/// so. As a consequence, dropping `Child` handles without waiting on them first
/// is not recommended in long-running applications.
///
/// # Examples
///
/// ```should_panic
/// use std::process::Command;
///
/// let mut child = Command::new("/bin/cat")
///     .arg("file.txt")
///     .spawn()
///     .expect("failed to execute child");
///
/// let ecode = child.wait()
///     .expect("failed to wait on child");
///
/// assert!(ecode.success());
/// ```
///
/// [`wait`]: Child::wait
#[stable(feature = "process", since = "1.0.0")]
pub struct Child {
    // Platform-specific process handle; everything in this module funnels
    // through the `imp` (sys::process) implementation behind it.
    pub(crate) handle: imp::Process,

    /// The handle for writing to the child's standard input (stdin), if it
    /// has been captured. You might find it helpful to do
    ///
    /// ```compile_fail,E0425
    /// let stdin = child.stdin.take().unwrap();
    /// ```
    ///
    /// to avoid partially moving the `child` and thus blocking yourself from calling
    /// functions on `child` while using `stdin`.
    #[stable(feature = "process", since = "1.0.0")]
    pub stdin: Option<ChildStdin>,

    /// The handle for reading from the child's standard output (stdout), if it
    /// has been captured. You might find it helpful to do
    ///
    /// ```compile_fail,E0425
    /// let stdout = child.stdout.take().unwrap();
    /// ```
    ///
    /// to avoid partially moving the `child` and thus blocking yourself from calling
    /// functions on `child` while using `stdout`.
    #[stable(feature = "process", since = "1.0.0")]
    pub stdout: Option<ChildStdout>,

    /// The handle for reading from the child's standard error (stderr), if it
    /// has been captured. You might find it helpful to do
    ///
    /// ```compile_fail,E0425
    /// let stderr = child.stderr.take().unwrap();
    /// ```
    ///
    /// to avoid partially moving the `child` and thus blocking yourself from calling
    /// functions on `child` while using `stderr`.
    #[stable(feature = "process", since = "1.0.0")]
    pub stderr: Option<ChildStderr>,
}

/// Allows extension traits within `std`.
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for Child {}

// Internal conversion plumbing (`sys_common` traits) between the public
// `Child` and the platform-specific `imp::Process`.
impl AsInner<imp::Process> for Child {
    fn as_inner(&self) -> &imp::Process {
        &self.handle
    }
}

impl FromInner<(imp::Process, imp::StdioPipes)> for Child {
    fn from_inner((handle, io): (imp::Process, imp::StdioPipes)) -> Child {
        // Each captured pipe (if any) is wrapped in its public handle type.
        Child {
            handle,
            stdin: io.stdin.map(ChildStdin::from_inner),
            stdout: io.stdout.map(ChildStdout::from_inner),
            stderr: io.stderr.map(ChildStderr::from_inner),
        }
    }
}

impl IntoInner<imp::Process> for Child {
    fn into_inner(self) -> imp::Process {
        self.handle
    }
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Child {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `finish_non_exhaustive` signals that not all fields (e.g. the raw
        // process handle) are shown.
        f.debug_struct("Child")
            .field("stdin", &self.stdin)
            .field("stdout", &self.stdout)
            .field("stderr", &self.stderr)
            .finish_non_exhaustive()
    }
}

/// A handle to a child process's standard input (stdin).
///
/// This struct is used in the [`stdin`] field on [`Child`].
///
/// When an instance of `ChildStdin` is [dropped], the `ChildStdin`'s underlying
/// file handle will be closed. If the child process was blocked on input prior
/// to being dropped, it will become unblocked after dropping.
///
/// [`stdin`]: Child::stdin
/// [dropped]: Drop
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdin {
    inner: AnonPipe,
}

// In addition to the `impl`s here, `ChildStdin` also has `impl`s for
// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
#[stable(feature = "process", since = "1.0.0")]
impl Write for ChildStdin {
    // All methods forward to the `impl Write for &ChildStdin` below, so the
    // two impls cannot drift apart.
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        (&*self).write(buf)
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        (&*self).write_vectored(bufs)
    }

    fn is_write_vectored(&self) -> bool {
        io::Write::is_write_vectored(&&*self)
    }

    fn flush(&mut self) -> io::Result<()> {
        (&*self).flush()
    }
}

#[stable(feature = "write_mt", since = "1.48.0")]
impl Write for &ChildStdin {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.inner.write(buf)
    }

    fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
        self.inner.write_vectored(bufs)
    }

    fn is_write_vectored(&self) -> bool {
        self.inner.is_write_vectored()
    }

    fn flush(&mut self) -> io::Result<()> {
        // No buffering is done at this layer (the only field is the raw
        // `AnonPipe`), so flushing is a no-op.
        Ok(())
    }
}

impl AsInner<AnonPipe> for ChildStdin {
    fn as_inner(&self) -> &AnonPipe {
        &self.inner
    }
}

impl IntoInner<AnonPipe> for ChildStdin {
    fn into_inner(self) -> AnonPipe {
        self.inner
    }
}

impl FromInner<AnonPipe> for ChildStdin {
    fn from_inner(pipe: AnonPipe) -> ChildStdin {
        ChildStdin { inner: pipe }
    }
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStdin {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ChildStdin").finish_non_exhaustive()
    }
}

/// A handle to a child process's standard output (stdout).
///
/// This struct is used in the [`stdout`] field on [`Child`].
///
/// When an instance of `ChildStdout` is [dropped], the `ChildStdout`'s
/// underlying file handle will be closed.
///
/// [`stdout`]: Child::stdout
/// [dropped]: Drop
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStdout {
    inner: AnonPipe,
}

// In addition to the `impl`s here, `ChildStdout` also has `impl`s for
// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
#[stable(feature = "process", since = "1.0.0")]
impl Read for ChildStdout {
    // All reads forward straight to the underlying anonymous pipe.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.read(buf)
    }

    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.read_vectored(bufs)
    }

    #[inline]
    fn is_read_vectored(&self) -> bool {
        self.inner.is_read_vectored()
    }

    #[inline]
    unsafe fn initializer(&self) -> Initializer {
        // SAFETY: Read is guaranteed to work on uninitialized memory
        unsafe { Initializer::nop() }
    }
}

impl AsInner<AnonPipe> for ChildStdout {
    fn as_inner(&self) -> &AnonPipe {
        &self.inner
    }
}

impl IntoInner<AnonPipe> for ChildStdout {
    fn into_inner(self) -> AnonPipe {
        self.inner
    }
}

impl FromInner<AnonPipe> for ChildStdout {
    fn from_inner(pipe: AnonPipe) -> ChildStdout {
        ChildStdout { inner: pipe }
    }
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStdout {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ChildStdout").finish_non_exhaustive()
    }
}

/// A handle to a child process's stderr.
///
/// This struct is used in the [`stderr`] field on [`Child`].
///
/// When an instance of `ChildStderr` is [dropped], the `ChildStderr`'s
/// underlying file handle will be closed.
///
/// [`stderr`]: Child::stderr
/// [dropped]: Drop
#[stable(feature = "process", since = "1.0.0")]
pub struct ChildStderr {
    inner: AnonPipe,
}

// In addition to the `impl`s here, `ChildStderr` also has `impl`s for
// `AsFd`/`From<OwnedFd>`/`Into<OwnedFd>` and
// `AsRawFd`/`IntoRawFd`/`FromRawFd`, on Unix and WASI, and
// `AsHandle`/`From<OwnedHandle>`/`Into<OwnedHandle>` and
// `AsRawHandle`/`IntoRawHandle`/`FromRawHandle` on Windows.
#[stable(feature = "process", since = "1.0.0")]
impl Read for ChildStderr {
    // Identical forwarding structure to `impl Read for ChildStdout` above.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.inner.read(buf)
    }

    fn read_vectored(&mut self, bufs: &mut [IoSliceMut<'_>]) -> io::Result<usize> {
        self.inner.read_vectored(bufs)
    }

    #[inline]
    fn is_read_vectored(&self) -> bool {
        self.inner.is_read_vectored()
    }

    #[inline]
    unsafe fn initializer(&self) -> Initializer {
        // SAFETY: Read is guaranteed to work on uninitialized memory
        unsafe { Initializer::nop() }
    }
}

impl AsInner<AnonPipe> for ChildStderr {
    fn as_inner(&self) -> &AnonPipe {
        &self.inner
    }
}

impl IntoInner<AnonPipe> for ChildStderr {
    fn into_inner(self) -> AnonPipe {
        self.inner
    }
}

impl FromInner<AnonPipe> for ChildStderr {
    fn from_inner(pipe: AnonPipe) -> ChildStderr {
        ChildStderr { inner: pipe }
    }
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for ChildStderr {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ChildStderr").finish_non_exhaustive()
    }
}

/// A process builder, providing fine-grained control
/// over how a new process should be spawned.
///
/// A default configuration can be
/// generated using `Command::new(program)`, where `program` gives a path to the
/// program to be executed. Additional builder methods allow the configuration
/// to be changed (for example, by adding arguments) prior to spawning:
///
/// ```
/// use std::process::Command;
///
/// let output = if cfg!(target_os = "windows") {
///     Command::new("cmd")
///         .args(["/C", "echo hello"])
///         .output()
///         .expect("failed to execute process")
/// } else {
///     Command::new("sh")
///         .arg("-c")
///         .arg("echo hello")
///         .output()
///         .expect("failed to execute process")
/// };
///
/// let hello = output.stdout;
/// ```
///
/// `Command` can be reused to spawn multiple processes. The builder methods
/// change the command without needing to immediately spawn the process.
///
/// ```no_run
/// use std::process::Command;
///
/// let mut echo_hello = Command::new("sh");
/// echo_hello.arg("-c")
///     .arg("echo hello");
/// let hello_1 = echo_hello.output().expect("failed to execute process");
/// let hello_2 = echo_hello.output().expect("failed to execute process");
/// ```
///
/// Similarly, you can call builder methods after spawning a process and then
/// spawn a new process with the modified settings.
///
/// ```no_run
/// use std::process::Command;
///
/// let mut list_dir = Command::new("ls");
///
/// // Execute `ls` in the current directory of the program.
/// list_dir.status().expect("process failed to execute");
///
/// println!();
///
/// // Change `ls` to execute in the root directory.
/// list_dir.current_dir("/");
///
/// // And then execute `ls` again but in the root directory.
/// list_dir.status().expect("process failed to execute");
/// ```
#[stable(feature = "process", since = "1.0.0")]
pub struct Command {
    // All configuration is delegated to the platform-specific builder.
    inner: imp::Command,
}

/// Allows extension traits within `std`.
#[unstable(feature = "sealed", issue = "none")]
impl crate::sealed::Sealed for Command {}

impl Command {
    /// Constructs a new `Command` for launching the program at
    /// path `program`, with the following default configuration:
    ///
    /// * No arguments to the program
    /// * Inherit the current process's environment
    /// * Inherit the current process's working directory
    /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
    ///
    /// Builder methods are provided to change these defaults and
    /// otherwise configure the process.
    ///
    /// If `program` is not an absolute path, the `PATH` will be searched in
    /// an OS-defined way.
    ///
    /// The search path to be used may be controlled by setting the
    /// `PATH` environment variable on the Command,
    /// but this has some implementation limitations on Windows
    /// (see issue #37519).
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("sh")
    ///     .spawn()
    ///     .expect("sh command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn new<S: AsRef<OsStr>>(program: S) -> Command {
        Command { inner: imp::Command::new(program.as_ref()) }
    }

    /// Adds an argument to pass to the program.
    ///
    /// Only one argument can be passed per use. So instead of:
    ///
    /// ```no_run
    /// # std::process::Command::new("sh")
    /// .arg("-C /path/to/repo")
    /// # ;
    /// ```
    ///
    /// usage would be:
    ///
    /// ```no_run
    /// # std::process::Command::new("sh")
    /// .arg("-C")
    /// .arg("/path/to/repo")
    /// # ;
    /// ```
    ///
    /// To pass multiple arguments see [`args`].
    ///
    /// [`args`]: Command::args
    ///
    /// Note that the argument is not passed through a shell, but given
    /// literally to the program. This means that shell syntax like quotes,
    /// escaped characters, word splitting, glob patterns, substitution, etc.
    /// have no effect.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .arg("-l")
    ///     .arg("-a")
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn arg<S: AsRef<OsStr>>(&mut self, arg: S) -> &mut Command {
        self.inner.arg(arg.as_ref());
        self
    }

    /// Adds multiple arguments to pass to the program.
    ///
    /// To pass a single argument see [`arg`].
    ///
    /// [`arg`]: Command::arg
    ///
    /// Note that the arguments are not passed through a shell, but given
    /// literally to the program. This means that shell syntax like quotes,
    /// escaped characters, word splitting, glob patterns, substitution, etc.
    /// have no effect.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .args(["-l", "-a"])
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn args<I, S>(&mut self, args: I) -> &mut Command
    where
        I: IntoIterator<Item = S>,
        S: AsRef<OsStr>,
    {
        // Delegates to `arg` one element at a time.
        for arg in args {
            self.arg(arg.as_ref());
        }
        self
    }

    /// Inserts or updates an environment variable mapping.
    ///
    /// Note that environment variable names are case-insensitive (but case-preserving) on Windows,
    /// and case-sensitive on all other platforms.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .env("PATH", "/bin")
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn env<K, V>(&mut self, key: K, val: V) -> &mut Command
    where
        K: AsRef<OsStr>,
        V: AsRef<OsStr>,
    {
        self.inner.env_mut().set(key.as_ref(), val.as_ref());
        self
    }

    /// Adds or updates multiple environment variable mappings.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    /// use std::env;
    /// use std::collections::HashMap;
    ///
    /// let filtered_env : HashMap<String, String> =
    ///     env::vars().filter(|&(ref k, _)|
    ///         k == "TERM" || k == "TZ" || k == "LANG" || k == "PATH"
    ///     ).collect();
    ///
    /// Command::new("printenv")
    ///     .stdin(Stdio::null())
    ///     .stdout(Stdio::inherit())
    ///     .env_clear()
    ///     .envs(&filtered_env)
    ///     .spawn()
    ///     .expect("printenv failed to start");
    /// ```
    #[stable(feature = "command_envs", since = "1.19.0")]
    pub fn envs<I, K, V>(&mut self, vars: I) -> &mut Command
    where
        I: IntoIterator<Item = (K, V)>,
        K: AsRef<OsStr>,
        V: AsRef<OsStr>,
    {
        for (ref key, ref val) in vars {
            self.inner.env_mut().set(key.as_ref(), val.as_ref());
        }
        self
    }

    /// Removes an environment variable mapping.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .env_remove("PATH")
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn env_remove<K: AsRef<OsStr>>(&mut self, key: K) -> &mut Command {
        self.inner.env_mut().remove(key.as_ref());
        self
    }

    /// Clears the entire environment map for the child process.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .env_clear()
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn env_clear(&mut self) -> &mut Command {
        self.inner.env_mut().clear();
        self
    }

    /// Sets the working directory for the child process.
    ///
    /// # Platform-specific behavior
    ///
    /// If the program path is relative (e.g., `"./script.sh"`), it's ambiguous
    /// whether it should be interpreted relative to the parent's working
    /// directory or relative to `current_dir`. The behavior in this case is
    /// platform specific and unstable, and it's recommended to use
    /// [`canonicalize`] to get an absolute program path instead.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .current_dir("/bin")
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    ///
    /// [`canonicalize`]: crate::fs::canonicalize
    #[stable(feature = "process", since = "1.0.0")]
    pub fn current_dir<P: AsRef<Path>>(&mut self, dir: P) -> &mut Command {
        // Double `as_ref`: `P -> &Path`, then `&Path -> &OsStr` for the
        // platform layer.
        self.inner.cwd(dir.as_ref().as_ref());
        self
    }

    /// Configuration for the child process's standard input (stdin) handle.
    ///
    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
    /// defaults to [`piped`] when used with `output`.
    ///
    /// [`inherit`]: Stdio::inherit
    /// [`piped`]: Stdio::piped
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// Command::new("ls")
    ///     .stdin(Stdio::null())
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn stdin<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        // `.0` unwraps the public `Stdio` newtype to the platform enum.
        self.inner.stdin(cfg.into().0);
        self
    }

    /// Configuration for the child process's standard output (stdout) handle.
    ///
    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
    /// defaults to [`piped`] when used with `output`.
    ///
    /// [`inherit`]: Stdio::inherit
    /// [`piped`]: Stdio::piped
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// Command::new("ls")
    ///     .stdout(Stdio::null())
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn stdout<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        self.inner.stdout(cfg.into().0);
        self
    }

    /// Configuration for the child process's standard error (stderr) handle.
    ///
    /// Defaults to [`inherit`] when used with `spawn` or `status`, and
    /// defaults to [`piped`] when used with `output`.
    ///
    /// [`inherit`]: Stdio::inherit
    /// [`piped`]: Stdio::piped
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// Command::new("ls")
    ///     .stderr(Stdio::null())
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn stderr<T: Into<Stdio>>(&mut self, cfg: T) -> &mut Command {
        self.inner.stderr(cfg.into().0);
        self
    }

    /// Executes the command as a child process, returning a handle to it.
    ///
    /// By default, stdin, stdout and stderr are inherited from the parent.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```no_run
    /// use std::process::Command;
    ///
    /// Command::new("ls")
    ///     .spawn()
    ///     .expect("ls command failed to start");
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn spawn(&mut self) -> io::Result<Child> {
        // Default any unconfigured streams to `Inherit`. NOTE(review): the
        // boolean flag presumably means "connect stdin" — `output()` below
        // passes `false` — confirm against `imp::Command::spawn`.
        self.inner.spawn(imp::Stdio::Inherit, true).map(Child::from_inner)
    }

    /// Executes the command as a child process, waiting for it to finish and
    /// collecting all of its output.
    ///
    /// By default, stdout and stderr are captured (and used to provide the
    /// resulting output). Stdin is not inherited from the parent and any
    /// attempt by the child process to read from the stdin stream will result
    /// in the stream immediately closing.
    ///
    /// # Examples
    ///
    /// ```should_panic
    /// use std::process::Command;
    /// use std::io::{self, Write};
    /// let output = Command::new("/bin/cat")
    ///     .arg("file.txt")
    ///     .output()
    ///     .expect("failed to execute process");
    ///
    /// println!("status: {}", output.status);
    /// io::stdout().write_all(&output.stdout).unwrap();
    /// io::stderr().write_all(&output.stderr).unwrap();
    ///
    /// assert!(output.status.success());
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn output(&mut self) -> io::Result<Output> {
        // Unconfigured streams default to pipes so the output can be captured,
        // then the spawned child is waited on to completion.
        self.inner
            .spawn(imp::Stdio::MakePipe, false)
            .map(Child::from_inner)
            .and_then(|p| p.wait_with_output())
    }

    /// Executes a command as a child process, waiting for it to finish and
    /// collecting its status.
    ///
    /// By default, stdin, stdout and stderr are inherited from the parent.
    ///
    /// # Examples
    ///
    /// ```should_panic
    /// use std::process::Command;
    ///
    /// let status = Command::new("/bin/cat")
    ///     .arg("file.txt")
    ///     .status()
    ///     .expect("failed to execute process");
    ///
    /// println!("process finished with: {}", status);
    ///
    /// assert!(status.success());
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn status(&mut self) -> io::Result<ExitStatus> {
        self.inner
            .spawn(imp::Stdio::Inherit, true)
            .map(Child::from_inner)
            .and_then(|mut p| p.wait())
    }

    /// Returns the path to the program that was given to [`Command::new`].
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(command_access)]
    /// use std::process::Command;
    ///
    /// let cmd = Command::new("echo");
    /// assert_eq!(cmd.get_program(), "echo");
    /// ```
    #[unstable(feature = "command_access", issue = "44434")]
    pub fn get_program(&self) -> &OsStr {
        self.inner.get_program()
    }

    /// Returns an iterator of the arguments that will be passed to the program.
    ///
    /// This does not include the path to the program as the first argument;
    /// it only includes the arguments specified with [`Command::arg`] and
    /// [`Command::args`].
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(command_access)]
    /// use std::ffi::OsStr;
    /// use std::process::Command;
    ///
    /// let mut cmd = Command::new("echo");
    /// cmd.arg("first").arg("second");
    /// let args: Vec<&OsStr> = cmd.get_args().collect();
    /// assert_eq!(args, &["first", "second"]);
    /// ```
    #[unstable(feature = "command_access", issue = "44434")]
    pub fn get_args(&self) -> CommandArgs<'_> {
        CommandArgs { inner: self.inner.get_args() }
    }

    /// Returns an iterator of the environment variables that will be set when
    /// the process is spawned.
    ///
    /// Each element is a tuple `(&OsStr, Option<&OsStr>)`, where the first
    /// value is the key, and the second is the value, which is [`None`] if
    /// the environment variable is to be explicitly removed.
    ///
    /// This only includes environment variables explicitly set with
    /// [`Command::env`], [`Command::envs`], and [`Command::env_remove`]. It
    /// does not include environment variables that will be inherited by the
    /// child process.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(command_access)]
    /// use std::ffi::OsStr;
    /// use std::process::Command;
    ///
    /// let mut cmd = Command::new("ls");
    /// cmd.env("TERM", "dumb").env_remove("TZ");
    /// let envs: Vec<(&OsStr, Option<&OsStr>)> = cmd.get_envs().collect();
    /// assert_eq!(envs, &[
    ///     (OsStr::new("TERM"), Some(OsStr::new("dumb"))),
    ///     (OsStr::new("TZ"), None)
    /// ]);
    /// ```
    #[unstable(feature = "command_access", issue = "44434")]
    pub fn get_envs(&self) -> CommandEnvs<'_> {
        self.inner.get_envs()
    }

    /// Returns the working directory for the child process.
    ///
    /// This returns [`None`] if the working directory will not be changed.
    ///
    /// # Examples
    ///
    /// ```
    /// # #![feature(command_access)]
    /// use std::path::Path;
    /// use std::process::Command;
    ///
    /// let mut cmd = Command::new("ls");
    /// assert_eq!(cmd.get_current_dir(), None);
    /// cmd.current_dir("/bin");
    /// assert_eq!(cmd.get_current_dir(), Some(Path::new("/bin")));
    /// ```
    #[unstable(feature = "command_access", issue = "44434")]
    pub fn get_current_dir(&self) -> Option<&Path> {
        self.inner.get_current_dir()
    }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl fmt::Debug for Command {
    /// Format the program and arguments of a Command for display. Any
    /// non-utf8 data is lossily converted using the utf8 replacement
    /// character.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

impl AsInner<imp::Command> for Command {
    fn as_inner(&self) -> &imp::Command {
        &self.inner
    }
}

impl AsInnerMut<imp::Command> for Command {
    fn as_inner_mut(&mut self) -> &mut imp::Command {
        &mut self.inner
    }
}

/// An iterator over the command arguments.
///
/// This struct is created by [`Command::get_args`]. See its documentation for
/// more.
#[unstable(feature = "command_access", issue = "44434")]
#[derive(Debug)]
pub struct CommandArgs<'a> {
    // Thin wrapper over the platform argument iterator; the lifetime ties it
    // to the borrowed `Command`.
    inner: imp::CommandArgs<'a>,
}

#[unstable(feature = "command_access", issue = "44434")]
impl<'a> Iterator for CommandArgs<'a> {
    type Item = &'a OsStr;
    fn next(&mut self) -> Option<&'a OsStr> {
        self.inner.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
}

#[unstable(feature = "command_access", issue = "44434")]
impl<'a> ExactSizeIterator for CommandArgs<'a> {
    fn len(&self) -> usize {
        self.inner.len()
    }
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}

/// The output of a finished process.
///
/// This is returned in a Result by either the [`output`] method of a
/// [`Command`], or the [`wait_with_output`] method of a [`Child`]
/// process.
///
/// [`output`]: Command::output
/// [`wait_with_output`]: Child::wait_with_output
#[derive(PartialEq, Eq, Clone)]
#[stable(feature = "process", since = "1.0.0")]
pub struct Output {
    /// The status (exit code) of the process.
    #[stable(feature = "process", since = "1.0.0")]
    pub status: ExitStatus,

    /// The data that the process wrote to stdout.
    #[stable(feature = "process", since = "1.0.0")]
    pub stdout: Vec<u8>,

    /// The data that the process wrote to stderr.
    #[stable(feature = "process", since = "1.0.0")]
    pub stderr: Vec<u8>,
}

// If either stderr or stdout are valid utf8 strings it prints the valid
// strings, otherwise it prints the byte sequence instead
#[stable(feature = "process_output_debug", since = "1.7.0")]
impl fmt::Debug for Output {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let stdout_utf8 = str::from_utf8(&self.stdout);
        let stdout_debug: &dyn fmt::Debug = match stdout_utf8 {
            Ok(ref str) => str,
            Err(_) => &self.stdout,
        };

        let stderr_utf8 = str::from_utf8(&self.stderr);
        let stderr_debug: &dyn fmt::Debug = match stderr_utf8 {
            Ok(ref str) => str,
            Err(_) => &self.stderr,
        };

        fmt.debug_struct("Output")
            .field("status", &self.status)
            .field("stdout", stdout_debug)
            .field("stderr", stderr_debug)
            .finish()
    }
}

/// Describes what to do with a standard I/O stream for a child process when
/// passed to the [`stdin`], [`stdout`], and [`stderr`] methods of [`Command`].
///
/// [`stdin`]: Command::stdin
/// [`stdout`]: Command::stdout
/// [`stderr`]: Command::stderr
#[stable(feature = "process", since = "1.0.0")]
pub struct Stdio(imp::Stdio);

impl Stdio {
    /// A new pipe should be arranged to connect the parent and child processes.
    ///
    /// # Examples
    ///
    /// With stdout:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// let output = Command::new("echo")
    ///     .arg("Hello, world!")
    ///     .stdout(Stdio::piped())
    ///     .output()
    ///     .expect("Failed to execute command");
    ///
    /// assert_eq!(String::from_utf8_lossy(&output.stdout), "Hello, world!\n");
    /// // Nothing echoed to console
    /// ```
    ///
    /// With stdin:
    ///
    /// ```no_run
    /// use std::io::Write;
    /// use std::process::{Command, Stdio};
    ///
    /// let mut child = Command::new("rev")
    ///     .stdin(Stdio::piped())
    ///     .stdout(Stdio::piped())
    ///     .spawn()
    ///     .expect("Failed to spawn child process");
    ///
    /// let mut stdin = child.stdin.take().expect("Failed to open stdin");
    /// std::thread::spawn(move || {
    ///     stdin.write_all("Hello, world!".as_bytes()).expect("Failed to write to stdin");
    /// });
    ///
    /// let output = child.wait_with_output().expect("Failed to read stdout");
    /// assert_eq!(String::from_utf8_lossy(&output.stdout), "!dlrow ,olleH");
    /// ```
    ///
    /// Writing more than a pipe buffer's worth of input to stdin without also reading
    /// stdout and stderr at the same time may cause a deadlock.
    /// This is an issue when running any program that doesn't guarantee that it reads
    /// its entire stdin before writing more than a pipe buffer's worth of output.
    /// The size of a pipe buffer varies on different targets.
    ///
    #[stable(feature = "process", since = "1.0.0")]
    pub fn piped() -> Stdio {
        Stdio(imp::Stdio::MakePipe)
    }

    /// The child inherits from the corresponding parent descriptor.
    ///
    /// # Examples
    ///
    /// With stdout:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// let output = Command::new("echo")
    ///     .arg("Hello, world!")
    ///     .stdout(Stdio::inherit())
    ///     .output()
    ///     .expect("Failed to execute command");
    ///
    /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
    /// // "Hello, world!" echoed to console
    /// ```
    ///
    /// With stdin:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    /// use std::io::{self, Write};
    ///
    /// let output = Command::new("rev")
    ///     .stdin(Stdio::inherit())
    ///     .stdout(Stdio::piped())
    ///     .output()
    ///     .expect("Failed to execute command");
    ///
    /// print!("You piped in the reverse of: ");
    /// io::stdout().write_all(&output.stdout).unwrap();
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn inherit() -> Stdio {
        Stdio(imp::Stdio::Inherit)
    }

    /// This stream will be ignored. This is the equivalent of attaching the
    /// stream to `/dev/null`.
    ///
    /// # Examples
    ///
    /// With stdout:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// let output = Command::new("echo")
    ///     .arg("Hello, world!")
    ///     .stdout(Stdio::null())
    ///     .output()
    ///     .expect("Failed to execute command");
    ///
    /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
    /// // Nothing echoed to console
    /// ```
    ///
    /// With stdin:
    ///
    /// ```no_run
    /// use std::process::{Command, Stdio};
    ///
    /// let output = Command::new("rev")
    ///     .stdin(Stdio::null())
    ///     .stdout(Stdio::piped())
    ///     .output()
    ///     .expect("Failed to execute command");
    ///
    /// assert_eq!(String::from_utf8_lossy(&output.stdout), "");
    /// // Ignores any piped-in input
    /// ```
    #[stable(feature = "process", since = "1.0.0")]
    pub fn null() -> Stdio {
        Stdio(imp::Stdio::Null)
    }
}

impl FromInner<imp::Stdio> for Stdio {
    fn from_inner(inner: imp::Stdio) -> Stdio {
        Stdio(inner)
    }
}

#[stable(feature = "std_debug", since = "1.16.0")]
impl fmt::Debug for Stdio {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Stdio").finish_non_exhaustive()
    }
}

#[stable(feature = "stdio_from", since = "1.20.0")]
impl From<ChildStdin> for Stdio {
    /// Converts a `ChildStdin` into a `Stdio`
    ///
    /// # Examples
    ///
    /// `ChildStdin` will be converted to `Stdio` using `Stdio::from` under the hood.
/// /// ```rust,no_run /// use std::process::{Command, Stdio}; /// /// let reverse = Command::new("rev") /// .stdin(Stdio::piped()) /// .spawn() /// .expect("failed reverse command"); /// /// let _echo = Command::new("echo") /// .arg("Hello, world!") /// .stdout(reverse.stdin.unwrap()) // Converted into a Stdio here /// .output() /// .expect("failed echo command"); /// /// // "!dlrow ,olleH" echoed to console /// ``` fn from(child: ChildStdin) -> Stdio { Stdio::from_inner(child.into_inner().into()) } } #[stable(feature = "stdio_from", since = "1.20.0")] impl From<ChildStdout> for Stdio { /// Converts a `ChildStdout` into a `Stdio` /// /// # Examples /// /// `ChildStdout` will be converted to `Stdio` using `Stdio::from` under the hood. /// /// ```rust,no_run /// use std::process::{Command, Stdio}; /// /// let hello = Command::new("echo") /// .arg("Hello, world!") /// .stdout(Stdio::piped()) /// .spawn() /// .expect("failed echo command"); /// /// let reverse = Command::new("rev") /// .stdin(hello.stdout.unwrap()) // Converted into a Stdio here /// .output() /// .expect("failed reverse command"); /// /// assert_eq!(reverse.stdout, b"!dlrow ,olleH\n"); /// ``` fn from(child: ChildStdout) -> Stdio { Stdio::from_inner(child.into_inner().into()) } } #[stable(feature = "stdio_from", since = "1.20.0")] impl From<ChildStderr> for Stdio { /// Converts a `ChildStderr` into a `Stdio` /// /// # Examples /// /// ```rust,no_run /// use std::process::{Command, Stdio}; /// /// let reverse = Command::new("rev") /// .arg("non_existing_file.txt") /// .stderr(Stdio::piped()) /// .spawn() /// .expect("failed reverse command"); /// /// let cat = Command::new("cat") /// .arg("-") /// .stdin(reverse.stderr.unwrap()) // Converted into a Stdio here /// .output() /// .expect("failed echo command"); /// /// assert_eq!( /// String::from_utf8_lossy(&cat.stdout), /// "rev: cannot open non_existing_file.txt: No such file or directory\n" /// ); /// ``` fn from(child: ChildStderr) -> Stdio { 
Stdio::from_inner(child.into_inner().into()) } } #[stable(feature = "stdio_from", since = "1.20.0")] impl From<fs::File> for Stdio { /// Converts a `File` into a `Stdio` /// /// # Examples /// /// `File` will be converted to `Stdio` using `Stdio::from` under the hood. /// /// ```rust,no_run /// use std::fs::File; /// use std::process::Command; /// /// // With the `foo.txt` file containing `Hello, world!" /// let file = File::open("foo.txt").unwrap(); /// /// let reverse = Command::new("rev") /// .stdin(file) // Implicit File conversion into a Stdio /// .output() /// .expect("failed reverse command"); /// /// assert_eq!(reverse.stdout, b"!dlrow ,olleH"); /// ``` fn from(file: fs::File) -> Stdio { Stdio::from_inner(file.into_inner().into()) } } /// Describes the result of a process after it has terminated. /// /// This `struct` is used to represent the exit status or other termination of a child process. /// Child processes are created via the [`Command`] struct and their exit /// status is exposed through the [`status`] method, or the [`wait`] method /// of a [`Child`] process. /// /// An `ExitStatus` represents every possible disposition of a process. On Unix this /// is the **wait status**. It is *not* simply an *exit status* (a value passed to `exit`). /// /// For proper error reporting of failed processes, print the value of `ExitStatus` or /// `ExitStatusError` using their implementations of [`Display`](crate::fmt::Display). /// /// [`status`]: Command::status /// [`wait`]: Child::wait #[derive(PartialEq, Eq, Clone, Copy, Debug)] #[stable(feature = "process", since = "1.0.0")] pub struct ExitStatus(imp::ExitStatus); /// Allows extension traits within `std`. #[unstable(feature = "sealed", issue = "none")] impl crate::sealed::Sealed for ExitStatus {} impl ExitStatus { /// Was termination successful? Returns a `Result`. 
/// /// # Examples /// /// ``` /// #![feature(exit_status_error)] /// # if cfg!(unix) { /// use std::process::Command; /// /// let status = Command::new("ls") /// .arg("/dev/nonexistent") /// .status() /// .expect("ls could not be executed"); /// /// println!("ls: {}", status); /// status.exit_ok().expect_err("/dev/nonexistent could be listed!"); /// # } // cfg!(unix) /// ``` #[unstable(feature = "exit_status_error", issue = "84908")] pub fn exit_ok(&self) -> Result<(), ExitStatusError> { self.0.exit_ok().map_err(ExitStatusError) } /// Was termination successful? Signal termination is not considered a /// success, and success is defined as a zero exit status. /// /// # Examples /// /// ```rust,no_run /// use std::process::Command; /// /// let status = Command::new("mkdir") /// .arg("projects") /// .status() /// .expect("failed to execute mkdir"); /// /// if status.success() { /// println!("'projects/' directory created"); /// } else { /// println!("failed to create 'projects/' directory: {}", status); /// } /// ``` #[stable(feature = "process", since = "1.0.0")] pub fn success(&self) -> bool { self.0.exit_ok().is_ok() } /// Returns the exit code of the process, if any. /// /// In Unix terms the return value is the **exit status**: the value passed to `exit`, if the /// process finished by calling `exit`. Note that on Unix the exit status is truncated to 8 /// bits, and that values that didn't come from a program's call to `exit` may be invented by the /// runtime system (often, for example, 255, 254, 127 or 126). /// /// On Unix, this will return `None` if the process was terminated by a signal. /// [`ExitStatusExt`](crate::os::unix::process::ExitStatusExt) is an /// extension trait for extracting any such signal, and other details, from the `ExitStatus`. 
/// /// # Examples /// /// ```no_run /// use std::process::Command; /// /// let status = Command::new("mkdir") /// .arg("projects") /// .status() /// .expect("failed to execute mkdir"); /// /// match status.code() { /// Some(code) => println!("Exited with status code: {}", code), /// None => println!("Process terminated by signal") /// } /// ``` #[stable(feature = "process", since = "1.0.0")] pub fn code(&self) -> Option<i32> { self.0.code() } } impl AsInner<imp::ExitStatus> for ExitStatus { fn as_inner(&self) -> &imp::ExitStatus { &self.0 } } impl FromInner<imp::ExitStatus> for ExitStatus { fn from_inner(s: imp::ExitStatus) -> ExitStatus { ExitStatus(s) } } #[stable(feature = "process", since = "1.0.0")] impl fmt::Display for ExitStatus { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } /// Allows extension traits within `std`. #[unstable(feature = "sealed", issue = "none")] impl crate::sealed::Sealed for ExitStatusError {} /// Describes the result of a process after it has failed /// /// Produced by the [`.exit_ok`](ExitStatus::exit_ok) method on [`ExitStatus`]. /// /// # Examples /// /// ``` /// #![feature(exit_status_error)] /// # if cfg!(unix) { /// use std::process::{Command, ExitStatusError}; /// /// fn run(cmd: &str) -> Result<(),ExitStatusError> { /// Command::new(cmd).status().unwrap().exit_ok()?; /// Ok(()) /// } /// /// run("true").unwrap(); /// run("false").unwrap_err(); /// # } // cfg!(unix) /// ``` #[derive(PartialEq, Eq, Clone, Copy, Debug)] #[unstable(feature = "exit_status_error", issue = "84908")] // The definition of imp::ExitStatusError should ideally be such that // Result<(), imp::ExitStatusError> has an identical representation to imp::ExitStatus. pub struct ExitStatusError(imp::ExitStatusError); #[unstable(feature = "exit_status_error", issue = "84908")] impl ExitStatusError { /// Reports the exit code, if applicable, from an `ExitStatusError`. 
/// /// In Unix terms the return value is the **exit status**: the value passed to `exit`, if the /// process finished by calling `exit`. Note that on Unix the exit status is truncated to 8 /// bits, and that values that didn't come from a program's call to `exit` may be invented by the /// runtime system (often, for example, 255, 254, 127 or 126). /// /// On Unix, this will return `None` if the process was terminated by a signal. If you want to /// handle such situations specially, consider using methods from /// [`ExitStatusExt`](crate::os::unix::process::ExitStatusExt). /// /// If the process finished by calling `exit` with a nonzero value, this will return /// that exit status. /// /// If the error was something else, it will return `None`. /// /// If the process exited successfully (ie, by calling `exit(0)`), there is no /// `ExitStatusError`. So the return value from `ExitStatusError::code()` is always nonzero. /// /// # Examples /// /// ``` /// #![feature(exit_status_error)] /// # #[cfg(unix)] { /// use std::process::Command; /// /// let bad = Command::new("false").status().unwrap().exit_ok().unwrap_err(); /// assert_eq!(bad.code(), Some(1)); /// # } // #[cfg(unix)] /// ``` pub fn code(&self) -> Option<i32> { self.code_nonzero().map(Into::into) } /// Reports the exit code, if applicable, from an `ExitStatusError`, as a `NonZero` /// /// This is exactly like [`code()`](Self::code), except that it returns a `NonZeroI32`. /// /// Plain `code`, returning a plain integer, is provided because is is often more convenient. /// The returned value from `code()` is indeed also nonzero; use `code_nonzero()` when you want /// a type-level guarantee of nonzeroness. 
/// /// # Examples /// /// ``` /// #![feature(exit_status_error)] /// # if cfg!(unix) { /// use std::convert::TryFrom; /// use std::num::NonZeroI32; /// use std::process::Command; /// /// let bad = Command::new("false").status().unwrap().exit_ok().unwrap_err(); /// assert_eq!(bad.code_nonzero().unwrap(), NonZeroI32::try_from(1).unwrap()); /// # } // cfg!(unix) /// ``` pub fn code_nonzero(&self) -> Option<NonZeroI32> { self.0.code() } /// Converts an `ExitStatusError` (back) to an `ExitStatus`. pub fn into_status(&self) -> ExitStatus { ExitStatus(self.0.into()) } } #[unstable(feature = "exit_status_error", issue = "84908")] impl Into<ExitStatus> for ExitStatusError { fn into(self) -> ExitStatus { ExitStatus(self.0.into()) } } #[unstable(feature = "exit_status_error", issue = "84908")] impl fmt::Display for ExitStatusError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "process exited unsuccessfully: {}", self.into_status()) } } #[unstable(feature = "exit_status_error", issue = "84908")] impl crate::error::Error for ExitStatusError {} /// This type represents the status code a process can return to its /// parent under normal termination. /// /// Numeric values used in this type don't have portable meanings, and /// different platforms may mask different amounts of them. /// /// For the platform's canonical successful and unsuccessful codes, see /// the [`SUCCESS`] and [`FAILURE`] associated items. /// /// [`SUCCESS`]: ExitCode::SUCCESS /// [`FAILURE`]: ExitCode::FAILURE /// /// **Warning**: While various forms of this were discussed in [RFC #1937], /// it was ultimately cut from that RFC, and thus this type is more subject /// to change even than the usual unstable item churn. 
/// /// [RFC #1937]: https://github.com/rust-lang/rfcs/pull/1937 #[derive(Clone, Copy, Debug)] #[unstable(feature = "process_exitcode_placeholder", issue = "48711")] pub struct ExitCode(imp::ExitCode); #[unstable(feature = "process_exitcode_placeholder", issue = "48711")] impl ExitCode { /// The canonical ExitCode for successful termination on this platform. /// /// Note that a `()`-returning `main` implicitly results in a successful /// termination, so there's no need to return this from `main` unless /// you're also returning other possible codes. #[unstable(feature = "process_exitcode_placeholder", issue = "48711")] pub const SUCCESS: ExitCode = ExitCode(imp::ExitCode::SUCCESS); /// The canonical ExitCode for unsuccessful termination on this platform. /// /// If you're only returning this and `SUCCESS` from `main`, consider /// instead returning `Err(_)` and `Ok(())` respectively, which will /// return the same codes (but will also `eprintln!` the error). #[unstable(feature = "process_exitcode_placeholder", issue = "48711")] pub const FAILURE: ExitCode = ExitCode(imp::ExitCode::FAILURE); } impl Child { /// Forces the child process to exit. If the child has already exited, an [`InvalidInput`] /// error is returned. /// /// The mapping to [`ErrorKind`]s is not part of the compatibility contract of the function. /// /// This is equivalent to sending a SIGKILL on Unix platforms. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use std::process::Command; /// /// let mut command = Command::new("yes"); /// if let Ok(mut child) = command.spawn() { /// child.kill().expect("command wasn't running"); /// } else { /// println!("yes command didn't start"); /// } /// ``` /// /// [`ErrorKind`]: io::ErrorKind /// [`InvalidInput`]: io::ErrorKind::InvalidInput #[stable(feature = "process", since = "1.0.0")] pub fn kill(&mut self) -> io::Result<()> { self.handle.kill() } /// Returns the OS-assigned process identifier associated with this child. 
/// /// # Examples /// /// Basic usage: /// /// ```no_run /// use std::process::Command; /// /// let mut command = Command::new("ls"); /// if let Ok(child) = command.spawn() { /// println!("Child's ID is {}", child.id()); /// } else { /// println!("ls command didn't start"); /// } /// ``` #[stable(feature = "process_id", since = "1.3.0")] pub fn id(&self) -> u32 { self.handle.id() } /// Waits for the child to exit completely, returning the status that it /// exited with. This function will continue to have the same return value /// after it has been called at least once. /// /// The stdin handle to the child process, if any, will be closed /// before waiting. This helps avoid deadlock: it ensures that the /// child does not block waiting for input from the parent, while /// the parent waits for the child to exit. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use std::process::Command; /// /// let mut command = Command::new("ls"); /// if let Ok(mut child) = command.spawn() { /// child.wait().expect("command wasn't running"); /// println!("Child has finished its execution!"); /// } else { /// println!("ls command didn't start"); /// } /// ``` #[stable(feature = "process", since = "1.0.0")] pub fn wait(&mut self) -> io::Result<ExitStatus> { drop(self.stdin.take()); self.handle.wait().map(ExitStatus) } /// Attempts to collect the exit status of the child if it has already /// exited. /// /// This function will not block the calling thread and will only /// check to see if the child process has exited or not. If the child has /// exited then on Unix the process ID is reaped. This function is /// guaranteed to repeatedly return a successful exit status so long as the /// child has already exited. /// /// If the child has exited, then `Ok(Some(status))` is returned. If the /// exit status is not available at this time then `Ok(None)` is returned. /// If an error occurs, then that error is returned. 
/// /// Note that unlike `wait`, this function will not attempt to drop stdin. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use std::process::Command; /// /// let mut child = Command::new("ls").spawn().unwrap(); /// /// match child.try_wait() { /// Ok(Some(status)) => println!("exited with: {}", status), /// Ok(None) => { /// println!("status not ready yet, let's really wait"); /// let res = child.wait(); /// println!("result: {:?}", res); /// } /// Err(e) => println!("error attempting to wait: {}", e), /// } /// ``` #[stable(feature = "process_try_wait", since = "1.18.0")] pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> { Ok(self.handle.try_wait()?.map(ExitStatus)) } /// Simultaneously waits for the child to exit and collect all remaining /// output on the stdout/stderr handles, returning an `Output` /// instance. /// /// The stdin handle to the child process, if any, will be closed /// before waiting. This helps avoid deadlock: it ensures that the /// child does not block waiting for input from the parent, while /// the parent waits for the child to exit. /// /// By default, stdin, stdout and stderr are inherited from the parent. /// In order to capture the output into this `Result<Output>` it is /// necessary to create new pipes between parent and child. Use /// `stdout(Stdio::piped())` or `stderr(Stdio::piped())`, respectively. 
/// /// # Examples /// /// ```should_panic /// use std::process::{Command, Stdio}; /// /// let child = Command::new("/bin/cat") /// .arg("file.txt") /// .stdout(Stdio::piped()) /// .spawn() /// .expect("failed to execute child"); /// /// let output = child /// .wait_with_output() /// .expect("failed to wait on child"); /// /// assert!(output.status.success()); /// ``` /// #[stable(feature = "process", since = "1.0.0")] pub fn wait_with_output(mut self) -> io::Result<Output> { drop(self.stdin.take()); let (mut stdout, mut stderr) = (Vec::new(), Vec::new()); match (self.stdout.take(), self.stderr.take()) { (None, None) => {} (Some(mut out), None) => { let res = out.read_to_end(&mut stdout); res.unwrap(); } (None, Some(mut err)) => { let res = err.read_to_end(&mut stderr); res.unwrap(); } (Some(out), Some(err)) => { let res = read2(out.inner, &mut stdout, err.inner, &mut stderr); res.unwrap(); } } let status = self.wait()?; Ok(Output { status, stdout, stderr }) } } /// Terminates the current process with the specified exit code. /// /// This function will never return and will immediately terminate the current /// process. The exit code is passed through to the underlying OS and will be /// available for consumption by another process. /// /// Note that because this function never returns, and that it terminates the /// process, no destructors on the current stack or any other thread's stack /// will be run. If a clean shutdown is needed it is recommended to only call /// this function at a known point where there are no more destructors left /// to run. /// /// ## Platform-specific behavior /// /// **Unix**: On Unix-like platforms, it is unlikely that all 32 bits of `exit` /// will be visible to a parent process inspecting the exit code. On most /// Unix-like platforms, only the eight least-significant bits are considered. 
/// /// # Examples /// /// Due to this function’s behavior regarding destructors, a conventional way /// to use the function is to extract the actual computation to another /// function and compute the exit code from its return value: /// /// ``` /// fn run_app() -> Result<(), ()> { /// // Application logic here /// Ok(()) /// } /// /// fn main() { /// std::process::exit(match run_app() { /// Ok(_) => 0, /// Err(err) => { /// eprintln!("error: {:?}", err); /// 1 /// } /// }); /// } /// ``` /// /// Due to [platform-specific behavior], the exit code for this example will be /// `0` on Linux, but `256` on Windows: /// /// ```no_run /// use std::process; /// /// process::exit(0x0100); /// ``` /// /// [platform-specific behavior]: #platform-specific-behavior #[stable(feature = "rust1", since = "1.0.0")] pub fn exit(code: i32) -> ! { crate::sys_common::rt::cleanup(); crate::sys::os::exit(code) } /// Terminates the process in an abnormal fashion. /// /// The function will never return and will immediately terminate the current /// process in a platform specific "abnormal" manner. /// /// Note that because this function never returns, and that it terminates the /// process, no destructors on the current stack or any other thread's stack /// will be run. /// /// Rust IO buffers (eg, from `BufWriter`) will not be flushed. /// Likewise, C stdio buffers will (on most platforms) not be flushed. /// /// This is in contrast to the default behaviour of [`panic!`] which unwinds /// the current thread's stack and calls all destructors. /// When `panic="abort"` is set, either as an argument to `rustc` or in a /// crate's Cargo.toml, [`panic!`] and `abort` are similar. However, /// [`panic!`] will still call the [panic hook] while `abort` will not. /// /// If a clean shutdown is needed it is recommended to only call /// this function at a known point where there are no more destructors left /// to run. 
/// /// The process's termination will be similar to that from the C `abort()` /// function. On Unix, the process will terminate with signal `SIGABRT`, which /// typically means that the shell prints "Aborted". /// /// # Examples /// /// ```no_run /// use std::process; /// /// fn main() { /// println!("aborting"); /// /// process::abort(); /// /// // execution never gets here /// } /// ``` /// /// The `abort` function terminates the process, so the destructor will not /// get run on the example below: /// /// ```no_run /// use std::process; /// /// struct HasDrop; /// /// impl Drop for HasDrop { /// fn drop(&mut self) { /// println!("This will never be printed!"); /// } /// } /// /// fn main() { /// let _x = HasDrop; /// process::abort(); /// // the destructor implemented for HasDrop will never get run /// } /// ``` /// /// [panic hook]: crate::panic::set_hook #[stable(feature = "process_abort", since = "1.17.0")] #[cold] #[allow(unused_unsafe)] pub fn abort() -> ! { unsafe { crate::sys::abort_internal(); } } /// Returns the OS-assigned process identifier associated with this process. /// /// # Examples /// /// Basic usage: /// /// ```no_run /// use std::process; /// /// println!("My pid is {}", process::id()); /// ``` /// /// #[stable(feature = "getpid", since = "1.26.0")] pub fn id() -> u32 { crate::sys::os::getpid() } /// A trait for implementing arbitrary return types in the `main` function. /// /// The C-main function only supports to return integers as return type. /// So, every type implementing the `Termination` trait has to be converted /// to an integer. /// /// The default implementations are returning `libc::EXIT_SUCCESS` to indicate /// a successful execution. In case of a failure, `libc::EXIT_FAILURE` is returned. 
#[cfg_attr(not(test), lang = "termination")] #[unstable(feature = "termination_trait_lib", issue = "43301")] #[rustc_on_unimplemented( message = "`main` has invalid return type `{Self}`", label = "`main` can only return types that implement `{Termination}`" )] pub trait Termination { /// Is called to get the representation of the value as status code. /// This status code is returned to the operating system. fn report(self) -> i32; } #[unstable(feature = "termination_trait_lib", issue = "43301")] impl Termination for () { #[inline] fn report(self) -> i32 { ExitCode::SUCCESS.report() } } #[unstable(feature = "termination_trait_lib", issue = "43301")] impl<E: fmt::Debug> Termination for Result<(), E> { fn report(self) -> i32 { match self { Ok(()) => ().report(), Err(err) => Err::<!, _>(err).report(), } } } #[unstable(feature = "termination_trait_lib", issue = "43301")] impl Termination for ! { fn report(self) -> i32 { self } } #[unstable(feature = "termination_trait_lib", issue = "43301")] impl<E: fmt::Debug> Termination for Result<!, E> { fn report(self) -> i32 { let Err(err) = self; eprintln!("Error: {:?}", err); ExitCode::FAILURE.report() } } #[unstable(feature = "termination_trait_lib", issue = "43301")] impl Termination for ExitCode { #[inline] fn report(self) -> i32 { self.0.as_i32() } }
31.815228
101
0.573099
1c80f5770423b363e2fb7b52ea6ddb386d54c946
97
#![cfg(test)] #![allow(dead_code)] pub mod common; pub mod constants; pub mod random_generator;
13.857143
25
0.721649
abfcfe0d36f2aa0bf1862b35d419f2a9033d7f44
2,033
// Copyright Cryptape Technologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use cita_types::Address; use libproto::BlackList as ProtoBlackList; #[derive(PartialEq, Clone, Debug, Default)] pub struct BlackList { black_list: Vec<Address>, clear_list: Vec<Address>, } impl BlackList { pub fn new() -> Self { BlackList { black_list: Vec::new(), clear_list: Vec::new(), } } pub fn black_list(&self) -> &Vec<Address> { &self.black_list } pub fn clear_list(&self) -> &Vec<Address> { &self.clear_list } pub fn set_black_list(mut self, black_list: Vec<Address>) -> Self { self.black_list = black_list; self } pub fn set_clear_list(mut self, clear_list: Vec<Address>) -> Self { self.clear_list = clear_list; self } pub fn is_empty(&self) -> bool { self.len() == 0 } pub fn len(&self) -> usize { self.black_list.len() + self.clear_list.len() } pub fn protobuf(&self) -> ProtoBlackList { let mut bl = ProtoBlackList::new(); bl.set_black_list( self.black_list .clone() .into_iter() .map(|address| address.to_vec()) .collect(), ); bl.set_clear_list( self.clear_list .clone() .into_iter() .map(|address| address.to_vec()) .collect(), ); bl } }
26.402597
75
0.584358
8fec2e7b106935ce85a8c531663756ea86c2deb1
448
use scale_info::TypeInfo; use crate::*; pub trait DemocracyGovernanceDelegate<T: SystemConfig, Proposal, Balance> { fn propose(origin: OriginFor<T>, proposal: Proposal, value: Balance) -> DispatchResult; } #[cfg_attr(feature = "std", derive(Debug))] #[derive(Clone, Encode, Decode, PartialEq, TypeInfo)] pub struct LinkProposal<ChainId, TreeId> { pub target_chain_id: ChainId, pub target_tree_id: Option<TreeId>, pub local_tree_id: TreeId, }
29.866667
88
0.754464
03a47f5a6d0b7113f18957d7eea75b0242100351
11,452
// Copyright (c) Aptos // SPDX-License-Identifier: Apache-2.0 use aptos_types::transaction::{ ExecutionStatus, Module, SignedTransaction, Transaction, TransactionStatus, }; use language_e2e_tests::{ account::AccountData, compile::compile_script, current_function_name, executor::FakeExecutor, }; use move_deps::{ move_binary_format::CompiledModule, move_bytecode_verifier::verify_module, move_ir_compiler::Compiler, }; #[test] fn move_from_across_blocks() { let mut executor = FakeExecutor::from_genesis_file(); executor.set_golden_file(current_function_name!()); let sender = executor.create_raw_account_data(1_000_000, 10); executor.add_account_data(&sender); // publish module with add and remove resource let (module, txn) = add_module_txn(&sender, 10); executor.execute_and_apply(txn); // remove resource fails given no resource were published let rem_txn = remove_resource_txn(&sender, 11, vec![module.clone()]); let output = executor.execute_transaction(rem_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. }) )); executor.apply_write_set(output.write_set()); // publish resource let add_txn = add_resource_txn(&sender, 12, vec![module.clone()]); executor.execute_and_apply(add_txn); // borrow resource let borrow_txn = borrow_resource_txn(&sender, 13, vec![module.clone()]); executor.execute_and_apply(borrow_txn); // remove resource let rem_txn = remove_resource_txn(&sender, 14, vec![module.clone()]); executor.execute_and_apply(rem_txn); // remove resource fails given it was removed already let rem_txn = remove_resource_txn(&sender, 15, vec![module.clone()]); let output = executor.execute_transaction(rem_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. 
}) )); executor.apply_write_set(output.write_set()); // borrow resource fail given it was removed let borrow_txn = borrow_resource_txn(&sender, 16, vec![module.clone()]); let output = executor.execute_transaction(borrow_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. }) )); executor.apply_write_set(output.write_set()); // publish resource again let add_txn = add_resource_txn(&sender, 17, vec![module.clone()]); executor.execute_and_apply(add_txn); // create 2 remove resource transaction over the same resource in one block let txns = vec![ Transaction::UserTransaction(remove_resource_txn(&sender, 18, vec![module.clone()])), Transaction::UserTransaction(remove_resource_txn(&sender, 19, vec![module])), ]; let output = executor .execute_transaction_block(txns) .expect("Must execute transactions"); assert_eq!( output[0].status(), &TransactionStatus::Keep(ExecutionStatus::Success) ); assert!(matches!( output[1].status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. }) )); for out in output { executor.apply_write_set(out.write_set()); } } #[test] fn borrow_after_move() { let mut executor = FakeExecutor::from_genesis_file(); executor.set_golden_file(current_function_name!()); let sender = executor.create_raw_account_data(1_000_000, 10); executor.add_account_data(&sender); // publish module with add and remove resource let (module, txn) = add_module_txn(&sender, 10); executor.execute_and_apply(txn); // remove resource fails given no resource were published let rem_txn = remove_resource_txn(&sender, 11, vec![module.clone()]); let output = executor.execute_transaction(rem_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. 
}) )); executor.apply_write_set(output.write_set()); // publish resource let add_txn = add_resource_txn(&sender, 12, vec![module.clone()]); executor.execute_and_apply(add_txn); // borrow resource let borrow_txn = borrow_resource_txn(&sender, 13, vec![module.clone()]); executor.execute_and_apply(borrow_txn); // create a remove and a borrow resource transaction over the same resource in one block let txns = vec![ Transaction::UserTransaction(remove_resource_txn(&sender, 14, vec![module.clone()])), Transaction::UserTransaction(borrow_resource_txn(&sender, 15, vec![module])), ]; let output = executor .execute_transaction_block(txns) .expect("Must execute transactions"); assert_eq!( output[0].status(), &TransactionStatus::Keep(ExecutionStatus::Success) ); assert!(matches!( output[1].status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. }) )); for out in output { executor.apply_write_set(out.write_set()); } } #[test] fn change_after_move() { let mut executor = FakeExecutor::from_genesis_file(); executor.set_golden_file(current_function_name!()); let sender = executor.create_raw_account_data(1_000_000, 10); executor.add_account_data(&sender); // publish module with add and remove resource let (module, txn) = add_module_txn(&sender, 10); executor.execute_and_apply(txn); // remove resource fails given no resource were published let rem_txn = remove_resource_txn(&sender, 11, vec![module.clone()]); let output = executor.execute_transaction(rem_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. 
}) )); executor.apply_write_set(output.write_set()); // publish resource let add_txn = add_resource_txn(&sender, 12, vec![module.clone()]); executor.execute_and_apply(add_txn); // borrow resource let borrow_txn = borrow_resource_txn(&sender, 13, vec![module.clone()]); executor.execute_and_apply(borrow_txn); // create a remove and a change resource transaction over the same resource in one block let txns = vec![ Transaction::UserTransaction(remove_resource_txn(&sender, 14, vec![module.clone()])), Transaction::UserTransaction(change_resource_txn(&sender, 15, vec![module.clone()])), ]; let output = executor .execute_transaction_block(txns) .expect("Must execute transactions"); assert_eq!( output[0].status(), &TransactionStatus::Keep(ExecutionStatus::Success) ); assert!(matches!( output[1].status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. }) )); for out in output { executor.apply_write_set(out.write_set()); } // borrow resource let borrow_txn = borrow_resource_txn(&sender, 16, vec![module]); let output = executor.execute_transaction(borrow_txn); assert!(matches!( output.status().status(), // StatusCode::MISSING_DATA Ok(ExecutionStatus::ExecutionFailure { .. 
}) )); executor.apply_write_set(output.write_set()); } fn add_module_txn(sender: &AccountData, seq_num: u64) -> (CompiledModule, SignedTransaction) { let module_code = format!( " module 0x{}.M {{ import 0x1.Signer; struct T1 has key {{ v: u64 }} public borrow_t1(account: &signer) acquires T1 {{ let t1: &Self.T1; label b0: t1 = borrow_global<T1>(Signer.address_of(move(account))); return; }} public change_t1(account: &signer, v: u64) acquires T1 {{ let t1: &mut Self.T1; label b0: t1 = borrow_global_mut<T1>(Signer.address_of(move(account))); *&mut move(t1).T1::v = move(v); return; }} public remove_t1(account: &signer) acquires T1 {{ let v: u64; label b0: T1 {{ v }} = move_from<T1>(Signer.address_of(move(account))); return; }} public publish_t1(account: &signer) {{ label b0: move_to<T1>(move(account), T1 {{ v: 3 }}); return; }} }} ", sender.address(), ); let compiler = Compiler { deps: cached_framework_packages::modules().iter().collect(), }; let module = compiler .into_compiled_module(module_code.as_str()) .expect("Module compilation failed"); let mut module_blob = vec![]; module .serialize(&mut module_blob) .expect("Module must serialize"); verify_module(&module).expect("Module must verify"); ( module, sender .account() .transaction() .module(Module::new(module_blob)) .sequence_number(seq_num) .sign(), ) } fn add_resource_txn( sender: &AccountData, seq_num: u64, extra_deps: Vec<CompiledModule>, ) -> SignedTransaction { let program = format!( " import 0x{}.M; main(account: signer) {{ label b0: M.publish_t1(&account); return; }} ", sender.address(), ); let module = compile_script(&program, extra_deps); sender .account() .transaction() .script(module) .sequence_number(seq_num) .sign() } fn remove_resource_txn( sender: &AccountData, seq_num: u64, extra_deps: Vec<CompiledModule>, ) -> SignedTransaction { let program = format!( " import 0x{}.M; main(account: signer) {{ label b0: M.remove_t1(&account); return; }} ", sender.address(), ); let module = compile_script(&program, 
extra_deps); sender .account() .transaction() .script(module) .sequence_number(seq_num) .sign() } fn borrow_resource_txn( sender: &AccountData, seq_num: u64, extra_deps: Vec<CompiledModule>, ) -> SignedTransaction { let program = format!( " import 0x{}.M; main(account: signer) {{ label b0: M.borrow_t1(&account); return; }} ", sender.address(), ); let module = compile_script(&program, extra_deps); sender .account() .transaction() .script(module) .sequence_number(seq_num) .sign() } fn change_resource_txn( sender: &AccountData, seq_num: u64, extra_deps: Vec<CompiledModule>, ) -> SignedTransaction { let program = format!( " import 0x{}.M; main(account: signer) {{ label b0: M.change_t1(&account, 20); return; }} ", sender.address(), ); let module = compile_script(&program, extra_deps); sender .account() .transaction() .script(module) .sequence_number(seq_num) .sign() }
30.620321
97
0.613255
9c90b433c8c439a852692f869b942cd54dd52e6d
2,038
struct R2cf { n1: i64, n2: i64, } // This iterator generates the continued fraction representation from the // specified rational number. impl Iterator for R2cf { type Item = i64; fn next(&mut self) -> Option<i64> { if self.n2 == 0 { None } else { let t1 = self.n1 / self.n2; let t2 = self.n2; self.n2 = self.n1 - t1 * t2; self.n1 = t2; Some(t1) } } } fn r2cf(n1: i64, n2: i64) -> R2cf { R2cf { n1: n1, n2: n2 } } macro_rules! printcf { ($x:expr, $y:expr) => { println!("{:?}", r2cf($x, $y).collect::<Vec<_>>()) }; } fn main() { printcf!(1, 2); printcf!(3, 1); printcf!(23, 8); printcf!(13, 11); printcf!(22, 7); printcf!(-152, 77); printcf!(14_142, 10_000); printcf!(141_421, 100_000); printcf!(1_414_214, 1_000_000); printcf!(14_142_136, 10_000_000); printcf!(31, 10); printcf!(314, 100); printcf!(3142, 1000); printcf!(31_428, 10_000); printcf!(314_285, 100_000); printcf!(3_142_857, 1_000_000); printcf!(31_428_571, 10_000_000); printcf!(314_285_714, 100_000_000); } #[cfg(test)] mod tests { use super::r2cf; use std::iter::Iterator; #[test] fn test_misc() { assert!(Iterator::eq(r2cf(-151, 77), vec![-1, -1, -24, -1, -2])); assert!(Iterator::eq(r2cf(22, 7), vec![3, 7])); assert!(Iterator::eq(r2cf(23, 8), vec![2, 1, 7])); } #[test] fn test_sqrt2() { assert!(Iterator::eq( r2cf(14_142, 10_000), vec![1, 2, 2, 2, 2, 2, 1, 1, 29] )); assert!(Iterator::eq( r2cf(14_142_136, 10_000_000), vec![1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 1, 2, 4, 1, 1, 2] )); } #[test] fn test_pi() { assert!(Iterator::eq(r2cf(31, 10), vec![3, 10])); assert!(Iterator::eq(r2cf(314, 100), vec![3, 7, 7])); assert!(Iterator::eq(r2cf(3_142, 1_000), vec![3, 7, 23, 1, 2])); } }
23.159091
73
0.500981
87abca49873df5d7f5810ecd7148e1216e7b6215
4,261
mod ticket { /// We will begin our journey of building our own JIRA clone defining the cornerstone of /// JIRA's experience: the ticket. /// For now we want to limit ourselves to the essentials: each ticket will have a title /// and a description. /// No, not an ID yet. We will get to that in due time. /// /// There are various ways to represent a set of related pieces of information in Rust. /// We'll go for a `struct`: a struct is quite similar to what you would call a class or /// an object in object-oriented programming languages. /// It is a collection of fields, each one with its own name. /// Given that Rust is a strongly-typed language, we also need to specify a type for each /// of those fields. /// /// Our definition of Ticket is incomplete - can you replace __ with what is missing to make /// this snippet compile and the tests below succeed? /// /// You can find more about structs in the Rust Book: https://doc.rust-lang.org/book/ch05-01-defining-structs.html pub struct Ticket { title: String, description: String, } /// `cfg` stands for configuration flag. /// The #[cfg(_)] attribute is used to mark a section of the code for conditional compilation /// based on the value of the specified flag. /// #[cfg(test)] is used to mark sections of our codebase that should only be compiled /// when running `cargo test`... /// Yes, tests! /// /// You can put tests in different places in a Rust project, depending on what you are /// trying to do: unit testing of private functions and methods, testing an internal API, /// integration testing your crate from the outside, etc. /// You can find more details on test organisation in the Rust book: /// https://doc.rust-lang.org/book/ch11-03-test-organization.html /// /// Let it be said that tests are first-class citizens in the Rust ecosystem and you are /// provided with a barebone test framework out of the box. 
#[cfg(test)] mod tests { use super::*; /// The #[test] attribute is used to mark a function as a test for the compiler. /// Tests take no arguments: when we run `cargo test`, this function will be invoked. /// If it runs without raising any issue, the test is considered green - it passed. /// If it panics (raises a fatal exception), then the test is considered red - it failed. /// /// `cargo test` reports on the number of failed tests at the end of each run, with some /// associated diagnostics to make it easier to understand what went wrong exactly. #[test] fn your_first_ticket() { /// `let` is used to create a variable: we are binding a new `Ticket` struct /// to the name `ticket_one`. /// /// We said before that Rust is strongly typed, nonetheless we haven't specified /// a type for `ticket_one`. /// As most modern strongly typed programming languages, Rust provides type inference: /// the compiler is smart enough to figure out the type of variables based on /// their usage and it won't bother you unless the type is ambiguous. let ticket_one = Ticket { /// This `.into()` method call is here for a reason, but give us time. /// We'll get there when it's the right moment. title: "A ticket title".into(), description: "A heart-breaking description".into() }; /// `assert_eq` is a macro (notice the ! at the end of the name). /// It checks that the left argument (the expected value) is identical /// to the right argument (the computed value). /// If they are not, it panics - Rust's (almost) non-recoverable way to terminate a program. /// In the case of tests, this is caught by the test framework and the test is marked as failed. assert_eq!(ticket_one.title, "A ticket title"); /// Field syntax: you use a dot to access the field of a struct. assert_eq!(ticket_one.description, "A heart-breaking description"); } } }
53.936709
118
0.635766
cc892fd589eb7da77506a86d744dab42c697bc82
10,099
// The following code is from (scipr-lab's zexe)[https://github.com/scipr-lab/zexe] and thanks for their work use crate::{ bytes::{FromBytes, ToBytes}, fields::{Field, PrimeField, SquareRootField}, groups::Group, UniformRand, Vec, }; use core::{ fmt::{Debug, Display}, hash::Hash, ops::{Add, AddAssign, MulAssign, Neg, Sub, SubAssign}, }; use num_traits::Zero; pub mod models; pub use self::models::*; pub trait PairingEngine: Sized + 'static + Copy + Debug + Sync + Send { /// This is the scalar field of the G1/G2 groups. type Fr: PrimeField + SquareRootField; /// The projective representation of an element in G1. type G1Projective: ProjectiveCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Affine = Self::G1Affine> + From<Self::G1Affine> + Into<Self::G1Affine> + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// The affine representation of an element in G1. type G1Affine: AffineCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Projective = Self::G1Projective> + From<Self::G1Projective> + Into<Self::G1Projective> + Into<Self::G1Prepared>; /// A G1 element that has been preprocessed for use in a pairing. type G1Prepared: ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G1Affine>; /// The projective representation of an element in G2. type G2Projective: ProjectiveCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Affine = Self::G2Affine> + From<Self::G2Affine> + Into<Self::G2Affine> + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// The affine representation of an element in G2. type G2Affine: AffineCurve<BaseField = Self::Fqe, ScalarField = Self::Fr, Projective = Self::G2Projective> + From<Self::G2Projective> + Into<Self::G2Projective> + Into<Self::G2Prepared>; /// A G2 element that has been preprocessed for use in a pairing. type G2Prepared: FromBytes + ToBytes + Default + Clone + Send + Sync + Debug + From<Self::G2Affine>; /// The base field that hosts G1. 
type Fq: PrimeField + SquareRootField; /// The extension field that hosts G2. type Fqe: SquareRootField; /// The extension field that hosts the target group of the pairing. type Fqk: Field; /// Perform a miller loop with some number of (G1, G2) pairs. #[must_use] fn miller_loop<'a, I>(i: I) -> Self::Fqk where I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>; /// Perform final exponentiation of the result of a miller loop. #[must_use] fn final_exponentiation(_: &Self::Fqk) -> Option<Self::Fqk>; /// Computes a product of pairings. #[must_use] fn product_of_pairings<'a, I>(i: I) -> Self::Fqk where I: IntoIterator<Item = &'a (Self::G1Prepared, Self::G2Prepared)>, { Self::final_exponentiation(&Self::miller_loop(i)).unwrap() } /// Performs multiple pairing operations #[must_use] fn pairing<G1, G2>(p: G1, q: G2) -> Self::Fqk where G1: Into<Self::G1Affine>, G2: Into<Self::G2Affine>, { let g1_prep = Self::G1Prepared::from(p.into()); let g2_prep = Self::G2Prepared::from(q.into()); Self::product_of_pairings(core::iter::once(&(g1_prep, g2_prep))) } } /// Projective representation of an elliptic curve point guaranteed to be /// in the correct prime order subgroup. 
pub trait ProjectiveCurve: Eq + 'static + Sized + ToBytes + FromBytes + Copy + Clone + Default + Send + Sync + Hash + Debug + Display + UniformRand + Zero + Neg<Output = Self> + Add<Self, Output = Self> + Sub<Self, Output = Self> + AddAssign<Self> + SubAssign<Self> + MulAssign<<Self as ProjectiveCurve>::ScalarField> + for<'a> Add<&'a Self, Output = Self> + for<'a> Sub<&'a Self, Output = Self> + for<'a> AddAssign<&'a Self> + for<'a> SubAssign<&'a Self> + core::iter::Sum<Self> + for<'a> core::iter::Sum<&'a Self> + From<<Self as ProjectiveCurve>::Affine> + serde::Serialize + for<'a> serde::Deserialize<'a> { type ScalarField: PrimeField + SquareRootField; type BaseField: Field; type Affine: AffineCurve<Projective = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField> + From<Self> + Into<Self>; /// Returns a fixed generator of unknown exponent. #[must_use] fn prime_subgroup_generator() -> Self; /// Normalizes a slice of projective elements so that /// conversion to affine is cheap. fn batch_normalization(v: &mut [Self]); /// Normalizes a slice of projective elements and outputs a vector /// containing the affine equivalents. fn batch_normalization_into_affine(v: &[Self]) -> Vec<Self::Affine> { let mut v = v.to_vec(); Self::batch_normalization(&mut v); v.into_iter().map(|v| v.into()).collect() } /// Checks if the point is already "normalized" so that /// cheap affine conversion is possible. #[must_use] fn is_normalized(&self) -> bool; /// Doubles this element. #[must_use] fn double(&self) -> Self { let mut copy = *self; copy.double_in_place(); copy } /// Doubles this element in place. fn double_in_place(&mut self) -> &mut Self; /// Converts self into the affine representation. fn into_affine(&self) -> Self::Affine { (*self).into() } /// Set `self` to be `self + other`, where `other: Self::Affine`. /// This is usually faster than adding `other` in projective form. 
fn add_mixed(mut self, other: &Self::Affine) -> Self { self.add_assign_mixed(other); self } /// Set `self` to be `self + other`, where `other: Self::Affine`. /// This is usually faster than adding `other` in projective form. fn add_assign_mixed(&mut self, other: &Self::Affine); /// Performs scalar multiplication of this element. fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(mut self, other: S) -> Self { let mut res = Self::zero(); let mut found_one = false; for i in crate::fields::BitIterator::new(other.into()) { if found_one { res.double_in_place(); } else { found_one = i; } if i { res += self; } } self = res; self } } /// Affine representation of an elliptic curve point guaranteed to be /// in the correct prime order subgroup. pub trait AffineCurve: Eq + 'static + Sized + ToBytes + FromBytes + Copy + Clone + Default + Send + Sync + Hash + Debug + Display + Zero + Neg<Output = Self> + From<<Self as AffineCurve>::Projective> + serde::Serialize + for<'a> serde::Deserialize<'a> { type ScalarField: PrimeField + SquareRootField + Into<<Self::ScalarField as PrimeField>::BigInt>; type BaseField: Field; type Projective: ProjectiveCurve<Affine = Self, ScalarField = Self::ScalarField, BaseField = Self::BaseField> + From<Self> + Into<Self> + MulAssign<Self::ScalarField>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// Returns a fixed generator of unknown exponent. #[must_use] fn prime_subgroup_generator() -> Self; /// Converts self into the projective representation. fn into_projective(&self) -> Self::Projective { (*self).into() } /// Returns a group element if the set of bytes forms a valid group element, /// otherwise returns None. This function is primarily intended for sampling /// random group elements from a hash-function or RNG output. fn from_random_bytes(bytes: &[u8]) -> Option<Self>; /// Performs scalar multiplication of this element with mixed addition. 
#[must_use] fn mul<S: Into<<Self::ScalarField as PrimeField>::BigInt>>(&self, other: S) -> Self::Projective; /// Multiply this element by the cofactor. #[must_use] fn mul_by_cofactor(&self) -> Self; /// Multiply this element by the inverse of the cofactor in /// `Self::ScalarField`. #[must_use] fn mul_by_cofactor_inv(&self) -> Self; } pub trait Curve: 'static { /// The base field that hosts. type Fq: PrimeField + SquareRootField; /// This is the scalar field of the groups. type Fr: PrimeField + SquareRootField; /// The projective representation of an element. type Projective: ProjectiveCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Affine = Self::Affine> + From<Self::Affine> + Into<Self::Affine> + MulAssign<Self::Fr>; // needed due to https://github.com/rust-lang/rust/issues/69640 /// The affine representation of an element. type Affine: AffineCurve<BaseField = Self::Fq, ScalarField = Self::Fr, Projective = Self::Projective> + From<Self::Projective> + Into<Self::Projective>; } impl<P: PairingEngine> Curve for P { type Fq = P::Fq; type Fr = P::Fr; type Projective = P::G1Projective; type Affine = P::G1Affine; } impl<C: ProjectiveCurve> Group for C { type ScalarField = C::ScalarField; #[inline] #[must_use] fn double(&self) -> Self { let mut tmp = *self; tmp += self; tmp } #[inline] fn double_in_place(&mut self) -> &mut Self { <C as ProjectiveCurve>::double_in_place(self) } } /// Preprocess a G1 element for use in a pairing. pub fn prepare_g1<E: PairingEngine>(g: impl Into<E::G1Affine>) -> E::G1Prepared { let g: E::G1Affine = g.into(); E::G1Prepared::from(g) } /// Preprocess a G2 element for use in a pairing. pub fn prepare_g2<E: PairingEngine>(g: impl Into<E::G2Affine>) -> E::G2Prepared { let g: E::G2Affine = g.into(); E::G2Prepared::from(g) }
31.073846
113
0.621151
082da65f9bbc29e985499b334e71ad9a29a2bc27
1,157
use crate::le::att::pdus::{PackablePDU, Response, UnpackablePDU}; use crate::le::att::Opcode; use crate::le::connection::MTU; use crate::PackError; use core::convert::TryInto; #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug, Hash)] pub struct ExchangeMTURsp(pub MTU); impl ExchangeMTURsp { pub const BYTE_LEN: usize = MTU::BYTE_LEN; } impl PackablePDU for ExchangeMTURsp { const OPCODE: Opcode = Opcode::ExchangeMTURsp; fn byte_len(&self) -> usize { Self::BYTE_LEN } fn pack_into(&self, buf: &mut [u8]) -> Result<(), PackError> { PackError::expect_length(Self::BYTE_LEN, buf)?; buf.copy_from_slice(u16::from(self.0).to_le_bytes().as_ref()); Ok(()) } } impl UnpackablePDU for ExchangeMTURsp { fn unpack_from(buf: &[u8]) -> Result<Self, PackError> where Self: Sized, { PackError::expect_length(Self::BYTE_LEN, buf)?; let mtu = MTU::new_checked(u16::from_le_bytes( buf.try_into().expect("length checked above"), )) .ok_or(PackError::bad_index(0))?; Ok(ExchangeMTURsp(mtu)) } } impl Response for ExchangeMTURsp {}
29.666667
70
0.643907
fbd296f27d1129ece66e951f35ebbf1e43a9ed4c
3,891
mod conf; mod nuconfig; #[cfg(test)] pub mod tests; pub(crate) use conf::Conf; pub(crate) use nuconfig::NuConfig; use crate::commands::from_toml::convert_toml_value_to_nu_value; use crate::commands::to_toml::value_to_toml_value; use crate::prelude::*; use directories::ProjectDirs; use indexmap::IndexMap; use log::trace; use nu_errors::ShellError; use nu_protocol::{Dictionary, ShellTypeName, UntaggedValue, Value}; use nu_source::Tag; use std::fs::{self, OpenOptions}; use std::io; use std::path::{Path, PathBuf}; pub fn config_path() -> Result<PathBuf, ShellError> { app_path("config", ProjectDirs::config_dir) } pub fn default_path() -> Result<PathBuf, ShellError> { default_path_for(&None) } pub fn default_path_for(file: &Option<PathBuf>) -> Result<PathBuf, ShellError> { let mut filename = config_path()?; let file: &Path = file .as_ref() .map(AsRef::as_ref) .unwrap_or_else(|| "config.toml".as_ref()); filename.push(file); Ok(filename) } pub fn user_data() -> Result<PathBuf, ShellError> { app_path("user data", ProjectDirs::data_local_dir) } fn app_path<F: FnOnce(&ProjectDirs) -> &Path>(display: &str, f: F) -> Result<PathBuf, ShellError> { let dir = ProjectDirs::from("org", "nushell", "nu") .ok_or_else(|| ShellError::untagged_runtime_error("Couldn't find project directory"))?; let path = f(&dir).to_owned(); std::fs::create_dir_all(&path).map_err(|err| { ShellError::untagged_runtime_error(&format!("Couldn't create {} path:\n{}", display, err)) })?; Ok(path) } pub fn read( tag: impl Into<Tag>, at: &Option<PathBuf>, ) -> Result<IndexMap<String, Value>, ShellError> { let filename = default_path()?; let filename = match at { None => filename, Some(ref file) => file.clone(), }; if !filename.exists() && touch(&filename).is_err() { // If we can't create configs, let's just return an empty indexmap instead as we may be in // a readonly environment return Ok(IndexMap::new()); } trace!("config file = {}", filename.display()); let tag = tag.into(); let contents = 
fs::read_to_string(filename) .map(|v| v.tagged(&tag)) .map_err(|err| { ShellError::labeled_error( &format!("Couldn't read config file:\n{}", err), "file name", &tag, ) })?; let parsed: toml::Value = toml::from_str(&contents).map_err(|err| { ShellError::labeled_error( &format!("Couldn't parse config file:\n{}", err), "file name", &tag, ) })?; let value = convert_toml_value_to_nu_value(&parsed, tag); let tag = value.tag(); match value.value { UntaggedValue::Row(Dictionary { entries }) => Ok(entries), other => Err(ShellError::type_error( "Dictionary", other.type_name().spanned(tag.span), )), } } pub(crate) fn config(tag: impl Into<Tag>) -> Result<IndexMap<String, Value>, ShellError> { read(tag, &None) } pub fn write(config: &IndexMap<String, Value>, at: &Option<PathBuf>) -> Result<(), ShellError> { let filename = &mut default_path()?; let filename = match at { None => filename, Some(file) => { filename.pop(); filename.push(file); filename } }; let contents = value_to_toml_value( &UntaggedValue::Row(Dictionary::new(config.clone())).into_untagged_value(), )?; let contents = toml::to_string(&contents)?; fs::write(&filename, &contents)?; Ok(()) } // A simple implementation of `% touch path` (ignores existing files) fn touch(path: &Path) -> io::Result<()> { match OpenOptions::new().create(true).write(true).open(path) { Ok(_) => Ok(()), Err(e) => Err(e), } }
27.992806
99
0.603701
391b11c4d16c58441b6c2be53952baf642377861
17,920
//! This algorithm is not intended to be an optimization, it is rather for legalization.
//! Specifically, spir-v disallows things like a `StorageClass::Function` pointer to a
//! `StorageClass::Input` pointer. Our frontend definitely allows it, though, this is like taking a
//! `&Input<T>` in a function! So, we inline all functions (see inline.rs) that take these
//! "illegal" pointers, then run mem2reg on the result to "unwrap" the Function pointer.
//!
//! Because it's merely a legalization pass, this computes "minimal" SSA form, *not* "pruned" SSA
//! form. The difference is that "minimal" may include extra phi nodes that aren't actually used
//! anywhere - we assume that later optimization passes will take care of these (relying on what
//! wikipedia calls "treat pruning as a dead code elimination problem").

use super::simple_passes::outgoing_edges;
use super::{apply_rewrite_rules, id};
use rspirv::dr::{Block, Function, Instruction, ModuleHeader, Operand};
use rspirv::spirv::{Op, Word};
use std::collections::{hash_map, HashMap, HashSet};

/// Runs the mem2reg pass on `func`: computes dominance information for the
/// function's CFG, then promotes eligible `OpVariable`s (and access chains
/// rooted at them) to SSA values via phi insertion and renaming.
///
/// * `pointer_to_pointee` - maps a pointer type id to the type it points to.
/// * `constants` - maps a constant's id to its integer value (used to resolve
///   `OpAccessChain` indices).
pub fn mem2reg(
    header: &mut ModuleHeader,
    types_global_values: &mut Vec<Instruction>,
    pointer_to_pointee: &HashMap<Word, Word>,
    constants: &HashMap<Word, u32>,
    func: &mut Function,
) {
    let preds = compute_preds(&func.blocks);
    let idom = compute_idom(&preds);
    let dominance_frontier = compute_dominance_frontier(&preds, &idom);
    insert_phis_all(
        header,
        types_global_values,
        pointer_to_pointee,
        constants,
        &mut func.blocks,
        dominance_frontier,
    );
}

/// Returns, for every block index, the indices of its predecessor blocks.
pub fn compute_preds(blocks: &[Block]) -> Vec<Vec<usize>> {
    let mut result = vec![vec![]; blocks.len()];
    for (source_idx, source) in blocks.iter().enumerate() {
        let mut edges = outgoing_edges(source);
        // HACK(eddyb) treat `OpSelectionMerge` as an edge, in case it points
        // to an otherwise-unreachable block.
        if let Some(before_last_idx) = source.instructions.len().checked_sub(2) {
            if let Some(before_last) = source.instructions.get(before_last_idx) {
                if before_last.class.opcode == Op::SelectionMerge {
                    edges.push(before_last.operands[0].unwrap_id_ref());
                }
            }
        }
        for dest_id in edges {
            // Map the destination label id back to a block index.
            let dest_idx = blocks
                .iter()
                .position(|b| b.label_id().unwrap() == dest_id)
                .unwrap();
            result[dest_idx].push(source_idx);
        }
    }
    result
}

// Paper: A Simple, Fast Dominance Algorithm
// https://www.cs.rice.edu/~keith/EMBED/dom.pdf
// Note: requires nodes in reverse postorder
//
// Returns, for each block index, the index of its immediate dominator
// (the entry block, index 0, dominates itself).
fn compute_idom(preds: &[Vec<usize>]) -> Vec<usize> {
    // Walks both "fingers" up the (partial) dominator tree until they meet;
    // the meeting point dominates both starting nodes.
    fn intersect(doms: &[Option<usize>], mut finger1: usize, mut finger2: usize) -> usize {
        // TODO: This may return an optional result?
        while finger1 != finger2 {
            // Note: The comparisons here are inverted from the paper, because the paper uses
            // comparison to be postorder index. However, we have reverse postorder indices.
            while finger1 > finger2 {
                finger1 = doms[finger1].unwrap();
            }
            while finger2 > finger1 {
                finger2 = doms[finger2].unwrap();
            }
        }
        finger1
    }
    let mut idom = vec![None; preds.len()];
    idom[0] = Some(0);
    let mut changed = true;
    // Iterate to a fixed point, per the paper's algorithm.
    while changed {
        changed = false;
        for node in 1..(preds.len()) {
            let mut new_idom: Option<usize> = None;
            for &pred in &preds[node] {
                // Only consider predecessors that already have an idom estimate.
                if idom[pred].is_some() {
                    new_idom = Some(new_idom.map_or(pred, |new_idom| intersect(&idom, pred, new_idom)));
                }
            }
            // TODO: This may return an optional result?
            let new_idom = new_idom.unwrap();
            if idom[node] != Some(new_idom) {
                idom[node] = Some(new_idom);
                changed = true;
            }
        }
    }
    idom.iter().map(|x| x.unwrap()).collect()
}

// Same paper as above
//
// Returns the dominance frontier of each block: the set of blocks where that
// block's dominance ends (the classic phi-placement sites).
fn compute_dominance_frontier(preds: &[Vec<usize>], idom: &[usize]) -> Vec<HashSet<usize>> {
    assert_eq!(preds.len(), idom.len());
    let mut dominance_frontier = vec![HashSet::new(); preds.len()];
    for node in 0..preds.len() {
        // Only join points (>= 2 predecessors) contribute to frontiers.
        if preds[node].len() >= 2 {
            for &pred in &preds[node] {
                // Walk up the dominator tree from each predecessor until we
                // reach node's idom; every block on the way has `node` in its
                // frontier.
                let mut runner = pred;
                while runner != idom[node] {
                    dominance_frontier[runner].insert(node);
                    runner = idom[runner];
                }
            }
        }
    }
    dominance_frontier
}

/// For every promotable `OpVariable` in the entry block, inserts the needed
/// phi nodes and renames its loads/stores to SSA values, then deletes the
/// now-dead variables and access chains.
fn insert_phis_all(
    header: &mut ModuleHeader,
    types_global_values: &mut Vec<Instruction>,
    pointer_to_pointee: &HashMap<Word, Word>,
    constants: &HashMap<Word, u32>,
    blocks: &mut [Block],
    dominance_frontier: Vec<HashSet<usize>>,
) {
    // Pairs of (all ids rooted at a variable -> VarInfo, dereferenced type),
    // for every variable whose uses are all promotable (see
    // `collect_access_chains`, which returns None when the variable escapes).
    let thing = blocks[0]
        .instructions
        .iter()
        .filter(|inst| inst.class.opcode == Op::Variable)
        .filter_map(|inst| {
            let var = inst.result_id.unwrap();
            let var_ty = *pointer_to_pointee.get(&inst.result_type.unwrap()).unwrap();
            Some((
                collect_access_chains(pointer_to_pointee, constants, blocks, var, var_ty)?,
                var_ty,
            ))
        })
        .collect::<Vec<_>>();
    for &(ref var_map, base_var_type) in &thing {
        let blocks_with_phi = insert_phis(blocks, &dominance_frontier, var_map);
        let mut renamer = Renamer {
            header,
            types_global_values,
            blocks,
            blocks_with_phi,
            base_var_type,
            var_map,
            phi_defs: HashSet::new(),
            visited: HashSet::new(),
            stack: Vec::new(),
            rewrite_rules: HashMap::new(),
        };
        // Start the renaming walk at the entry block.
        renamer.rename(0, None);
        apply_rewrite_rules(&renamer.rewrite_rules, blocks);
        remove_nops(blocks);
    }
    remove_old_variables(blocks, &thing);
}

#[derive(Debug)]
struct VarInfo {
    // Type of the *dereferenced* variable.
    ty: Word,
    // OpAccessChain indexes off the base variable
    indices: Vec<u32>,
}

/// Collects `base_var` and every `OpAccessChain`/`OpInBoundsAccessChain`
/// (transitively) rooted at it, mapping each resulting pointer id to its
/// `VarInfo`. Returns `None` if any use is not a promotable `Load`/`Store`/
/// access chain (i.e. the variable escapes), or if an access-chain index is
/// not a known constant.
fn collect_access_chains(
    pointer_to_pointee: &HashMap<Word, Word>,
    constants: &HashMap<Word, u32>,
    blocks: &[Block],
    base_var: Word,
    base_var_ty: Word,
) -> Option<HashMap<Word, VarInfo>> {
    // Builds the VarInfo for an access chain by appending its constant
    // indices to the base pointer's indices. Returns None if an index isn't a
    // known constant.
    fn construct_access_chain_info(
        pointer_to_pointee: &HashMap<Word, Word>,
        constants: &HashMap<Word, u32>,
        inst: &Instruction,
        base: &VarInfo,
    ) -> Option<VarInfo> {
        Some(VarInfo {
            ty: *pointer_to_pointee.get(&inst.result_type.unwrap()).unwrap(),
            indices: {
                let mut base_indicies = base.indices.clone();
                for op in inst.operands.iter().skip(1) {
                    base_indicies.push(*constants.get(&op.id_ref_any().unwrap())?)
                }
                base_indicies
            },
        })
    }

    let mut variables = HashMap::new();
    variables.insert(
        base_var,
        VarInfo {
            ty: base_var_ty,
            indices: vec![],
        },
    );
    // Loop in case a previous block references a later AccessChain
    loop {
        let mut changed = false;
        for inst in blocks.iter().flat_map(|b| &b.instructions) {
            // Reject the variable if any known pointer id is used by an
            // opcode we cannot rewrite.
            for (index, op) in inst.operands.iter().enumerate() {
                if let Operand::IdRef(id) = op {
                    if variables.contains_key(id) {
                        match inst.class.opcode {
                            // Only allow store if pointer is the lhs, not rhs
                            Op::Store if index == 0 => {}
                            Op::Load | Op::AccessChain | Op::InBoundsAccessChain => {}
                            _ => return None,
                        }
                    }
                }
            }

            if let Op::AccessChain | Op::InBoundsAccessChain = inst.class.opcode {
                if let Some(base) = variables.get(&inst.operands[0].id_ref_any().unwrap()) {
                    let info = construct_access_chain_info(pointer_to_pointee, constants, inst, base)?;
                    match variables.entry(inst.result_id.unwrap()) {
                        hash_map::Entry::Vacant(entry) => {
                            entry.insert(info);
                            changed = true;
                        }
                        hash_map::Entry::Occupied(_) => {}
                    }
                }
            }
        }
        if !changed {
            break;
        }
    }
    Some(variables)
}

/// Returns true if `block` writes any pointer tracked in `var_map`, either
/// via `OpStore` or via an `OpVariable` with an initializer operand.
fn has_store(block: &Block, var_map: &HashMap<Word, VarInfo>) -> bool {
    block.instructions.iter().any(|inst| {
        let ptr = match inst.class.opcode {
            Op::Store => inst.operands[0].id_ref_any().unwrap(),
            // An OpVariable without an initializer (< 2 operands) isn't a store.
            Op::Variable if inst.operands.len() < 2 => return false,
            Op::Variable => inst.result_id.unwrap(),
            _ => return false,
        };
        var_map.contains_key(&ptr)
    })
}

/// Standard worklist phi-placement: returns the set of block indices that
/// need a phi for the variable described by `var_map` (the iterated dominance
/// frontier of all blocks that store to it).
fn insert_phis(
    blocks: &[Block],
    dominance_frontier: &[HashSet<usize>],
    var_map: &HashMap<Word, VarInfo>,
) -> HashSet<usize> {
    // TODO: Some algorithms check if the var is trivial in some way, e.g. all loads and stores are
    // in a single block. We should probably do that too.
    let mut ever_on_work_list = HashSet::new();
    let mut work_list = Vec::new();
    let mut blocks_with_phi = HashSet::new();
    for (block_idx, block) in blocks.iter().enumerate() {
        if has_store(block, var_map) {
            ever_on_work_list.insert(block_idx);
            work_list.push(block_idx);
        }
    }
    while let Some(x) = work_list.pop() {
        for &y in &dominance_frontier[x] {
            // A new phi is itself a definition, so y may need processing too.
            if blocks_with_phi.insert(y) && ever_on_work_list.insert(y) {
                work_list.push(y)
            }
        }
    }
    blocks_with_phi
}

// These can't be part of the Renamer impl due to borrowck rules.
//
// Returns an `OpUndef` of type `ty`, reusing an existing one if present,
// otherwise appending a fresh one to `types_global_values`.
fn undef_for(
    header: &mut ModuleHeader,
    types_global_values: &mut Vec<Instruction>,
    ty: Word,
) -> Word {
    // TODO: This is horribly slow, fix this
    let existing = types_global_values
        .iter()
        .find(|inst| inst.class.opcode == Op::Undef && inst.result_type.unwrap() == ty);
    if let Some(existing) = existing {
        return existing.result_id.unwrap();
    }
    let inst_id = id(header);
    types_global_values.push(Instruction::new(Op::Undef, Some(ty), Some(inst_id), vec![]));
    inst_id
}

/// Returns the current top of the renaming stack, or an `OpUndef` of `ty`
/// when the stack is empty (i.e. the variable is read before any store).
fn top_stack_or_undef(
    header: &mut ModuleHeader,
    types_global_values: &mut Vec<Instruction>,
    stack: &[Word],
    ty: Word,
) -> Word {
    match stack.last() {
        Some(&top) => top,
        None => undef_for(header, types_global_values, ty),
    }
}

/// State for the SSA renaming walk over the dominator/CFG structure, for a
/// single promoted variable.
struct Renamer<'a> {
    header: &'a mut ModuleHeader,
    types_global_values: &'a mut Vec<Instruction>,
    blocks: &'a mut [Block],
    // Blocks that need a phi for this variable (from `insert_phis`).
    blocks_with_phi: HashSet<usize>,
    // The dereferenced type of the variable being promoted.
    base_var_type: Word,
    // All pointer ids rooted at the variable (from `collect_access_chains`).
    var_map: &'a HashMap<Word, VarInfo>,
    // Result ids of phis this renamer has created (so we can find them again).
    phi_defs: HashSet<Word>,
    // Blocks already processed (their instructions are already rewritten).
    visited: HashSet<usize>,
    // Stack of SSA values currently defining the variable.
    stack: Vec<Word>,
    // Load result id -> replacement SSA value, applied after the walk.
    rewrite_rules: HashMap<Word, Word>,
}

impl Renamer<'_> {
    // Returns the phi definition.
    //
    // Adds the (current value, from_block) pair to this variable's phi in
    // `block`, creating the phi first if this is the first incoming edge.
    fn insert_phi_value(&mut self, block: usize, from_block: usize) -> Word {
        let from_block_label = self.blocks[from_block].label_id().unwrap();
        let phi_defs = &self.phi_defs;
        let existing_phi = self.blocks[block].instructions.iter_mut().find(|inst| {
            inst.class.opcode == Op::Phi && phi_defs.contains(&inst.result_id.unwrap())
        });
        let top_def = top_stack_or_undef(
            self.header,
            self.types_global_values,
            &self.stack,
            self.base_var_type,
        );
        match existing_phi {
            None => {
                let new_id = id(self.header);
                // Phis must come first in a block, so insert at index 0.
                self.blocks[block].instructions.insert(
                    0,
                    Instruction::new(
                        Op::Phi,
                        Some(self.base_var_type),
                        Some(new_id),
                        vec![Operand::IdRef(top_def), Operand::IdRef(from_block_label)],
                    ),
                );
                self.phi_defs.insert(new_id);
                new_id
            }
            Some(existing_phi) => {
                existing_phi.operands.extend_from_slice(&[
                    Operand::IdRef(top_def),
                    Operand::IdRef(from_block_label),
                ]);
                existing_phi.result_id.unwrap()
            }
        }
    }

    // Recursively renames loads/stores of the promoted variable in `block`
    // and its CFG successors, threading the current SSA value via `stack`.
    // `from_block` is the predecessor we arrived from (None for the entry).
    fn rename(&mut self, block: usize, from_block: Option<usize>) {
        let original_stack = self.stack.len();

        // Even if already visited, we must still record our incoming value in
        // this block's phi before bailing out below.
        if let Some(from_block) = from_block {
            if self.blocks_with_phi.contains(&block) {
                let new_top = self.insert_phi_value(block, from_block);
                self.stack.push(new_top);
            }
        }

        if !self.visited.insert(block) {
            // Already rewritten; just restore the stack and return.
            while self.stack.len() > original_stack {
                self.stack.pop();
            }
            return;
        }

        for inst in &mut self.blocks[block].instructions {
            if inst.class.opcode == Op::Variable && inst.operands.len() > 1 {
                // OpVariable with an initializer acts as an initial store.
                let ptr = inst.result_id.unwrap();
                let val = inst.operands[1].id_ref_any().unwrap();
                if let Some(var_info) = self.var_map.get(&ptr) {
                    assert_eq!(var_info.indices, Vec::<u32>::new());
                    self.stack.push(val);
                }
            } else if inst.class.opcode == Op::Store {
                let ptr = inst.operands[0].id_ref_any().unwrap();
                let val = inst.operands[1].id_ref_any().unwrap();
                if let Some(var_info) = self.var_map.get(&ptr) {
                    if var_info.indices.is_empty() {
                        // Whole-variable store: the stored value becomes the
                        // current definition; the store itself is deleted.
                        *inst = Instruction::new(Op::Nop, None, None, vec![]);
                        self.stack.push(val);
                    } else {
                        // Partial store through an access chain: rewrite to a
                        // CompositeInsert into the current whole value.
                        let new_id = id(self.header);
                        let prev_comp = top_stack_or_undef(
                            self.header,
                            self.types_global_values,
                            &self.stack,
                            self.base_var_type,
                        );
                        let mut operands = vec![Operand::IdRef(val), Operand::IdRef(prev_comp)];
                        operands
                            .extend(var_info.indices.iter().copied().map(Operand::LiteralInt32));
                        *inst = Instruction::new(
                            Op::CompositeInsert,
                            Some(self.base_var_type),
                            Some(new_id),
                            operands,
                        );
                        self.stack.push(new_id);
                    }
                }
            } else if inst.class.opcode == Op::Load {
                let ptr = inst.operands[0].id_ref_any().unwrap();
                if let Some(var_info) = self.var_map.get(&ptr) {
                    let loaded_val = inst.result_id.unwrap();
                    // TODO: Should this do something more sane if it's undef?
                    let current_obj = top_stack_or_undef(
                        self.header,
                        self.types_global_values,
                        &self.stack,
                        self.base_var_type,
                    );
                    if var_info.indices.is_empty() {
                        // Whole-variable load: replaced by the current value.
                        *inst = Instruction::new(Op::Nop, None, None, vec![]);
                        self.rewrite_rules.insert(loaded_val, current_obj);
                    } else {
                        // Partial load: rewrite to a CompositeExtract.
                        let new_id = id(self.header);
                        let mut operands = vec![Operand::IdRef(current_obj)];
                        operands
                            .extend(var_info.indices.iter().copied().map(Operand::LiteralInt32));
                        *inst = Instruction::new(
                            Op::CompositeExtract,
                            Some(var_info.ty),
                            Some(new_id),
                            operands,
                        );
                        self.rewrite_rules.insert(loaded_val, new_id);
                    }
                }
            }
        }

        for dest_id in outgoing_edges(&self.blocks[block]) {
            // TODO: Don't do this find
            let dest_idx = self
                .blocks
                .iter()
                .position(|b| b.label_id().unwrap() == dest_id)
                .unwrap();
            self.rename(dest_idx, Some(block));
        }

        // Pop everything this block pushed, restoring the caller's stack.
        while self.stack.len() > original_stack {
            self.stack.pop();
        }
    }
}

/// Deletes the `OpNop` placeholders left behind by the renamer.
fn remove_nops(blocks: &mut [Block]) {
    for block in blocks {
        block
            .instructions
            .retain(|inst| inst.class.opcode != Op::Nop);
    }
}

/// Deletes the promoted `OpVariable`s from the entry block and every access
/// chain that referenced one of them (all their uses have been rewritten).
fn remove_old_variables(blocks: &mut [Block], thing: &[(HashMap<u32, VarInfo>, u32)]) {
    blocks[0].instructions.retain(|inst| {
        inst.class.opcode != Op::Variable || {
            let result_id = inst.result_id.unwrap();
            thing
                .iter()
                .all(|(var_map, _)| !var_map.contains_key(&result_id))
        }
    });
    for block in blocks {
        block.instructions.retain(|inst| {
            !matches!(inst.class.opcode, Op::AccessChain | Op::InBoundsAccessChain)
                || inst.operands.iter().all(|op| {
                    op.id_ref_any().map_or(true, |id| {
                        thing.iter().all(|(var_map, _)| !var_map.contains_key(&id))
                    })
                })
        })
    }
}
36.646217
99
0.524275
ed567ad597abad406296ad36b798a2f154a777f5
5,767
//! Disjoint sparse table.

use std::convert::From;
use std::ops::{Index, Range, RangeBounds};

use binop::Monoid;
use buf_range::bounds_within;
use fold::Fold;

/// Disjoint sparse table.
///
/// Answers monoid-product queries over arbitrary intervals of an array of
/// $n$ elements. Elements cannot be updated.
/// Requiring only a semigroup might also work; needs consideration.
///
/// # Idea
/// For each $k$ ($1\\le k\< \\log\_2(n)$), precompute the monoid products of the
/// intervals $[i\\cdot 2\^k-j, i\\cdot 2\^k)$ and $[i\\cdot 2\^k, i\\cdot 2\^k+j)$
/// ($2\\le j\\le 2\^k$, for each odd $i$ such that the interval end is at most $n$).
/// Any interval can be expressed as the concatenation of at most $2$ of the above.
///
/// # Implementation notes
/// The preprocessing takes care not to compute the product of the same interval
/// more than once across different levels. Because of the overhead of that
/// bookkeeping, recomputing each time may actually be faster when the monoid
/// operation is cheap. Query processing uses a similar trick.
///
/// # Complexity
/// |Operation|Time complexity|
/// |---|---|
/// |`from`|$\\Theta(n\\log(n))$|
/// |`fold`|$\\Theta(1)$|
///
/// # Precise analysis
/// The number of monoid multiplications performed by the preprocessing is
/// bounded above by
/// $$ n\\cdot\\lceil{\\log\_2(n)-3}\\rceil + 2\\cdot\\lceil{\\log\_2(n)}\\rceil + 2. $$
///
/// For $n = 1000$ this is $7022$, which comfortably satisfies the
/// [Secret](http://s3-ap-northeast-1.amazonaws.com/data.cms.ioi-jp.org/open-2014/2014-open-d2-secret.pdf)
/// constraint of "at most $8000$ queries for $n = 1000$".
///
/// When answering a query, if the given interval was precomputed during
/// preprocessing, or its length is at most $1$, the answer is returned without
/// computing any new product; otherwise exactly $1$ product is computed.
///
/// ## More precise analysis
///
/// The exact number of multiplications in the preprocessing should be computable
/// in $O(\\log(n))$ time by the code below.
/// Since it is rather long, it might be better to move it elsewhere.
/// ```
/// /// Returns the number of multiplications in preprocessing for `n` elements.
/// fn count(n: usize) -> usize {
///     if n <= 2 {
///         return 0;
///     }
///     g(n - 1)
///         + if n.is_power_of_two() {
///             n.trailing_zeros() as usize
///         } else {
///             n.next_power_of_two() / 2
///         }
///         - 1
/// }
///
/// assert_eq!(count(3), 1);
/// assert_eq!(count(10), 14);
/// assert_eq!(count(1000), 7008);
/// assert_eq!(count(1_000_000), 16_980_635);
///
/// /// Returns the sum of the contributions of each level.
/// fn g(n: usize) -> usize {
///     (0..)
///         .take_while(|&k| n >= 2_usize.pow(k + 1))
///         .map(|k| f(k, n - 2_usize.pow(k + 1)))
///         .sum()
/// }
///
/// /// Returns the contribution of level `k`.
/// fn f(k: u32, n: usize) -> usize {
///     let p = 2_usize.pow(k);
///     n / (2 * p) * p
///         + if n / p % 2 == 1 { n % p + 1 } else { 0 }
///         + (n + 1) / (2 * p) * (p - 1)
/// }
/// ```
///
/// # Examples
/// ```
/// use nekolib::ds::DisjointSparseTable;
/// use nekolib::traits::Fold;
/// use nekolib::utils::OpRollHash;
///
/// let op_rh = OpRollHash::<998244353>::default();
/// let value_of = |s| op_rh.value_of(s);
///
/// let base: Vec<_> = ["abra", "cad", "abra"].iter().map(|s| value_of(s)).collect();
/// let dst: DisjointSparseTable<_> = (base, op_rh).into();
/// assert_eq!(dst.fold(1..=2), value_of("cadabra"));
/// assert_eq!(dst.fold(..), value_of("abracadabra"));
/// ```
pub struct DisjointSparseTable<M: Monoid> {
    // buf[0] holds the original elements; buf[k] (k >= 1) holds the
    // precomputed prefix/suffix products for level k.
    buf: Vec<Vec<M::Set>>,
    monoid: M,
}

impl<M, B> Fold<B> for DisjointSparseTable<M>
where
    M: Monoid,
    M::Set: Clone,
    B: RangeBounds<usize>,
{
    type Output = M;
    fn fold(&self, b: B) -> M::Set {
        let Range { start, end } = bounds_within(b, self.buf[0].len());
        // Empty interval: return the monoid identity.
        if start >= end {
            return self.monoid.id();
        }
        let len = end - start;
        // From here on, `end` is the inclusive right endpoint.
        let end = end - 1;
        if start == end {
            return self.buf[0][start].clone();
        }
        // NOTE(review): `row` appears to select the level whose split point
        // separates `start` and `end` (highest differing bit) — verify against
        // the construction in `From`.
        let row = ((start ^ end) + 1).next_power_of_two().trailing_zeros() - 1;
        let row_len = 1_usize << row;
        let row = row as usize;
        if len <= 2 * row_len && row + 1 < self.buf.len() {
            // Short-circuit when the queried interval was itself precomputed
            // at the next level, avoiding the extra multiplication.
            if start.is_power_of_two() && end >> (row + 1) == 1 {
                return self.buf[row + 1][end].clone();
            }
            if (end + 1).is_power_of_two() && start >> (row + 1) == 0 {
                return self.buf[row + 1][start].clone();
            }
        }
        // General case: exactly one multiplication of a suffix product
        // (ending at the split) with a prefix product (starting at the split).
        self.monoid.op(self.buf[row][start].clone(), self.buf[row][end].clone())
    }
}

impl<M> From<Vec<M::Set>> for DisjointSparseTable<M>
where
    M: Monoid + Default,
    M::Set: Clone,
{
    /// Builds the table using the monoid's `Default` instance.
    fn from(base: Vec<M::Set>) -> Self {
        Self::from((base, M::default()))
    }
}

impl<M> From<(Vec<M::Set>, M)> for DisjointSparseTable<M>
where
    M: Monoid,
    M::Set: Clone,
{
    /// Builds the table from the base array and an explicit monoid instance.
    fn from((base, monoid): (Vec<M::Set>, M)) -> Self {
        let len = base.len();
        let height = len.next_power_of_two().trailing_zeros().max(1) as usize;
        let mut buf = vec![base; height];
        // First pass: prefix products to the right of each split point
        // (`mid` = odd multiple of 2^i).
        for i in 1..height {
            let w = 1 << i;
            for j in (1..).step_by(2).take_while(|&j| j * w <= len) {
                let mid = j * w;
                for r in (1..w).take_while(|r| mid + r < len) {
                    buf[i][mid + r] = monoid.op(
                        buf[i][mid + r - 1].clone(),
                        buf[0][mid + r].clone(),
                    );
                }
            }
        }
        // Second pass: suffix products to the left of each split point.
        for i in 1..height {
            let w = 1 << i;
            for j in (1..).step_by(2).take_while(|&j| j * w <= len) {
                let mid = j * w - 1;
                for l in 1..w {
                    buf[i][mid - l] = if mid > l && (l + 1).is_power_of_two() {
                        // This interval equals one already computed at a lower
                        // level; reuse it instead of multiplying again.
                        let ei = (mid - l).trailing_zeros() as usize;
                        let ej = mid;
                        buf[ei][ej].clone()
                    } else {
                        monoid.op(
                            buf[0][mid - l].clone(),
                            buf[i][mid - l + 1].clone(),
                        )
                    };
                }
            }
        }
        Self { buf, monoid }
    }
}

impl<M> Index<usize> for DisjointSparseTable<M>
where
    M: Monoid,
    M::Set: Clone,
{
    type Output = M::Set;
    /// Returns a reference to the `i`-th base element.
    fn index(&self, i: usize) -> &Self::Output {
        &self.buf[0][i]
    }
}
28.408867
106
0.48188
f4f8ef3ce33b81e99e40ae082dd63d62865b0a69
2,155
//! Types for parsers use alloc::string::String; use alloc::vec::Vec; use core::ops::Range; /// Statements #[derive(Clone, Debug, PartialEq)] pub struct Statement<L> { /// The kind of the statement pub kind: StatementKind<L>, /// The range in the file pub span: Range<L>, } /// Kinds of statements #[derive(Clone, Debug, PartialEq)] pub enum StatementKind<L> { /// A value binding like `pat = "expr"` ValueBinding(Pattern<L>, Expression<L>), /// A table header TableHeader(TableHeaderKind, Pattern<L>, Option<Expression<L>>), } /// Kinds of table headers #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TableHeaderKind { /// A normal table header like `[table]` Normal, /// An array of tables like `[[array]]` Array, } /// Patterns #[derive(Clone, Debug, PartialEq, Eq)] pub struct Pattern<L> { /// The kind of the pattern pub kind: PatternKind<L>, /// The range in the file pub span: Range<L>, } /// Kinds of patterns #[derive(Clone, Debug, PartialEq, Eq)] pub enum PatternKind<L> { /// A key pattern Key(Key<L>), } /// Keys #[derive(Clone, Debug, PartialEq, Eq)] pub struct Key<L> { /// The kind of the key pub kind: KeyKind, /// The name of the key pub name: String, /// The range in the file pub span: Range<L>, } /// Kinds of keys #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum KeyKind { /// A normal key like `key` Normal, /// A local key like `_key` Local, /// A built-in key like `@key` Builtin, } /// Expressions #[derive(Clone, Debug, PartialEq)] pub struct Expression<L> { /// The kind of the expression pub kind: ExpressionKind<L>, /// The range in the file pub span: Range<L>, } /// Kinds of expressions #[derive(Clone, Debug, PartialEq)] pub enum ExpressionKind<L> { /// A literal Literal(Literal), /// An array Array(Vec<Expression<L>>), /// An inline table InlineTable(Vec<(Key<L>, Expression<L>)>), } /// Values of literals #[derive(Clone, Debug, PartialEq)] pub enum Literal { Character(char), String(String), Integer(u64), Float(f64), }
21.767677
68
0.623666
094b40e1ce4d02b087ae3429ed5bf618e99997d1
174
// functions1.rs
// Make me compile! Execute `rustlings hint functions1` for hints :)

fn main() {
    call_me();
}

/// Helper invoked from `main`; prints a fixed message.
fn call_me() {
    println!("I'm not main function!");
}
15.818182
68
0.626437
7550c0eddd90176b22d243b4ad5ea03aa32e4912
413,788
use std::collections::HashMap; use std::cell::RefCell; use std::borrow::BorrowMut; use std::default::Default; use std::collections::BTreeMap; use serde_json as json; use std::io; use std::fs; use std::mem; use std::thread::sleep; use crate::client; // ############## // UTILITIES ### // ############ /// Identifies the an OAuth2 authorization scope. /// A scope is needed when requesting an /// [authorization token](https://developers.google.com/youtube/v3/guides/authentication). #[derive(PartialEq, Eq, Hash)] pub enum Scope { /// View and manage your data across Google Cloud Platform services CloudPlatform, } impl AsRef<str> for Scope { fn as_ref(&self) -> &str { match *self { Scope::CloudPlatform => "https://www.googleapis.com/auth/cloud-platform", } } } impl Default for Scope { fn default() -> Scope { Scope::CloudPlatform } } // ######## // HUB ### // ###### /// Central instance to access all CloudTasks related resource activities /// /// # Examples /// /// Instantiate a new hub /// /// ```test_harness,no_run /// extern crate hyper; /// extern crate hyper_rustls; /// extern crate yup_oauth2 as oauth2; /// extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// use cloudtasks2_beta2::api::Queue; /// use cloudtasks2_beta2::{Result, Error}; /// # #[test] fn egal() { /// use std::default::Default; /// use oauth2; /// use cloudtasks2_beta2::CloudTasks; /// /// // Get an ApplicationSecret instance by some means. It contains the `client_id` and /// // `client_secret`, among other things. /// let secret: ApplicationSecret = Default::default(); /// // Instantiate the authenticator. It will choose a suitable authentication flow for you, /// // unless you replace `None` with the desired Flow. /// // Provide your own `AuthenticatorDelegate` to adjust the way it operates and get feedback about /// // what's going on. You probably want to bring in your own `TokenStorage` to persist tokens and /// // retrieve them from storage. 
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// secret, /// yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// ).build().await.unwrap(); /// let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = Queue::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_queues_patch(req, "name") /// .update_mask("At") /// .doit(); /// /// match result { /// Err(e) => match e { /// // The Error enum provides details about what exactly happened. 
/// // You can also just use its `Debug`, `Display` or `Error` traits /// Error::HttpError(_) /// |Error::MissingAPIKey /// |Error::MissingToken(_) /// |Error::Cancelled /// |Error::UploadSizeLimitExceeded(_, _) /// |Error::Failure(_) /// |Error::BadRequest(_) /// |Error::FieldClash(_) /// |Error::JsonDecodeError(_, _) => println!("{}", e), /// }, /// Ok(res) => println!("Success: {:?}", res), /// } /// # } /// ``` pub struct CloudTasks<C> { client: RefCell<C>, auth: RefCell<oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>>, _user_agent: String, _base_url: String, _root_url: String, } impl<'a, C> client::Hub for CloudTasks<C> {} impl<'a, C> CloudTasks<C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> { pub fn new(client: C, authenticator: oauth2::authenticator::Authenticator<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>>) -> CloudTasks<C> { CloudTasks { client: RefCell::new(client), auth: RefCell::new(authenticator), _user_agent: "google-api-rust-client/1.0.14".to_string(), _base_url: "https://cloudtasks.googleapis.com/".to_string(), _root_url: "https://cloudtasks.googleapis.com/".to_string(), } } pub fn projects(&'a self) -> ProjectMethods<'a, C> { ProjectMethods { hub: &self } } /// Set the user-agent header field to use in all requests to the server. /// It defaults to `google-api-rust-client/1.0.14`. /// /// Returns the previously set user-agent. pub fn user_agent(&mut self, agent_name: String) -> String { mem::replace(&mut self._user_agent, agent_name) } /// Set the base url to use in all requests to the server. /// It defaults to `https://cloudtasks.googleapis.com/`. /// /// Returns the previously set base url. pub fn base_url(&mut self, new_base_url: String) -> String { mem::replace(&mut self._base_url, new_base_url) } /// Set the root url to use in all requests to the server. 
/// It defaults to `https://cloudtasks.googleapis.com/`. /// /// Returns the previously set root url. pub fn root_url(&mut self, new_root_url: String) -> String { mem::replace(&mut self._root_url, new_root_url) } } // ############ // SCHEMAS ### // ########## /// Request message for acknowledging a task using /// AcknowledgeTask. /// /// # Activities /// /// This type is used in activities, which are methods you may call on this type or where this type is involved in. /// The list links the activity name, along with information about where it is used (one of *request* and *response*). /// /// * [locations queues tasks acknowledge projects](ProjectLocationQueueTaskAcknowledgeCall) (request) /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct AcknowledgeTaskRequest { /// Required. The task's current schedule time, available in the /// schedule_time returned by /// LeaseTasks response or /// RenewLease response. This restriction is /// to ensure that your worker currently holds the lease. #[serde(rename="scheduleTime")] pub schedule_time: Option<String>, } impl client::RequestValue for AcknowledgeTaskRequest {} /// App Engine HTTP request. /// /// The message defines the HTTP request that is sent to an App Engine app when /// the task is dispatched. /// /// This proto can only be used for tasks in a queue which has /// app_engine_http_target set. /// /// Using AppEngineHttpRequest requires /// [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control) /// Google IAM permission for the project /// and the following scope: /// /// `https://www.googleapis.com/auth/cloud-platform` /// /// The task will be delivered to the App Engine app which belongs to the same /// project as the queue. 
For more information, see /// [How Requests are /// Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed) /// and how routing is affected by /// [dispatch /// files](https://cloud.google.com/appengine/docs/python/config/dispatchref). /// Traffic is encrypted during transport and never leaves Google datacenters. /// Because this traffic is carried over a communication mechanism internal to /// Google, you cannot explicitly set the protocol (for example, HTTP or HTTPS). /// The request to the handler, however, will appear to have used the HTTP /// protocol. /// /// The AppEngineRouting used to construct the URL that the task is /// delivered to can be set at the queue-level or task-level: /// /// * If set, /// app_engine_routing_override /// is used for all tasks in the queue, no matter what the setting /// is for the /// task-level app_engine_routing. /// /// /// The `url` that the task will be sent to is: /// /// * `url =` host `+` /// relative_url /// /// Tasks can be dispatched to secure app handlers, unsecure app handlers, and /// URIs restricted with /// [`login: /// admin`](https://cloud.google.com/appengine/docs/standard/python/config/appref). /// Because tasks are not run as any user, they cannot be dispatched to URIs /// restricted with /// [`login: /// required`](https://cloud.google.com/appengine/docs/standard/python/config/appref) /// Task dispatches also do not follow redirects. /// /// The task attempt has succeeded if the app's request handler returns an HTTP /// response code in the range [`200` - `299`]. The task attempt has failed if /// the app's handler returns a non-2xx response code or Cloud Tasks does /// not receive response before the deadline. Failed /// tasks will be retried according to the /// retry configuration. 
`503` (Service Unavailable) is /// considered an App Engine system error instead of an application error and /// will cause Cloud Tasks' traffic congestion control to temporarily throttle /// the queue's dispatches. Unlike other types of task targets, a `429` (Too Many /// Requests) response from an app handler does not cause traffic congestion /// control to throttle the queue. /// /// This type is not used in any activity, and only used as *part* of another schema. /// #[derive(Default, Clone, Debug, Serialize, Deserialize)] pub struct AppEngineHttpRequest { /// Task-level setting for App Engine routing. /// /// If set, /// app_engine_routing_override /// is used for all tasks in the queue, no matter what the setting is for the /// task-level app_engine_routing. #[serde(rename="appEngineRouting")] pub app_engine_routing: Option<AppEngineRouting>, /// HTTP request headers. /// /// This map contains the header field names and values. /// Headers can be set when the /// task is created. /// Repeated headers are not supported but a header value can contain commas. /// /// Cloud Tasks sets some headers to default values: /// /// * `User-Agent`: By default, this header is /// `"AppEngine-Google; (+http://code.google.com/appengine)"`. /// This header can be modified, but Cloud Tasks will append /// `"AppEngine-Google; (+http://code.google.com/appengine)"` to the /// modified `User-Agent`. /// /// If the task has a payload, Cloud /// Tasks sets the following headers: /// /// * `Content-Type`: By default, the `Content-Type` header is set to /// `"application/octet-stream"`. The default can be overridden by explicitly /// setting `Content-Type` to a particular media type when the /// task is created. /// For example, `Content-Type` can be set to `"application/json"`. /// * `Content-Length`: This is computed by Cloud Tasks. This value is /// output only. It cannot be changed. 
/// /// The headers below cannot be set or overridden: /// /// * `Host` /// * `X-Google-*` /// * `X-AppEngine-*` /// /// In addition, Cloud Tasks sets some headers when the task is dispatched, /// such as headers containing information about the task; see /// [request /// headers](https://cloud.google.com/appengine/docs/python/taskqueue/push/creating-handlers#reading_request_headers). /// These headers are set only when the task is dispatched, so they are not /// visible when the task is returned in a Cloud Tasks response. /// /// Although there is no specific limit for the maximum number of headers or /// the size, there is a limit on the maximum size of the Task. For more /// information, see the CreateTask documentation. pub headers: Option<HashMap<String, String>>, /// The HTTP method to use for the request. The default is POST. /// /// The app's request handler for the task's target URL must be able to handle /// HTTP requests with this http_method, otherwise the task attempt fails with /// error code 405 (Method Not Allowed). See [Writing a push task request /// handler](https://cloud.google.com/appengine/docs/java/taskqueue/push/creating-handlers#writing_a_push_task_request_handler) /// and the App Engine documentation for your runtime on [How Requests are /// Handled](https://cloud.google.com/appengine/docs/standard/python3/how-requests-are-handled). #[serde(rename="httpMethod")] pub http_method: Option<String>, /// Payload. /// /// The payload will be sent as the HTTP message body. A message /// body, and thus a payload, is allowed only if the HTTP method is /// POST or PUT. It is an error to set a data payload on a task with /// an incompatible HttpMethod. pub payload: Option<String>, /// The relative URL. /// /// The relative URL must begin with "/" and must be a valid HTTP relative URL. /// It can contain a path and query string arguments. /// If the relative URL is empty, then the root path "/" will be used. 
/// No spaces are allowed, and the maximum length allowed is 2083 characters.
    #[serde(rename="relativeUrl")]
    pub relative_url: Option<String>,
}

impl client::Part for AppEngineHttpRequest {}

/// App Engine HTTP target.
///
/// The task will be delivered to the App Engine application hostname
/// specified by its AppEngineHttpTarget and AppEngineHttpRequest.
/// The documentation for AppEngineHttpRequest explains how the
/// task's host URL is constructed.
///
/// Using AppEngineHttpTarget requires
/// [`appengine.applications.get`](https://cloud.google.com/appengine/docs/admin-api/access-control)
/// Google IAM permission for the project
/// and the following scope:
///
/// `https://www.googleapis.com/auth/cloud-platform`
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AppEngineHttpTarget {
    /// Overrides for the
    /// task-level app_engine_routing.
    ///
    /// If set, `app_engine_routing_override` is used for all tasks in
    /// the queue, no matter what the setting is for the
    /// task-level app_engine_routing.
    #[serde(rename="appEngineRoutingOverride")]
    pub app_engine_routing_override: Option<AppEngineRouting>,
}

impl client::Part for AppEngineHttpTarget {}

/// App Engine Routing.
///
/// Defines routing characteristics specific to App Engine - service, version,
/// and instance.
///
/// For more information about services, versions, and instances see
/// [An Overview of App
/// Engine](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine),
/// [Microservices Architecture on Google App
/// Engine](https://cloud.google.com/appengine/docs/python/microservices-on-app-engine),
/// [App Engine Standard request
/// routing](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed),
/// and [App Engine Flex request
/// routing](https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AppEngineRouting {
    /// Output only. The host that the task is sent to.
    ///
    /// For more information, see
    /// [How Requests are
    /// Routed](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed).
    ///
    /// The host is constructed as:
    ///
    ///
    /// * `host = [application_domain_name]`<br>
    /// `| [service] + '.' + [application_domain_name]`<br>
    /// `| [version] + '.' + [application_domain_name]`<br>
    /// `| [version_dot_service] + '.' + [application_domain_name]`<br>
    /// `| [instance] + '.' + [application_domain_name]`<br>
    /// `| [instance_dot_service] + '.' + [application_domain_name]`<br>
    /// `| [instance_dot_version] + '.' + [application_domain_name]`<br>
    /// `| [instance_dot_version_dot_service] + '.' + [application_domain_name]`
    ///
    /// * `application_domain_name` = The domain name of the app, for
    /// example `<app-id>.appspot.com`, which is associated with the
    /// queue's project ID. Some tasks which were created using the App Engine
    /// SDK use a custom domain name.
    ///
    /// * `service =` service
    ///
    /// * `version =` version
    ///
    /// * `version_dot_service =`
    /// version `+ '.' +`
    /// service
    ///
    /// * `instance =` instance
    ///
    /// * `instance_dot_service =`
    /// instance `+ '.' +`
    /// service
    ///
    /// * `instance_dot_version =`
    /// instance `+ '.' +`
    /// version
    ///
    /// * `instance_dot_version_dot_service =`
    /// instance `+ '.' +`
    /// version `+ '.' +`
    /// service
    ///
    /// If service is empty, then the task will be sent
    /// to the service which is the default service when the task is attempted.
    ///
    /// If version is empty, then the task will be sent
    /// to the version which is the default version when the task is attempted.
    ///
    /// If instance is empty, then the task
    /// will be sent to an instance which is available when the task is
    /// attempted.
    ///
    /// If service,
    /// version, or
    /// instance is invalid, then the task
    /// will be sent to the default version of the default service when
    /// the task is attempted.
    pub host: Option<String>,
    /// App instance.
    ///
    /// By default, the task is sent to an instance which is available when
    /// the task is attempted.
    ///
    /// Requests can only be sent to a specific instance if
    /// [manual scaling is used in App Engine
    /// Standard](https://cloud.google.com/appengine/docs/python/an-overview-of-app-engine?hl=en_US#scaling_types_and_instance_classes).
    /// App Engine Flex does not support instances. For more information, see
    /// [App Engine Standard request
    /// routing](https://cloud.google.com/appengine/docs/standard/python/how-requests-are-routed)
    /// and [App Engine Flex request
    /// routing](https://cloud.google.com/appengine/docs/flexible/python/how-requests-are-routed).
    pub instance: Option<String>,
    /// App service.
    ///
    /// By default, the task is sent to the service which is the default
    /// service when the task is attempted.
    ///
    /// For some queues or tasks which were created using the App Engine
    /// Task Queue API, host is not parsable
    /// into service,
    /// version, and
    /// instance. For example, some tasks
    /// which were created using the App Engine SDK use a custom domain
    /// name; custom domains are not parsed by Cloud Tasks. If
    /// host is not parsable, then
    /// service,
    /// version, and
    /// instance are the empty string.
    pub service: Option<String>,
    /// App version.
    ///
    /// By default, the task is sent to the version which is the default
    /// version when the task is attempted.
    ///
    /// For some queues or tasks which were created using the App Engine
    /// Task Queue API, host is not parsable
    /// into service,
    /// version, and
    /// instance. For example, some tasks
    /// which were created using the App Engine SDK use a custom domain
    /// name; custom domains are not parsed by Cloud Tasks. If
    /// host is not parsable, then
    /// service,
    /// version, and
    /// instance are the empty string.
    pub version: Option<String>,
}

impl client::Part for AppEngineRouting {}

/// The status of a task attempt.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct AttemptStatus {
    /// Output only. The time that this attempt was dispatched.
    ///
    /// `dispatch_time` will be truncated to the nearest microsecond.
    #[serde(rename="dispatchTime")]
    pub dispatch_time: Option<String>,
    /// Output only. The response from the target for this attempt.
    ///
    /// If the task has not been attempted or the task is currently running
    /// then the response status is unset.
    #[serde(rename="responseStatus")]
    pub response_status: Option<Status>,
    /// Output only. The time that this attempt response was received.
    ///
    /// `response_time` will be truncated to the nearest microsecond.
    #[serde(rename="responseTime")]
    pub response_time: Option<String>,
    /// Output only. The time that this attempt was scheduled.
    ///
    /// `schedule_time` will be truncated to the nearest microsecond.
    #[serde(rename="scheduleTime")]
    pub schedule_time: Option<String>,
}

impl client::Part for AttemptStatus {}

/// Associates `members` with a `role`.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Binding {
    /// The condition that is associated with this binding.
    ///
    /// If the condition evaluates to `true`, then this binding applies to the
    /// current request.
    ///
    /// If the condition evaluates to `false`, then this binding does not apply to
    /// the current request. However, a different role binding might grant the same
    /// role to one or more of the members in this binding.
///
    /// To learn which resources support conditions in their IAM policies, see the
    /// [IAM
    /// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
    pub condition: Option<Expr>,
    /// Specifies the identities requesting access for a Cloud Platform resource.
    /// `members` can have the following values:
    ///
    /// * `allUsers`: A special identifier that represents anyone who is
    /// on the internet; with or without a Google account.
    ///
    /// * `allAuthenticatedUsers`: A special identifier that represents anyone
    /// who is authenticated with a Google account or a service account.
    ///
    /// * `user:{emailid}`: An email address that represents a specific Google
    /// account. For example, `alice@example.com` .
    ///
    ///
    /// * `serviceAccount:{emailid}`: An email address that represents a service
    /// account. For example, `my-other-app@appspot.gserviceaccount.com`.
    ///
    /// * `group:{emailid}`: An email address that represents a Google group.
    /// For example, `admins@example.com`.
    ///
    /// * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
    /// identifier) representing a user that has been recently deleted. For
    /// example, `alice@example.com?uid=123456789012345678901`. If the user is
    /// recovered, this value reverts to `user:{emailid}` and the recovered user
    /// retains the role in the binding.
    ///
    /// * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
    /// unique identifier) representing a service account that has been recently
    /// deleted. For example,
    /// `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
    /// If the service account is undeleted, this value reverts to
    /// `serviceAccount:{emailid}` and the undeleted service account retains the
    /// role in the binding.
    ///
    /// * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
    /// identifier) representing a Google group that has been recently
    /// deleted. For example, `admins@example.com?uid=123456789012345678901`. If
    /// the group is recovered, this value reverts to `group:{emailid}` and the
    /// recovered group retains the role in the binding.
    ///
    ///
    /// * `domain:{domain}`: The G Suite domain (primary) that represents all the
    /// users of that domain. For example, `google.com` or `example.com`.
    ///
    ///
    pub members: Option<Vec<String>>,
    /// Role that is assigned to `members`.
    /// For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
    pub role: Option<String>,
}

impl client::Part for Binding {}

/// Request message for canceling a lease using
/// CancelLease.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks cancel lease projects](ProjectLocationQueueTaskCancelLeaseCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CancelLeaseRequest {
    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    #[serde(rename="responseView")]
    pub response_view: Option<String>,
    /// Required. The task's current schedule time, available in the
    /// schedule_time returned by
    /// LeaseTasks response or
    /// RenewLease response. This restriction is
    /// to ensure that your worker currently holds the lease.
    #[serde(rename="scheduleTime")]
    pub schedule_time: Option<String>,
}

impl client::RequestValue for CancelLeaseRequest {}

/// Request message for CreateTask.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks create projects](ProjectLocationQueueTaskCreateCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct CreateTaskRequest {
    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    #[serde(rename="responseView")]
    pub response_view: Option<String>,
    /// Required. The task to add.
    ///
    /// Task names have the following format:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`.
    /// The user can optionally specify a task name. If a
    /// name is not specified then the system will generate a random
    /// unique task id, which will be set in the task returned in the
    /// response.
    ///
    /// If schedule_time is not set or is in the
    /// past then Cloud Tasks will set it to the current time.
    ///
    /// Task De-duplication:
    ///
    /// Explicitly specifying a task ID enables task de-duplication. If
    /// a task's ID is identical to that of an existing task or a task
    /// that was deleted or completed recently then the call will fail
    /// with ALREADY_EXISTS.
    /// If the task's queue was created using Cloud Tasks, then another task with
    /// the same name can't be created for ~1hour after the original task was
    /// deleted or completed. If the task's queue was created using queue.yaml or
    /// queue.xml, then another task with the same name can't be created
    /// for ~9days after the original task was deleted or completed.
    ///
    /// Because there is an extra lookup cost to identify duplicate task
    /// names, these CreateTask calls have significantly
    /// increased latency. Using hashed strings for the task id or for
    /// the prefix of the task id is recommended. Choosing task ids that
    /// are sequential or have sequential prefixes, for example using a
    /// timestamp, causes an increase in latency and error rates in all
    /// task commands. The infrastructure relies on an approximately
    /// uniform distribution of task ids to store and serve tasks
    /// efficiently.
    pub task: Option<Task>,
}

impl client::RequestValue for CreateTaskRequest {}

/// A generic empty message that you can re-use to avoid defining duplicated
/// empty messages in your APIs. A typical example is to use it as the request
/// or the response type of an API method. For instance:
///
/// ````text
/// service Foo {
///     rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
/// }
/// ````
///
/// The JSON representation for `Empty` is empty JSON object `{}`.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks acknowledge projects](ProjectLocationQueueTaskAcknowledgeCall) (response)
/// * [locations queues tasks delete projects](ProjectLocationQueueTaskDeleteCall) (response)
/// * [locations queues delete projects](ProjectLocationQueueDeleteCall) (response)
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Empty {
    // Private marker field so the struct body is non-empty; never serialized
    // and not constructible by callers.
    _never_set: Option<bool>
}

impl client::ResponseResult for Empty {}

/// Represents a textual expression in the Common Expression Language (CEL)
/// syntax.
/// CEL is a C-like expression language. The syntax and semantics of CEL
/// are documented at https://github.com/google/cel-spec.
///
/// Example (Comparison):
///
/// ````text
/// title: "Summary size limit"
/// description: "Determines if a summary is less than 100 chars"
/// expression: "document.summary.size() < 100"
/// ````
///
/// Example (Equality):
///
/// ````text
/// title: "Requestor is owner"
/// description: "Determines if requestor is the document owner"
/// expression: "document.owner == request.auth.claims.email"
/// ````
///
/// Example (Logic):
///
/// ````text
/// title: "Public documents"
/// description: "Determine whether the document should be publicly visible"
/// expression: "document.type != 'private' && document.type != 'internal'"
/// ````
///
/// Example (Data Manipulation):
///
/// ````text
/// title: "Notification string"
/// description: "Create a notification string with a timestamp."
/// expression: "'New message received at ' + string(document.create_time)"
/// ````
///
/// The exact variables and functions that may be referenced within an expression
/// are determined by the service that evaluates it. See the service
/// documentation for additional information.
///
/// This type is not used in any activity, and only used as *part* of another schema.
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Expr {
    /// Optional. Description of the expression. This is a longer text which
    /// describes the expression, e.g. when hovered over it in a UI.
    pub description: Option<String>,
    /// Textual representation of an expression in Common Expression Language
    /// syntax.
    pub expression: Option<String>,
    /// Optional. String indicating the location of the expression for error
    /// reporting, e.g. a file name and a position in the file.
    pub location: Option<String>,
    /// Optional. Title for the expression, i.e. a short string describing
    /// its purpose. This can be used e.g. in UIs which allow to enter the
    /// expression.
    pub title: Option<String>,
}

impl client::Part for Expr {}

/// Request message for `GetIamPolicy` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues get iam policy projects](ProjectLocationQueueGetIamPolicyCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GetIamPolicyRequest {
    /// OPTIONAL: A `GetPolicyOptions` object for specifying options to
    /// `GetIamPolicy`.
    pub options: Option<GetPolicyOptions>,
}

impl client::RequestValue for GetIamPolicyRequest {}

/// Encapsulates settings provided to GetIamPolicy.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct GetPolicyOptions {
    /// Optional. The policy format version to be returned.
    ///
    /// Valid values are 0, 1, and 3. Requests specifying an invalid value will be
    /// rejected.
    ///
    /// Requests for policies with any conditional bindings must specify version 3.
    /// Policies without any conditional bindings may specify any valid value or
    /// leave the field unset.
    ///
    /// To learn which resources support conditions in their IAM policies, see the
    /// [IAM
    /// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
    #[serde(rename="requestedPolicyVersion")]
    pub requested_policy_version: Option<i32>,
}

impl client::Part for GetPolicyOptions {}

/// Request message for leasing tasks using LeaseTasks.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks lease projects](ProjectLocationQueueTaskLeaseCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LeaseTasksRequest {
    /// `filter` can be used to specify a subset of tasks to lease.
    ///
    /// When `filter` is set to `tag=<my-tag>` then the
    /// response will contain only tasks whose
    /// tag is equal to `<my-tag>`. `<my-tag>` must be
    /// less than 500 characters.
    ///
    /// When `filter` is set to `tag_function=oldest_tag()`, only tasks which have
    /// the same tag as the task with the oldest
    /// schedule_time will be returned.
    ///
    /// Grammar Syntax:
    ///
    /// * `filter = "tag=" tag | "tag_function=" function`
    ///
    /// * `tag = string`
    ///
    /// * `function = "oldest_tag()"`
    ///
    /// The `oldest_tag()` function returns tasks which have the same tag as the
    /// oldest task (ordered by schedule time).
    ///
    /// SDK compatibility: Although the SDK allows tags to be either
    /// string or
    /// [bytes](https://cloud.google.com/appengine/docs/standard/java/javadoc/com/google/appengine/api/taskqueue/TaskOptions.html#tag-byte:A-),
    /// only UTF-8 encoded tags can be used in Cloud Tasks. Tags which
    /// aren't UTF-8 encoded can't be used in the
    /// filter and the task's
    /// tag will be displayed as empty in Cloud Tasks.
    pub filter: Option<String>,
    /// Required. The duration of the lease.
    ///
    /// Each task returned in the response will
    /// have its schedule_time set to the current
    /// time plus the `lease_duration`. The task is leased until its
    /// schedule_time; thus, the task will not be
    /// returned to another LeaseTasks call
    /// before its schedule_time.
    ///
    ///
    /// After the worker has successfully finished the work associated
    /// with the task, the worker must call via
    /// AcknowledgeTask before the
    /// schedule_time. Otherwise the task will be
    /// returned to a later LeaseTasks call so
    /// that another worker can retry it.
    ///
    /// The maximum lease duration is 1 week.
    /// `lease_duration` will be truncated to the nearest second.
    #[serde(rename="leaseDuration")]
    pub lease_duration: Option<String>,
    /// The maximum number of tasks to lease.
    ///
    /// The system will make a best effort to return as close to as
    /// `max_tasks` as possible.
    ///
    /// The largest that `max_tasks` can be is 1000.
    ///
    /// The maximum total size of a lease tasks response is
    /// 32 MB. If the sum of all task sizes requested reaches this limit,
    /// fewer tasks than requested are returned.
    #[serde(rename="maxTasks")]
    pub max_tasks: Option<i32>,
    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    #[serde(rename="responseView")]
    pub response_view: Option<String>,
}

impl client::RequestValue for LeaseTasksRequest {}

/// Response message for leasing tasks using LeaseTasks.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks lease projects](ProjectLocationQueueTaskLeaseCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct LeaseTasksResponse {
    /// The leased tasks.
    pub tasks: Option<Vec<Task>>,
}

impl client::ResponseResult for LeaseTasksResponse {}

/// The response message for Locations.ListLocations.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations list projects](ProjectLocationListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListLocationsResponse {
    /// A list of locations that matches the specified filter in the request.
    pub locations: Option<Vec<Location>>,
    /// The standard List next-page token.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
}

impl client::ResponseResult for ListLocationsResponse {}

/// Response message for ListQueues.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues list projects](ProjectLocationQueueListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListQueuesResponse {
    /// A token to retrieve next page of results.
    ///
    /// To return the next page of results, call
    /// ListQueues with this value as the
    /// page_token.
    ///
    /// If the next_page_token is empty, there are no more results.
    ///
    /// The page token is valid for only 2 hours.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
    /// The list of queues.
    pub queues: Option<Vec<Queue>>,
}

impl client::ResponseResult for ListQueuesResponse {}

/// Response message for listing tasks using ListTasks.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks list projects](ProjectLocationQueueTaskListCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct ListTasksResponse {
    /// A token to retrieve next page of results.
    ///
    /// To return the next page of results, call
    /// ListTasks with this value as the
    /// page_token.
    ///
    /// If the next_page_token is empty, there are no more results.
    #[serde(rename="nextPageToken")]
    pub next_page_token: Option<String>,
    /// The list of tasks.
    pub tasks: Option<Vec<Task>>,
}

impl client::ResponseResult for ListTasksResponse {}

/// A resource that represents Google Cloud Platform location.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations get projects](ProjectLocationGetCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Location {
    /// The friendly name for this location, typically a nearby city name.
    /// For example, "Tokyo".
    #[serde(rename="displayName")]
    pub display_name: Option<String>,
    /// Cross-service attributes for the location. For example
    ///
    /// ````text
    /// {"cloud.googleapis.com/region": "us-east1"}
    /// ````
    pub labels: Option<HashMap<String, String>>,
    /// The canonical id for this location. For example: `"us-east1"`.
    #[serde(rename="locationId")]
    pub location_id: Option<String>,
    /// Service-specific metadata. For example the available capacity at the given
    /// location.
    pub metadata: Option<HashMap<String, String>>,
    /// Resource name for the location, which may vary between implementations.
    /// For example: `"projects/example-project/locations/us-east1"`
    pub name: Option<String>,
}

impl client::ResponseResult for Location {}

/// Request message for PauseQueue.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues pause projects](ProjectLocationQueuePauseCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PauseQueueRequest {
    // Private marker field so the struct body is non-empty; never serialized.
    _never_set: Option<bool>
}

impl client::RequestValue for PauseQueueRequest {}

/// An Identity and Access Management (IAM) policy, which specifies access
/// controls for Google Cloud resources.
///
/// A `Policy` is a collection of `bindings`. A `binding` binds one or more
/// `members` to a single `role`. Members can be user accounts, service accounts,
/// Google groups, and domains (such as G Suite). A `role` is a named list of
/// permissions; each `role` can be an IAM predefined role or a user-created
/// custom role.
///
/// For some types of Google Cloud resources, a `binding` can also specify a
/// `condition`, which is a logical expression that allows access to a resource
/// only if the expression evaluates to `true`. A condition can add constraints
/// based on attributes of the request, the resource, or both. To learn which
/// resources support conditions in their IAM policies, see the
/// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
///
/// **JSON example:**
///
/// ````text
/// {
///   "bindings": [
///     {
///       "role": "roles/resourcemanager.organizationAdmin",
///       "members": [
///         "user:mike@example.com",
///         "group:admins@example.com",
///         "domain:google.com",
///         "serviceAccount:my-project-id@appspot.gserviceaccount.com"
///       ]
///     },
///     {
///       "role": "roles/resourcemanager.organizationViewer",
///       "members": [
///         "user:eve@example.com"
///       ],
///       "condition": {
///         "title": "expirable access",
///         "description": "Does not grant access after Sep 2020",
///         "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
///       }
///     }
///   ],
///   "etag": "BwWWja0YfJA=",
///   "version": 3
/// }
/// ````
///
/// **YAML example:**
///
/// ````text
/// bindings:
/// - members:
///   - user:mike@example.com
///   - group:admins@example.com
///   - domain:google.com
///   - serviceAccount:my-project-id@appspot.gserviceaccount.com
///   role: roles/resourcemanager.organizationAdmin
/// - members:
///   - user:eve@example.com
///   role: roles/resourcemanager.organizationViewer
///   condition:
///     title: expirable access
///     description: Does not grant access after Sep 2020
///     expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
/// - etag: BwWWja0YfJA=
/// - version: 3
/// ````
///
/// For a description of IAM and its features, see the
/// [IAM documentation](https://cloud.google.com/iam/docs/).
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues get iam policy projects](ProjectLocationQueueGetIamPolicyCall) (response)
/// * [locations queues set iam policy projects](ProjectLocationQueueSetIamPolicyCall) (response)
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Policy {
    /// Associates a list of `members` to a `role`.
Optionally, may specify a /// `condition` that determines how and when the `bindings` are applied. Each /// of the `bindings` must contain at least one member. pub bindings: Option<Vec<Binding>>, /// `etag` is used for optimistic concurrency control as a way to help /// prevent simultaneous updates of a policy from overwriting each other. /// It is strongly suggested that systems make use of the `etag` in the /// read-modify-write cycle to perform policy updates in order to avoid race /// conditions: An `etag` is returned in the response to `getIamPolicy`, and /// systems are expected to put that etag in the request to `setIamPolicy` to /// ensure that their change will be applied to the same version of the policy. /// /// **Important:** If you use IAM Conditions, you must include the `etag` field /// whenever you call `setIamPolicy`. If you omit this field, then IAM allows /// you to overwrite a version `3` policy with a version `1` policy, and all of /// the conditions in the version `3` policy are lost. pub etag: Option<String>, /// Specifies the format of the policy. /// /// Valid values are `0`, `1`, and `3`. Requests that specify an invalid value /// are rejected. /// /// Any operation that affects conditional role bindings must specify version /// `3`. This requirement applies to the following operations: /// /// * Getting a policy that includes a conditional role binding /// * Adding a conditional role binding to a policy /// * Changing a conditional role binding in a policy /// * Removing any role binding, with or without a condition, from a policy /// that includes conditions /// /// **Important:** If you use IAM Conditions, you must include the `etag` field /// whenever you call `setIamPolicy`. If you omit this field, then IAM allows /// you to overwrite a version `3` policy with a version `1` policy, and all of /// the conditions in the version `3` policy are lost. 
    ///
    /// If a policy does not include any conditions, operations on that policy may
    /// specify any valid version or leave the field unset.
    ///
    /// To learn which resources support conditions in their IAM policies, see the
    /// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
    pub version: Option<i32>,
}

impl client::ResponseResult for Policy {}


/// The pull message contains data that can be used by the caller of
/// LeaseTasks to process the task.
///
/// This proto can only be used for tasks in a queue which has
/// pull_target set.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct PullMessage {
    /// A data payload consumed by the worker to execute the task.
    pub payload: Option<String>,
    /// The task's tag.
    ///
    /// Tags allow similar tasks to be processed in a batch. If you label
    /// tasks with a tag, your worker can
    /// lease tasks with the same tag using
    /// filter. For example, if you want to
    /// aggregate the events associated with a specific user once a day,
    /// you could tag tasks with the user ID.
    ///
    /// The task's tag can only be set when the
    /// task is created.
    ///
    /// The tag must be less than 500 characters.
    ///
    /// SDK compatibility: Although the SDK allows tags to be either
    /// string or
    /// [bytes](https://cloud.google.com/appengine/docs/standard/java/javadoc/com/google/appengine/api/taskqueue/TaskOptions.html#tag-byte:A-),
    /// only UTF-8 encoded tags can be used in Cloud Tasks. If a tag isn't UTF-8
    /// encoded, the tag will be empty when the task is returned by Cloud Tasks.
    pub tag: Option<String>,
}

impl client::Part for PullMessage {}


/// Pull target.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
// The API message has no fields of its own; `_never_set` is a private
// serde placeholder that is never populated.
pub struct PullTarget { _never_set: Option<bool> }

impl client::Part for PullTarget {}


/// Request message for PurgeQueue.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues purge projects](ProjectLocationQueuePurgeCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
// Empty request body; `_never_set` is a private serde placeholder.
pub struct PurgeQueueRequest { _never_set: Option<bool> }

impl client::RequestValue for PurgeQueueRequest {}


/// A queue is a container of related tasks. Queues are configured to manage
/// how those tasks are dispatched. Configurable properties include rate limits,
/// retry options, target types, and others.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues create projects](ProjectLocationQueueCreateCall) (request|response)
/// * [locations queues get projects](ProjectLocationQueueGetCall) (response)
/// * [locations queues patch projects](ProjectLocationQueuePatchCall) (request|response)
/// * [locations queues pause projects](ProjectLocationQueuePauseCall) (response)
/// * [locations queues purge projects](ProjectLocationQueuePurgeCall) (response)
/// * [locations queues resume projects](ProjectLocationQueueResumeCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Queue {
    /// App Engine HTTP target.
    ///
    /// An App Engine queue is a queue that has an AppEngineHttpTarget.
    // JSON wire names are camelCase; the serde `rename` attributes map them
    // onto the snake_case Rust fields. All fields are `Option` so absent
    // JSON keys deserialize cleanly.
    #[serde(rename="appEngineHttpTarget")]
    pub app_engine_http_target: Option<AppEngineHttpTarget>,
    /// Caller-specified and required in CreateQueue,
    /// after which it becomes output only.
    ///
    /// The queue name.
    ///
    /// The queue name must have the following format:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
    ///   hyphens (-), colons (:), or periods (.).
    ///   For more information, see
    ///   [Identifying
    ///   projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
    /// * `LOCATION_ID` is the canonical ID for the queue's location.
    ///   The list of available locations can be obtained by calling
    ///   ListLocations.
    ///   For more information, see https://cloud.google.com/about/locations/.
    /// * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or
    ///   hyphens (-). The maximum length is 100 characters.
    pub name: Option<String>,
    /// Pull target.
    ///
    /// A pull queue is a queue that has a PullTarget.
    #[serde(rename="pullTarget")]
    pub pull_target: Option<PullTarget>,
    /// Output only. The last time this queue was purged.
    ///
    /// All tasks that were created before this time
    /// were purged.
    ///
    /// A queue can be purged using PurgeQueue, the
    /// [App Engine Task Queue SDK, or the Cloud
    /// Console](https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/deleting-tasks-and-queues#purging_all_tasks_from_a_queue).
    ///
    /// Purge time will be truncated to the nearest microsecond. Purge
    /// time will be unset if the queue has never been purged.
    #[serde(rename="purgeTime")]
    pub purge_time: Option<String>,
    /// Rate limits for task dispatches.
    ///
    /// rate_limits and
    /// retry_config are related because they both
    /// control task attempts however they control how tasks are
    /// attempted in different ways:
    ///
    /// * rate_limits controls the total rate of
    ///   dispatches from a queue (i.e.
    ///   all traffic dispatched from the
    ///   queue, regardless of whether the dispatch is from a first
    ///   attempt or a retry).
    /// * retry_config controls what happens to
    ///   particular a task after its first attempt fails. That is,
    ///   retry_config controls task retries (the
    ///   second attempt, third attempt, etc).
    #[serde(rename="rateLimits")]
    pub rate_limits: Option<RateLimits>,
    /// Settings that determine the retry behavior.
    ///
    /// * For tasks created using Cloud Tasks: the queue-level retry settings
    ///   apply to all tasks in the queue that were created using Cloud Tasks.
    ///   Retry settings cannot be set on individual tasks.
    /// * For tasks created using the App Engine SDK: the queue-level retry
    ///   settings apply to all tasks in the queue which do not have retry settings
    ///   explicitly set on the task and were created by the App Engine SDK. See
    ///   [App Engine
    ///   documentation](https://cloud.google.com/appengine/docs/standard/python/taskqueue/push/retrying-tasks).
    #[serde(rename="retryConfig")]
    pub retry_config: Option<RetryConfig>,
    /// Output only. The state of the queue.
    ///
    /// `state` can only be changed by called
    /// PauseQueue,
    /// ResumeQueue, or uploading
    /// [queue.yaml/xml](https://cloud.google.com/appengine/docs/python/config/queueref).
    /// UpdateQueue cannot be used to change `state`.
    // NOTE(review): kept as a plain String by the generator rather than an
    // enum; valid values come from the API — confirm against the discovery doc.
    pub state: Option<String>,
}

// Queue appears both as a request body and a response payload (see the
// Activities list above), hence both marker-trait impls.
impl client::RequestValue for Queue {}
impl client::ResponseResult for Queue {}


/// Rate limits.
///
/// This message determines the maximum rate that tasks can be dispatched by a
/// queue, regardless of whether the dispatch is a first task attempt or a retry.
///
/// Note: The debugging command, RunTask, will run a task
/// even if the queue has reached its RateLimits.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RateLimits {
    /// Output only. The max burst size.
    ///
    /// Max burst size limits how fast tasks in queue are processed when
    /// many tasks are in the queue and the rate is high. This field
    /// allows the queue to have a high rate so processing starts shortly
    /// after a task is enqueued, but still limits resource usage when
    /// many tasks are enqueued in a short period of time.
    ///
    /// The [token bucket](https://wikipedia.org/wiki/Token_Bucket)
    /// algorithm is used to control the rate of task dispatches. Each
    /// queue has a token bucket that holds tokens, up to the maximum
    /// specified by `max_burst_size`. Each time a task is dispatched, a
    /// token is removed from the bucket. Tasks will be dispatched until
    /// the queue's bucket runs out of tokens. The bucket will be
    /// continuously refilled with new tokens based on
    /// max_tasks_dispatched_per_second.
    ///
    /// Cloud Tasks will pick the value of `max_burst_size` based on the
    /// value of
    /// max_tasks_dispatched_per_second.
    ///
    /// For App Engine queues that were created or updated using
    /// `queue.yaml/xml`, `max_burst_size` is equal to
    /// [bucket_size](https://cloud.google.com/appengine/docs/standard/python/config/queueref#bucket_size).
    /// Since `max_burst_size` is output only, if
    /// UpdateQueue is called on a queue
    /// created by `queue.yaml/xml`, `max_burst_size` will be reset based
    /// on the value of
    /// max_tasks_dispatched_per_second,
    /// regardless of whether
    /// max_tasks_dispatched_per_second
    /// is updated.
    ///
    #[serde(rename="maxBurstSize")]
    pub max_burst_size: Option<i32>,
    /// The maximum number of concurrent tasks that Cloud Tasks allows
    /// to be dispatched for this queue. After this threshold has been
    /// reached, Cloud Tasks stops dispatching tasks until the number of
    /// concurrent requests decreases.
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    ///
    /// The maximum allowed value is 5,000.
    ///
    /// This field is output only for
    /// pull queues and always -1, which indicates no limit.
    /// No other
    /// queue types can have `max_concurrent_tasks` set to -1.
    ///
    ///
    /// This field has the same meaning as
    /// [max_concurrent_requests in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#max_concurrent_requests).
    #[serde(rename="maxConcurrentTasks")]
    pub max_concurrent_tasks: Option<i32>,
    /// The maximum rate at which tasks are dispatched from this queue.
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    /// * For App Engine queues, the maximum allowed value
    ///   is 500.
    /// * This field is output only for pull queues. In addition to the
    ///   `max_tasks_dispatched_per_second` limit, a maximum of 10 QPS of
    ///   LeaseTasks requests are allowed per pull queue.
    ///
    ///
    /// This field has the same meaning as
    /// [rate in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#rate).
    // Fractional dispatch rates are representable, hence f64 (vs. the
    // i32 used for the count-valued fields above).
    #[serde(rename="maxTasksDispatchedPerSecond")]
    pub max_tasks_dispatched_per_second: Option<f64>,
}

impl client::Part for RateLimits {}


/// Request message for renewing a lease using
/// RenewLease.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks renew lease projects](ProjectLocationQueueTaskRenewLeaseCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RenewLeaseRequest {
    /// Required. The desired new lease duration, starting from now.
    ///
    ///
    /// The maximum lease duration is 1 week.
    /// `lease_duration` will be truncated to the nearest second.
    #[serde(rename="leaseDuration")]
    pub lease_duration: Option<String>,
    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    #[serde(rename="responseView")]
    pub response_view: Option<String>,
    /// Required. The task's current schedule time, available in the
    /// schedule_time returned by
    /// LeaseTasks response or
    /// RenewLease response. This restriction is
    /// to ensure that your worker currently holds the lease.
    #[serde(rename="scheduleTime")]
    pub schedule_time: Option<String>,
}

impl client::RequestValue for RenewLeaseRequest {}


/// Request message for ResumeQueue.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues resume projects](ProjectLocationQueueResumeCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
// Empty request body; `_never_set` is a private serde placeholder.
pub struct ResumeQueueRequest { _never_set: Option<bool> }

impl client::RequestValue for ResumeQueueRequest {}


/// Retry config.
///
/// These settings determine how a failed task attempt is retried.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RetryConfig {
    /// The maximum number of attempts for a task.
    ///
    /// Cloud Tasks will attempt the task `max_attempts` times (that
    /// is, if the first attempt fails, then there will be
    /// `max_attempts - 1` retries). Must be > 0.
    #[serde(rename="maxAttempts")]
    pub max_attempts: Option<i32>,
    /// A task will be scheduled for retry between
    /// min_backoff and
    /// max_backoff duration after it fails,
    /// if the queue's RetryConfig specifies that the task should be
    /// retried.
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    /// This field is output only for pull queues.
    ///
    ///
    /// `max_backoff` will be truncated to the nearest second.
    ///
    /// This field has the same meaning as
    /// [max_backoff_seconds in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
    // NOTE(review): durations travel as strings on the wire (e.g. "3.5s"
    // protobuf Duration encoding — confirm against the discovery doc).
    #[serde(rename="maxBackoff")]
    pub max_backoff: Option<String>,
    /// The time between retries will double `max_doublings` times.
    ///
    /// A task's retry interval starts at
    /// min_backoff, then doubles
    /// `max_doublings` times, then increases linearly, and finally
    /// retries at intervals of
    /// max_backoff up to
    /// max_attempts times.
    ///
    /// For example, if min_backoff is 10s,
    /// max_backoff is 300s, and
    /// `max_doublings` is 3, then the a task will first be retried in
    /// 10s. The retry interval will double three times, and then
    /// increase linearly by 2^3 * 10s. Finally, the task will retry at
    /// intervals of max_backoff until the
    /// task has been attempted max_attempts
    /// times. Thus, the requests will retry at 10s, 20s, 40s, 80s, 160s,
    /// 240s, 300s, 300s, ....
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    /// This field is output only for pull queues.
    ///
    ///
    /// This field has the same meaning as
    /// [max_doublings in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
    #[serde(rename="maxDoublings")]
    pub max_doublings: Option<i32>,
    /// If positive, `max_retry_duration` specifies the time limit for
    /// retrying a failed task, measured from when the task was first
    /// attempted.
    /// Once `max_retry_duration` time has passed *and* the
    /// task has been attempted max_attempts
    /// times, no further attempts will be made and the task will be
    /// deleted.
    ///
    /// If zero, then the task age is unlimited.
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    /// This field is output only for pull queues.
    ///
    ///
    /// `max_retry_duration` will be truncated to the nearest second.
    ///
    /// This field has the same meaning as
    /// [task_age_limit in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
    #[serde(rename="maxRetryDuration")]
    pub max_retry_duration: Option<String>,
    /// A task will be scheduled for retry between
    /// min_backoff and
    /// max_backoff duration after it fails,
    /// if the queue's RetryConfig specifies that the task should be
    /// retried.
    ///
    /// If unspecified when the queue is created, Cloud Tasks will pick the
    /// default.
    ///
    /// This field is output only for pull queues.
    ///
    ///
    /// `min_backoff` will be truncated to the nearest second.
    ///
    /// This field has the same meaning as
    /// [min_backoff_seconds in
    /// queue.yaml/xml](https://cloud.google.com/appengine/docs/standard/python/config/queueref#retry_parameters).
    #[serde(rename="minBackoff")]
    pub min_backoff: Option<String>,
    /// If true, then the number of attempts is unlimited.
    #[serde(rename="unlimitedAttempts")]
    pub unlimited_attempts: Option<bool>,
}

impl client::Part for RetryConfig {}


/// Request message for forcing a task to run now using
/// RunTask.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks run projects](ProjectLocationQueueTaskRunCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct RunTaskRequest {
    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    #[serde(rename="responseView")]
    pub response_view: Option<String>,
}

impl client::RequestValue for RunTaskRequest {}


/// Request message for `SetIamPolicy` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues set iam policy projects](ProjectLocationQueueSetIamPolicyCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct SetIamPolicyRequest {
    /// REQUIRED: The complete policy to be applied to the `resource`. The size of
    /// the policy is limited to a few 10s of KB. An empty policy is a
    /// valid policy but certain Cloud Platform services (such as Projects)
    /// might reject them.
    pub policy: Option<Policy>,
}

impl client::RequestValue for SetIamPolicyRequest {}


/// The `Status` type defines a logical error model that is suitable for
/// different programming environments, including REST APIs and RPC APIs. It is
/// used by [gRPC](https://github.com/grpc). Each `Status` message contains
/// three pieces of data: error code, error message, and error details.
///
/// You can find out more about this error model and how to work with it in the
/// [API Design Guide](https://cloud.google.com/apis/design/errors).
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Status {
    /// The status code, which should be an enum value of google.rpc.Code.
    pub code: Option<i32>,
    /// A list of messages that carry the error details. There is a common set of
    /// message types for APIs to use.
    // Detail messages are kept schemaless (string-to-string maps) rather
    // than decoded into concrete protobuf types.
    pub details: Option<Vec<HashMap<String, String>>>,
    /// A developer-facing error message, which should be in English. Any
    /// user-facing error message should be localized and sent in the
    /// google.rpc.Status.details field, or localized by the client.
    pub message: Option<String>,
}

impl client::Part for Status {}


/// A unit of scheduled work.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues tasks cancel lease projects](ProjectLocationQueueTaskCancelLeaseCall) (response)
/// * [locations queues tasks create projects](ProjectLocationQueueTaskCreateCall) (response)
/// * [locations queues tasks get projects](ProjectLocationQueueTaskGetCall) (response)
/// * [locations queues tasks renew lease projects](ProjectLocationQueueTaskRenewLeaseCall) (response)
/// * [locations queues tasks run projects](ProjectLocationQueueTaskRunCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct Task {
    /// App Engine HTTP request that is sent to the task's target. Can
    /// be set only if
    /// app_engine_http_target is set
    /// on the queue.
    ///
    /// An App Engine task is a task that has AppEngineHttpRequest set.
    #[serde(rename="appEngineHttpRequest")]
    pub app_engine_http_request: Option<AppEngineHttpRequest>,
    /// Output only. The time that the task was created.
    ///
    /// `create_time` will be truncated to the nearest second.
    #[serde(rename="createTime")]
    pub create_time: Option<String>,
    /// Optionally caller-specified in CreateTask.
    ///
    /// The task name.
    ///
    /// The task name must have the following format:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    ///
    /// * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
    ///   hyphens (-), colons (:), or periods (.).
    ///   For more information, see
    ///   [Identifying
    ///   projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
    /// * `LOCATION_ID` is the canonical ID for the task's location.
    ///   The list of available locations can be obtained by calling
    ///   ListLocations.
    ///   For more information, see https://cloud.google.com/about/locations/.
    /// * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or
    ///   hyphens (-). The maximum length is 100 characters.
    /// * `TASK_ID` can contain only letters ([A-Za-z]), numbers ([0-9]),
    ///   hyphens (-), or underscores (_). The maximum length is 500 characters.
    pub name: Option<String>,
    /// LeaseTasks to process the task. Can be
    /// set only if pull_target is set on the queue.
    ///
    /// A pull task is a task that has PullMessage set.
    // app_engine_http_request and pull_message are mutually exclusive
    // target payloads (see their doc comments above).
    #[serde(rename="pullMessage")]
    pub pull_message: Option<PullMessage>,
    /// The time when the task is scheduled to be attempted.
    ///
    /// For App Engine queues, this is when the task will be attempted or retried.
    ///
    /// For pull queues, this is the time when the task is available to
    /// be leased; if a task is currently leased, this is the time when
    /// the current lease expires, that is, the time that the task was
    /// leased plus the lease_duration.
    ///
    /// `schedule_time` will be truncated to the nearest microsecond.
    #[serde(rename="scheduleTime")]
    pub schedule_time: Option<String>,
    /// Output only. The task status.
    pub status: Option<TaskStatus>,
    /// Output only. The view specifies which subset of the Task has
    /// been returned.
    pub view: Option<String>,
}

impl client::ResponseResult for Task {}


/// Status of the task.
///
/// This type is not used in any activity, and only used as *part* of another schema.
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TaskStatus {
    /// Output only. The number of attempts dispatched.
    ///
    /// This count includes attempts which have been dispatched but haven't
    /// received a response.
    #[serde(rename="attemptDispatchCount")]
    pub attempt_dispatch_count: Option<i32>,
    /// Output only. The number of attempts which have received a response.
    ///
    /// This field is not calculated for pull tasks.
    #[serde(rename="attemptResponseCount")]
    pub attempt_response_count: Option<i32>,
    /// Output only. The status of the task's first attempt.
    ///
    /// Only dispatch_time will be set.
    /// The other AttemptStatus information is not retained by Cloud Tasks.
    ///
    /// This field is not calculated for pull tasks.
    #[serde(rename="firstAttemptStatus")]
    pub first_attempt_status: Option<AttemptStatus>,
    /// Output only. The status of the task's last attempt.
    ///
    /// This field is not calculated for pull tasks.
    #[serde(rename="lastAttemptStatus")]
    pub last_attempt_status: Option<AttemptStatus>,
}

impl client::Part for TaskStatus {}


/// Request message for `TestIamPermissions` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues test iam permissions projects](ProjectLocationQueueTestIamPermissionCall) (request)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TestIamPermissionsRequest {
    /// The set of permissions to check for the `resource`. Permissions with
    /// wildcards (such as '*' or 'storage.*') are not allowed. For more
    /// information see
    /// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
    pub permissions: Option<Vec<String>>,
}

impl client::RequestValue for TestIamPermissionsRequest {}


/// Response message for `TestIamPermissions` method.
///
/// # Activities
///
/// This type is used in activities, which are methods you may call on this type or where this type is involved in.
/// The list links the activity name, along with information about where it is used (one of *request* and *response*).
///
/// * [locations queues test iam permissions projects](ProjectLocationQueueTestIamPermissionCall) (response)
///
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
pub struct TestIamPermissionsResponse {
    /// A subset of `TestPermissionsRequest.permissions` that the caller is
    /// allowed.
    pub permissions: Option<Vec<String>>,
}

impl client::ResponseResult for TestIamPermissionsResponse {}



// ###################
// MethodBuilders ###
// #################

/// A builder providing access to all methods supported on *project* resources.
/// It is not used directly, but through the `CloudTasks` hub.
///
/// # Example
///
/// Instantiate a resource builder
///
/// ```test_harness,no_run
/// extern crate hyper;
/// extern crate hyper_rustls;
/// extern crate yup_oauth2 as oauth2;
/// extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
///
/// # #[test] fn egal() {
/// use std::default::Default;
/// use oauth2;
/// use cloudtasks2_beta2::CloudTasks;
///
/// let secret: ApplicationSecret = Default::default();
/// let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
///     secret,
///     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// ).build().await.unwrap();
/// let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // Usually you wouldn't bind this to a variable, but keep calling *CallBuilders*
/// // like `locations_get(...)`, `locations_list(...)`, `locations_queues_create(...)`, `locations_queues_delete(...)`, `locations_queues_get(...)`, `locations_queues_get_iam_policy(...)`, `locations_queues_list(...)`, `locations_queues_patch(...)`, `locations_queues_pause(...)`, `locations_queues_purge(...)`, `locations_queues_resume(...)`, `locations_queues_set_iam_policy(...)`, `locations_queues_tasks_acknowledge(...)`, `locations_queues_tasks_cancel_lease(...)`, `locations_queues_tasks_create(...)`, `locations_queues_tasks_delete(...)`, `locations_queues_tasks_get(...)`, `locations_queues_tasks_lease(...)`, `locations_queues_tasks_list(...)`, `locations_queues_tasks_renew_lease(...)`, `locations_queues_tasks_run(...)` and `locations_queues_test_iam_permissions(...)`
/// // to build up your call.
/// let rb = hub.projects();
/// # }
/// ```
// Borrows the hub for 'a; the `C: 'a` bound ties the HTTP connector's
// lifetime to the borrow.
pub struct ProjectMethods<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
}

impl<'a, C> client::MethodsBuilder for ProjectMethods<'a, C> {}

impl<'a, C> ProjectMethods<'a, C> {

    /// Create a builder to help you perform the following task:
    ///
    /// Acknowledges a pull task.
    ///
    /// The worker, that is, the entity that
    /// leased this task must call this method
    /// to indicate that the work associated with the task has finished.
    ///
    /// The worker must acknowledge a task within the
    /// lease_duration or the lease
    /// will expire and the task will become available to be leased
    /// again. After the task is acknowledged, it will not be returned
    /// by a later LeaseTasks,
    /// GetTask, or
    /// ListTasks.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `name` - Required. The task name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_acknowledge(&self, request: AcknowledgeTaskRequest, name: &str) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> {
        // Only the mandatory arguments are set here; all optional call
        // parameters start at their `Default` values and are configured
        // via the returned builder before the call is executed.
        ProjectLocationQueueTaskAcknowledgeCall {
            hub: self.hub,
            _request: request,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Cancel a pull task's lease.
    ///
    /// The worker can use this method to cancel a task's lease by
    /// setting its schedule_time to now. This will
    /// make the task available to be leased to the next caller of
    /// LeaseTasks.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `name` - Required. The task name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_cancel_lease(&self, request: CancelLeaseRequest, name: &str) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> {
        ProjectLocationQueueTaskCancelLeaseCall {
            hub: self.hub,
            _request: request,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Creates a task and adds it to a queue.
    ///
    /// Tasks cannot be updated after creation; there is no UpdateTask command.
    ///
    /// * For App Engine queues, the maximum task size is
    ///   100KB.
    /// * For pull queues, the maximum task size is 1MB.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `parent` - Required. The queue name. For example:
    ///              `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///              The queue must already exist.
    pub fn locations_queues_tasks_create(&self, request: CreateTaskRequest, parent: &str) -> ProjectLocationQueueTaskCreateCall<'a, C> {
        ProjectLocationQueueTaskCreateCall {
            hub: self.hub,
            _request: request,
            _parent: parent.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Deletes a task.
    ///
    /// A task can be deleted if it is scheduled or dispatched. A task
    /// cannot be deleted if it has completed successfully or permanently
    /// failed.
    ///
    /// # Arguments
    ///
    /// * `name` - Required. The task name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_delete(&self, name: &str) -> ProjectLocationQueueTaskDeleteCall<'a, C> {
        ProjectLocationQueueTaskDeleteCall {
            hub: self.hub,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Gets a task.
    ///
    /// # Arguments
    ///
    /// * `name` - Required. The task name.
    ///            For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_get(&self, name: &str) -> ProjectLocationQueueTaskGetCall<'a, C> {
        ProjectLocationQueueTaskGetCall {
            hub: self.hub,
            _name: name.to_string(),
            _response_view: Default::default(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Leases tasks from a pull queue for
    /// lease_duration.
    ///
    /// This method is invoked by the worker to obtain a lease. The
    /// worker must acknowledge the task via
    /// AcknowledgeTask after they have
    /// performed the work associated with the task.
    ///
    /// The payload is intended to store data that
    /// the worker needs to perform the work associated with the task. To
    /// return the payloads in the response, set
    /// response_view to
    /// FULL.
    ///
    /// A maximum of 10 qps of LeaseTasks
    /// requests are allowed per
    /// queue. RESOURCE_EXHAUSTED
    /// is returned when this limit is
    /// exceeded. RESOURCE_EXHAUSTED
    /// is also returned when
    /// max_tasks_dispatched_per_second
    /// is exceeded.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `parent` - Required. The queue name. For example:
    ///              `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    pub fn locations_queues_tasks_lease(&self, request: LeaseTasksRequest, parent: &str) -> ProjectLocationQueueTaskLeaseCall<'a, C> {
        ProjectLocationQueueTaskLeaseCall {
            hub: self.hub,
            _request: request,
            _parent: parent.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Lists the tasks in a queue.
    ///
    /// By default, only the BASIC view is retrieved
    /// due to performance considerations;
    /// response_view controls the
    /// subset of information which is returned.
    ///
    /// The tasks may be returned in any order.
    /// The ordering may change at any
    /// time.
    ///
    /// # Arguments
    ///
    /// * `parent` - Required. The queue name. For example:
    ///              `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    pub fn locations_queues_tasks_list(&self, parent: &str) -> ProjectLocationQueueTaskListCall<'a, C> {
        // List calls additionally carry pagination state (_page_token,
        // _page_size), configured through the returned builder.
        ProjectLocationQueueTaskListCall {
            hub: self.hub,
            _parent: parent.to_string(),
            _response_view: Default::default(),
            _page_token: Default::default(),
            _page_size: Default::default(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Renew the current lease of a pull task.
    ///
    /// The worker can use this method to extend the lease by a new
    /// duration, starting from now. The new task lease will be
    /// returned in the task's schedule_time.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `name` - Required. The task name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_renew_lease(&self, request: RenewLeaseRequest, name: &str) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C> {
        ProjectLocationQueueTaskRenewLeaseCall {
            hub: self.hub,
            _request: request,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Forces a task to run now.
    ///
    /// When this method is called, Cloud Tasks will dispatch the task, even if
    /// the task is already running, the queue has reached its RateLimits or
    /// is PAUSED.
    ///
    /// This command is meant to be used for manual debugging. For
    /// example, RunTask can be used to retry a failed
    /// task after a fix has been made or to manually force a task to be
    /// dispatched now.
    ///
    /// The dispatched task is returned.
    /// That is, the task that is returned
    /// contains the status after the task is dispatched but
    /// before the task is received by its target.
    ///
    /// If Cloud Tasks receives a successful response from the task's
    /// target, then the task will be deleted; otherwise the task's
    /// schedule_time will be reset to the time that
    /// RunTask was called plus the retry delay specified
    /// in the queue's RetryConfig.
    ///
    /// RunTask returns
    /// NOT_FOUND when it is called on a
    /// task that has already succeeded or permanently failed.
    ///
    /// RunTask cannot be called on a
    /// pull task.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `name` - Required. The task name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    pub fn locations_queues_tasks_run(&self, request: RunTaskRequest, name: &str) -> ProjectLocationQueueTaskRunCall<'a, C> {
        ProjectLocationQueueTaskRunCall {
            hub: self.hub,
            _request: request,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Creates a queue.
    ///
    /// Queues created with this method allow tasks to live for a maximum of 31
    /// days. After a task is 31 days old, the task will be deleted regardless of whether
    /// it was dispatched or not.
    ///
    /// WARNING: Using this method may have unintended side effects if you are
    /// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
    /// Read
    /// [Overview of Queue Management and
    /// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
    /// this method.
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `parent` - Required. The location name in which the queue will be created.
    ///              For example: `projects/PROJECT_ID/locations/LOCATION_ID`
    ///              The list of allowed locations can be obtained by calling Cloud
    ///              Tasks' implementation of
    ///              ListLocations.
    pub fn locations_queues_create(&self, request: Queue, parent: &str) -> ProjectLocationQueueCreateCall<'a, C> {
        ProjectLocationQueueCreateCall {
            hub: self.hub,
            _request: request,
            _parent: parent.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Deletes a queue.
    ///
    /// This command will delete the queue even if it has tasks in it.
    ///
    /// Note: If you delete a queue, a queue with the same name can't be created
    /// for 7 days.
    ///
    /// WARNING: Using this method may have unintended side effects if you are
    /// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
    /// Read
    /// [Overview of Queue Management and
    /// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
    /// this method.
    ///
    /// # Arguments
    ///
    /// * `name` - Required. The queue name. For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    pub fn locations_queues_delete(&self, name: &str) -> ProjectLocationQueueDeleteCall<'a, C> {
        ProjectLocationQueueDeleteCall {
            hub: self.hub,
            _name: name.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Gets a queue.
    ///
    /// # Arguments
    ///
    /// * `name` - Required. The resource name of the queue.
    ///            For example:
    ///            `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    pub fn locations_queues_get(&self, name: &str) -> ProjectLocationQueueGetCall<'a, C> {
        ProjectLocationQueueGetCall {
            hub: self.hub,
            _name: name.to_string(),
            _read_mask: Default::default(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Gets the access control policy for a Queue.
    /// Returns an empty policy if the resource exists and does not have a policy
    /// set.
    ///
    /// Authorization requires the following
    /// [Google IAM](https://cloud.google.com/iam) permission on the specified
    /// resource parent:
    ///
    /// * `cloudtasks.queues.getIamPolicy`
    ///
    /// # Arguments
    ///
    /// * `request` - No description provided.
    /// * `resource` - REQUIRED: The resource for which the policy is being requested.
    ///                See the operation documentation for the appropriate value for this field.
    pub fn locations_queues_get_iam_policy(&self, request: GetIamPolicyRequest, resource: &str) -> ProjectLocationQueueGetIamPolicyCall<'a, C> {
        ProjectLocationQueueGetIamPolicyCall {
            hub: self.hub,
            _request: request,
            _resource: resource.to_string(),
            _delegate: Default::default(),
            _additional_params: Default::default(),
            _scopes: Default::default(),
        }
    }

    /// Create a builder to help you perform the following task:
    ///
    /// Lists queues.
    ///
    /// Queues are returned in lexicographical order.
    ///
    /// # Arguments
    ///
    /// * `parent` - Required. The location name.
/// For example: `projects/PROJECT_ID/locations/LOCATION_ID` pub fn locations_queues_list(&self, parent: &str) -> ProjectLocationQueueListCall<'a, C> { ProjectLocationQueueListCall { hub: self.hub, _parent: parent.to_string(), _page_token: Default::default(), _page_size: Default::default(), _filter: Default::default(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Updates a queue. /// /// This method creates the queue if it does not exist and updates /// the queue if it does exist. /// /// Queues created with this method allow tasks to live for a maximum of 31 /// days. After a task is 31 days old, the task will be deleted regardless of whether /// it was dispatched or not. /// /// WARNING: Using this method may have unintended side effects if you are /// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues. /// Read /// [Overview of Queue Management and /// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using /// this method. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Caller-specified and required in CreateQueue, /// after which it becomes output only. /// The queue name. /// The queue name must have the following format: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` /// * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]), /// hyphens (-), colons (:), or periods (.). /// For more information, see /// [Identifying /// projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects) /// * `LOCATION_ID` is the canonical ID for the queue's location. /// The list of available locations can be obtained by calling /// ListLocations. /// For more information, see https://cloud.google.com/about/locations/. /// * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or /// hyphens (-). 
The maximum length is 100 characters. pub fn locations_queues_patch(&self, request: Queue, name: &str) -> ProjectLocationQueuePatchCall<'a, C> { ProjectLocationQueuePatchCall { hub: self.hub, _request: request, _name: name.to_string(), _update_mask: Default::default(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Pauses the queue. /// /// If a queue is paused then the system will stop dispatching tasks /// until the queue is resumed via /// ResumeQueue. Tasks can still be added /// when the queue is paused. A queue is paused if its /// state is PAUSED. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Required. The queue name. For example: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` pub fn locations_queues_pause(&self, request: PauseQueueRequest, name: &str) -> ProjectLocationQueuePauseCall<'a, C> { ProjectLocationQueuePauseCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Purges a queue by deleting all of its tasks. /// /// All tasks created before this method is called are permanently deleted. /// /// Purge operations can take up to one minute to take effect. Tasks /// might be dispatched before the purge takes effect. A purge is irreversible. /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Required. The queue name.
For example: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` pub fn locations_queues_purge(&self, request: PurgeQueueRequest, name: &str) -> ProjectLocationQueuePurgeCall<'a, C> { ProjectLocationQueuePurgeCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Resume a queue. /// /// This method resumes a queue after it has been /// PAUSED or /// DISABLED. The state of a queue is stored /// in the queue's state; after calling this method it /// will be set to RUNNING. /// /// WARNING: Resuming many high-QPS queues at the same time can /// lead to target overloading. If you are resuming high-QPS /// queues, follow the 500/50/5 pattern described in /// [Managing Cloud Tasks Scaling /// Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling). /// /// # Arguments /// /// * `request` - No description provided. /// * `name` - Required. The queue name. For example: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID` pub fn locations_queues_resume(&self, request: ResumeQueueRequest, name: &str) -> ProjectLocationQueueResumeCall<'a, C> { ProjectLocationQueueResumeCall { hub: self.hub, _request: request, _name: name.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Sets the access control policy for a Queue. Replaces any existing /// policy. /// /// Note: The Cloud Console does not check queue-level IAM permissions yet. /// Project-level permissions are required to use the Cloud Console.
/// /// Authorization requires the following /// [Google IAM](https://cloud.google.com/iam) permission on the specified /// resource parent: /// /// * `cloudtasks.queues.setIamPolicy` /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy is being specified. /// See the operation documentation for the appropriate value for this field. pub fn locations_queues_set_iam_policy(&self, request: SetIamPolicyRequest, resource: &str) -> ProjectLocationQueueSetIamPolicyCall<'a, C> { ProjectLocationQueueSetIamPolicyCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Returns permissions that a caller has on a Queue. /// If the resource does not exist, this will return an empty set of /// permissions, not a NOT_FOUND error. /// /// Note: This operation is designed to be used for building permission-aware /// UIs and command-line tools, not for authorization checking. This operation /// may "fail open" without warning. /// /// # Arguments /// /// * `request` - No description provided. /// * `resource` - REQUIRED: The resource for which the policy detail is being requested. /// See the operation documentation for the appropriate value for this field. pub fn locations_queues_test_iam_permissions(&self, request: TestIamPermissionsRequest, resource: &str) -> ProjectLocationQueueTestIamPermissionCall<'a, C> { ProjectLocationQueueTestIamPermissionCall { hub: self.hub, _request: request, _resource: resource.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Gets information about a location. /// /// # Arguments /// /// * `name` - Resource name for the location. 
pub fn locations_get(&self, name: &str) -> ProjectLocationGetCall<'a, C> { ProjectLocationGetCall { hub: self.hub, _name: name.to_string(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } /// Create a builder to help you perform the following task: /// /// Lists information about the supported locations for this service. /// /// # Arguments /// /// * `name` - The resource that owns the locations collection, if applicable. pub fn locations_list(&self, name: &str) -> ProjectLocationListCall<'a, C> { ProjectLocationListCall { hub: self.hub, _name: name.to_string(), _page_token: Default::default(), _page_size: Default::default(), _filter: Default::default(), _delegate: Default::default(), _additional_params: Default::default(), _scopes: Default::default(), } } } // ################### // CallBuilders ### // ################# /// Acknowledges a pull task. /// /// The worker, that is, the entity that /// leased this task must call this method /// to indicate that the work associated with the task has finished. /// /// The worker must acknowledge a task within the /// lease_duration or the lease /// will expire and the task will become available to be leased /// again. After the task is acknowledged, it will not be returned /// by a later LeaseTasks, /// GetTask, or /// ListTasks. /// /// A builder for the *locations.queues.tasks.acknowledge* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// use cloudtasks2_beta2::api::AcknowledgeTaskRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2; /// # use cloudtasks2_beta2::CloudTasks; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = AcknowledgeTaskRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().locations_queues_tasks_acknowledge(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectLocationQueueTaskAcknowledgeCall<'a, C> where C: 'a { hub: &'a CloudTasks<C>, _request: AcknowledgeTaskRequest, _name: String, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskAcknowledgeCall<'a, C> {} impl<'a, C> ProjectLocationQueueTaskAcknowledgeCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> { /// Perform the operation you have build so far. pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.acknowledge", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:acknowledge"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in 
params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string(); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let authenticator = self.hub.auth.borrow_mut(); let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type)) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); 
client.borrow_mut().request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { let (res_parts, res_body) = res.into_parts(); let res_body_string: String = String::from_utf8( hyper::body::to_bytes(res_body) .await .unwrap() .into_iter() .collect(), ) .unwrap(); let reconstructed_result = hyper::Response::from_parts(res_parts, res_body_string.clone().into()); if !reconstructed_result.status().is_success() { let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(reconstructed_result)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { match json::from_str(&res_body_string) { Ok(decoded) => (reconstructed_result, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return Err(client::Error::JsonDecodeError(res_body_string, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: AcknowledgeTaskRequest) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> { self._request = new_value; self } /// Required. The task name. For example: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` /// /// Sets the *name* path property to the given value. 
/// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known parameters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *$.xgafv* (query-string) - V1 error format. /// * *access_token* (query-string) - OAuth access token. /// * *alt* (query-string) - Data format for response. /// * *callback* (query-string) - JSONP /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskAcknowledgeCall<'a, C> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Cancel a pull task's lease. /// /// The worker can use this method to cancel a task's lease by /// setting its schedule_time to now. This will /// make the task available to be leased to the next caller of /// LeaseTasks. /// /// A builder for the *locations.queues.tasks.cancelLease* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance.
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// use cloudtasks2_beta2::api::CancelLeaseRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2; /// # use cloudtasks2_beta2::CloudTasks; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = CancelLeaseRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! 
/// let result = hub.projects().locations_queues_tasks_cancel_lease(req, "name") /// .doit(); /// # } /// ``` pub struct ProjectLocationQueueTaskCancelLeaseCall<'a, C> where C: 'a { hub: &'a CloudTasks<C>, _request: CancelLeaseRequest, _name: String, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskCancelLeaseCall<'a, C> {} impl<'a, C> ProjectLocationQueueTaskCancelLeaseCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> { /// Perform the operation you have build so far. pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Task)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.cancelLease", http_method: hyper::Method::POST }); let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:cancelLease"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in 
params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string(); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); let mut json_mime_type: mime::Mime = "application/json".parse().unwrap(); let mut request_value_reader = { let mut value = json::value::to_value(&self._request).expect("serde to work"); client::remove_json_null_values(&mut value); let mut dst = io::Cursor::new(Vec::with_capacity(128)); json::to_writer(&mut dst, &value).unwrap(); dst }; let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap(); request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); loop { let authenticator = self.hub.auth.borrow_mut(); let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; request_value_reader.seek(io::SeekFrom::Start(0)).unwrap(); let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .header(CONTENT_TYPE, format!("{}", json_mime_type)) .header(CONTENT_LENGTH, request_size as u64) .body(hyper::body::Body::from(request_value_reader.get_ref().clone())); 
client.borrow_mut().request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { let (res_parts, res_body) = res.into_parts(); let res_body_string: String = String::from_utf8( hyper::body::to_bytes(res_body) .await .unwrap() .into_iter() .collect(), ) .unwrap(); let reconstructed_result = hyper::Response::from_parts(res_parts, res_body_string.clone().into()); if !reconstructed_result.status().is_success() { let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(reconstructed_result)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { match json::from_str(&res_body_string) { Ok(decoded) => (reconstructed_result, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return Err(client::Error::JsonDecodeError(res_body_string, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// /// Sets the *request* property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn request(mut self, new_value: CancelLeaseRequest) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> { self._request = new_value; self } /// Required. The task name. For example: /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID` /// /// Sets the *name* path property to the given value. 
/// /// Even though the property has already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known parameters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *$.xgafv* (query-string) - V1 error format. /// * *access_token* (query-string) - OAuth access token. /// * *alt* (query-string) - Data format for response. /// * *callback* (query-string) - JSONP /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskCancelLeaseCall<'a, C> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Creates a task and adds it to a queue. /// /// Tasks cannot be updated after creation; there is no UpdateTask command. /// /// * For App Engine queues, the maximum task size is /// 100KB. /// * For pull queues, the maximum task size is 1MB. /// /// A builder for the *locations.queues.tasks.create* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::CreateTaskRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = CreateTaskRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_create(req, "parent")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskCreateCall<'a, C>
    where C: 'a {
    hub: &'a CloudTasks<C>,
    _request: CreateTaskRequest,
    _parent: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskCreateCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskCreateCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {

/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Task)> {
    use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
    use std::io::{Read, Seek};
    use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
    use client::ToParts;

    let mut default_delegate = client::DefaultDelegate;
    let delegate: &mut dyn client::Delegate = match self._delegate {
        Some(d) => d,
        None => &mut default_delegate,
    };
    delegate.begin(client::MethodInfo {
        id: "cloudtasks.projects.locations.queues.tasks.create",
        http_method: hyper::Method::POST,
    });

    // Collect query parameters, rejecting user-supplied duplicates of known keys.
    let mut query_params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
    query_params.push(("parent", self._parent.to_string()));
    for &field in ["alt", "parent"].iter() {
        if self._additional_params.contains_key(field) {
            delegate.finished(false);
            return Err(client::Error::FieldClash(field));
        }
    }
    for (name, value) in self._additional_params.iter() {
        query_params.push((&name, value.clone()));
    }
    query_params.push(("alt", "json".to_string()));

    let mut url = self.hub._base_url.clone() + "v2beta2/{+parent}/tasks";
    if self._scopes.is_empty() {
        // No scope configured explicitly: fall back to the broad default scope.
        self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
    }

    // Expand the `{+parent}` URI template from the collected parameters.
    for &(template, key) in [("{+parent}", "parent")].iter() {
        let mut substitution = String::new();
        for &(name, ref value) in query_params.iter() {
            if name == key {
                substitution = value.to_string();
                break;
            }
        }
        if template.as_bytes()[1] == b'+' {
            substitution = percent_encode(substitution.as_bytes(), DEFAULT_ENCODE_SET).to_string();
        }
        url = url.replace(template, &substitution);
    }
    // The path parameter was consumed by the template; drop it from the query string.
    {
        let mut to_remove: Vec<usize> = Vec::with_capacity(1);
        for key in ["parent"].iter() {
            if let Some(index) = query_params.iter().position(|t| &t.0 == key) {
                to_remove.push(index);
            }
        }
        for &index in to_remove.iter() {
            query_params.remove(index);
        }
    }

    let url = url::Url::parse_with_params(&url, query_params).unwrap();

    let json_mime_type: mime::Mime = "application/json".parse().unwrap();
    // Serialize the request body once; the cursor is rewound and replayed on every retry.
    let mut request_value_reader = {
        let mut value = json::value::to_value(&self._request).expect("serde to work");
        client::remove_json_null_values(&mut value);
        let mut dst = io::Cursor::new(Vec::with_capacity(128));
        json::to_writer(&mut dst, &value).unwrap();
        dst
    };
    let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
    request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

    loop {
        let authenticator = self.hub.auth.borrow_mut();
        let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
            Ok(token) => token.clone(),
            Err(err) => match delegate.token(&err) {
                Some(token) => token,
                None => {
                    delegate.finished(false);
                    return Err(client::Error::MissingToken(err));
                }
            },
        };
        // Rewind the body for this attempt (a prior failed attempt may have consumed it).
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
        let request_outcome = {
            let client = &mut *self.hub.client.borrow_mut();
            delegate.pre_request();
            let builder = hyper::Request::builder()
                .method(hyper::Method::POST)
                .uri(url.clone().into_string())
                .header(USER_AGENT, self.hub._user_agent.clone())
                .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
            let http_request = builder
                .header(CONTENT_TYPE, format!("{}", json_mime_type))
                .header(CONTENT_LENGTH, request_size as u64)
                .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));
            client.borrow_mut().request(http_request.unwrap()).await
        };

        match request_outcome {
            Err(err) => {
                // Transport-level error: let the delegate decide whether to retry.
                if let client::Retry::After(d) = delegate.http_error(&err) {
                    sleep(d);
                    continue;
                }
                delegate.finished(false);
                return Err(client::Error::HttpError(err));
            }
            Ok(response) => {
                let (parts, body) = response.into_parts();
                let body_text: String = String::from_utf8(
                    hyper::body::to_bytes(body)
                        .await
                        .unwrap()
                        .into_iter()
                        .collect(),
                ).unwrap();
                let rebuilt_response =
                    hyper::Response::from_parts(parts, body_text.clone().into());

                if !rebuilt_response.status().is_success() {
                    let json_server_error =
                        json::from_str::<client::JsonServerError>(&body_text).ok();
                    let server_error = json::from_str::<client::ServerError>(&body_text)
                        .or_else(|_| json::from_str::<client::ErrorResponse>(&body_text).map(|r| r.error))
                        .ok();
                    if let client::Retry::After(d) =
                        delegate.http_failure(&rebuilt_response, json_server_error, server_error) {
                        sleep(d);
                        continue;
                    }
                    delegate.finished(false);
                    return match json::from_str::<client::ErrorResponse>(&body_text) {
                        Err(_) => Err(client::Error::Failure(rebuilt_response)),
                        Ok(serr) => Err(client::Error::BadRequest(serr)),
                    };
                }
                let outcome = match json::from_str(&body_text) {
                    Ok(decoded) => (rebuilt_response, decoded),
                    Err(err) => {
                        delegate.response_json_decode_error(&body_text, &err);
                        return Err(client::Error::JsonDecodeError(body_text, err));
                    }
                };
                delegate.finished(true);
                return Ok(outcome);
            }
        }
    }
}

///
/// Sets the *request* property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn request(mut self, new_value: CreateTaskRequest) -> ProjectLocationQueueTaskCreateCall<'a, C> {
    self._request = new_value;
    self
}
/// Required. The queue name. For example:
/// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
///
/// The queue must already exist.
///
/// Sets the *parent* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn parent(mut self, new_value: &str) -> ProjectLocationQueueTaskCreateCall<'a, C> {
    self._parent = new_value.to_owned();
    self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskCreateCall<'a, C> {
    self._delegate = Some(new_value);
    self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskCreateCall<'a, C>
    where T: AsRef<str> {
    // Stored verbatim; appended to the query string when `doit()` builds the request.
    self._additional_params
        .insert(name.as_ref().to_owned(), value.as_ref().to_owned());
    self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of relying on
/// the default `Scope::CloudPlatform` variant.
///
/// The `scope` is added to the set of scopes already configured on this call. This is
/// important as one can maintain access tokens for more than one scope.
/// Passing `None` leaves the scope set unchanged (no scope is added).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskCreateCall<'a, C>
    where T: Into<Option<S>>,
          S: AsRef<str> {
    if let Some(scope) = scope.into() {
        self._scopes.insert(scope.as_ref().to_owned(), ());
    }
    self
}
}

/// Deletes a task.
///
/// A task can be deleted if it is scheduled or dispatched. A task
/// cannot be deleted if it has completed successfully or permanently
/// failed.
///
/// A builder for the *locations.queues.tasks.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_delete("name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskDeleteCall<'a, C>
    where C: 'a {
    hub: &'a CloudTasks<C>,
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskDeleteCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskDeleteCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {

/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
    use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
    use std::io::{Read, Seek};
    use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
    use client::ToParts;

    let mut default_delegate = client::DefaultDelegate;
    let delegate: &mut dyn client::Delegate = match self._delegate {
        Some(d) => d,
        None => &mut default_delegate,
    };
    delegate.begin(client::MethodInfo {
        id: "cloudtasks.projects.locations.queues.tasks.delete",
        http_method: hyper::Method::DELETE,
    });

    // Collect query parameters, rejecting user-supplied duplicates of known keys.
    let mut query_params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
    query_params.push(("name", self._name.to_string()));
    for &field in ["alt", "name"].iter() {
        if self._additional_params.contains_key(field) {
            delegate.finished(false);
            return Err(client::Error::FieldClash(field));
        }
    }
    for (name, value) in self._additional_params.iter() {
        query_params.push((&name, value.clone()));
    }
    query_params.push(("alt", "json".to_string()));

    let mut url = self.hub._base_url.clone() + "v2beta2/{+name}";
    if self._scopes.is_empty() {
        // No scope configured explicitly: fall back to the broad default scope.
        self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
    }

    // Expand the `{+name}` URI template from the collected parameters.
    for &(template, key) in [("{+name}", "name")].iter() {
        let mut substitution = String::new();
        for &(name, ref value) in query_params.iter() {
            if name == key {
                substitution = value.to_string();
                break;
            }
        }
        if template.as_bytes()[1] == b'+' {
            substitution = percent_encode(substitution.as_bytes(), DEFAULT_ENCODE_SET).to_string();
        }
        url = url.replace(template, &substitution);
    }
    // The path parameter was consumed by the template; drop it from the query string.
    {
        let mut to_remove: Vec<usize> = Vec::with_capacity(1);
        for key in ["name"].iter() {
            if let Some(index) = query_params.iter().position(|t| &t.0 == key) {
                to_remove.push(index);
            }
        }
        for &index in to_remove.iter() {
            query_params.remove(index);
        }
    }

    let url = url::Url::parse_with_params(&url, query_params).unwrap();

    loop {
        let authenticator = self.hub.auth.borrow_mut();
        let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
            Ok(token) => token.clone(),
            Err(err) => match delegate.token(&err) {
                Some(token) => token,
                None => {
                    delegate.finished(false);
                    return Err(client::Error::MissingToken(err));
                }
            },
        };
        let request_outcome = {
            let client = &mut *self.hub.client.borrow_mut();
            delegate.pre_request();
            let builder = hyper::Request::builder()
                .method(hyper::Method::DELETE)
                .uri(url.clone().into_string())
                .header(USER_AGENT, self.hub._user_agent.clone())
                .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
            let http_request = builder.body(hyper::body::Body::empty());
            client.borrow_mut().request(http_request.unwrap()).await
        };

        match request_outcome {
            Err(err) => {
                // Transport-level error: let the delegate decide whether to retry.
                if let client::Retry::After(d) = delegate.http_error(&err) {
                    sleep(d);
                    continue;
                }
                delegate.finished(false);
                return Err(client::Error::HttpError(err));
            }
            Ok(response) => {
                let (parts, body) = response.into_parts();
                let body_text: String = String::from_utf8(
                    hyper::body::to_bytes(body)
                        .await
                        .unwrap()
                        .into_iter()
                        .collect(),
                ).unwrap();
                let rebuilt_response =
                    hyper::Response::from_parts(parts, body_text.clone().into());

                if !rebuilt_response.status().is_success() {
                    let json_server_error =
                        json::from_str::<client::JsonServerError>(&body_text).ok();
                    let server_error = json::from_str::<client::ServerError>(&body_text)
                        .or_else(|_| json::from_str::<client::ErrorResponse>(&body_text).map(|r| r.error))
                        .ok();
                    if let client::Retry::After(d) =
                        delegate.http_failure(&rebuilt_response, json_server_error, server_error) {
                        sleep(d);
                        continue;
                    }
                    delegate.finished(false);
                    return match json::from_str::<client::ErrorResponse>(&body_text) {
                        Err(_) => Err(client::Error::Failure(rebuilt_response)),
                        Ok(serr) => Err(client::Error::BadRequest(serr)),
                    };
                }
                let outcome = match json::from_str(&body_text) {
                    Ok(decoded) => (rebuilt_response, decoded),
                    Err(err) => {
                        delegate.response_json_decode_error(&body_text, &err);
                        return Err(client::Error::JsonDecodeError(body_text, err));
                    }
                };
                delegate.finished(true);
                return Ok(outcome);
            }
        }
    }
}

/// Required. The task name. For example:
/// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
///
/// Sets the *name* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskDeleteCall<'a, C> {
    self._name = new_value.to_owned();
    self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskDeleteCall<'a, C> {
    self._delegate = Some(new_value);
    self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskDeleteCall<'a, C>
    where T: AsRef<str> {
    // Stored verbatim; appended to the query string when `doit()` builds the request.
    self._additional_params
        .insert(name.as_ref().to_owned(), value.as_ref().to_owned());
    self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of relying on
/// the default `Scope::CloudPlatform` variant.
///
/// The `scope` is added to the set of scopes already configured on this call. This is
/// important as one can maintain access tokens for more than one scope.
/// Passing `None` leaves the scope set unchanged (no scope is added).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskDeleteCall<'a, C>
    where T: Into<Option<S>>,
          S: AsRef<str> {
    if let Some(scope) = scope.into() {
        self._scopes.insert(scope.as_ref().to_owned(), ());
    }
    self
}
}

/// Gets a task.
///
/// A builder for the *locations.queues.tasks.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_get("name")
///              .response_view("duo")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskGetCall<'a, C>
    where C: 'a {
    hub: &'a CloudTasks<C>,
    _name: String,
    _response_view: Option<String>,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskGetCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskGetCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {

/// Perform the operation you have built so far.
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Task)> {
    use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
    use std::io::{Read, Seek};
    use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
    use client::ToParts;

    let mut default_delegate = client::DefaultDelegate;
    let delegate: &mut dyn client::Delegate = match self._delegate {
        Some(d) => d,
        None => &mut default_delegate,
    };
    delegate.begin(client::MethodInfo {
        id: "cloudtasks.projects.locations.queues.tasks.get",
        http_method: hyper::Method::GET,
    });

    // Collect query parameters, rejecting user-supplied duplicates of known keys.
    let mut query_params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
    query_params.push(("name", self._name.to_string()));
    if let Some(value) = self._response_view {
        query_params.push(("responseView", value.to_string()));
    }
    for &field in ["alt", "name", "responseView"].iter() {
        if self._additional_params.contains_key(field) {
            delegate.finished(false);
            return Err(client::Error::FieldClash(field));
        }
    }
    for (name, value) in self._additional_params.iter() {
        query_params.push((&name, value.clone()));
    }
    query_params.push(("alt", "json".to_string()));

    let mut url = self.hub._base_url.clone() + "v2beta2/{+name}";
    if self._scopes.is_empty() {
        // No scope configured explicitly: fall back to the broad default scope.
        self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
    }

    // Expand the `{+name}` URI template from the collected parameters.
    for &(template, key) in [("{+name}", "name")].iter() {
        let mut substitution = String::new();
        for &(name, ref value) in query_params.iter() {
            if name == key {
                substitution = value.to_string();
                break;
            }
        }
        if template.as_bytes()[1] == b'+' {
            substitution = percent_encode(substitution.as_bytes(), DEFAULT_ENCODE_SET).to_string();
        }
        url = url.replace(template, &substitution);
    }
    // The path parameter was consumed by the template; drop it from the query string.
    {
        let mut to_remove: Vec<usize> = Vec::with_capacity(1);
        for key in ["name"].iter() {
            if let Some(index) = query_params.iter().position(|t| &t.0 == key) {
                to_remove.push(index);
            }
        }
        for &index in to_remove.iter() {
            query_params.remove(index);
        }
    }

    let url = url::Url::parse_with_params(&url, query_params).unwrap();

    loop {
        let authenticator = self.hub.auth.borrow_mut();
        let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
            Ok(token) => token.clone(),
            Err(err) => match delegate.token(&err) {
                Some(token) => token,
                None => {
                    delegate.finished(false);
                    return Err(client::Error::MissingToken(err));
                }
            },
        };
        let request_outcome = {
            let client = &mut *self.hub.client.borrow_mut();
            delegate.pre_request();
            let builder = hyper::Request::builder()
                .method(hyper::Method::GET)
                .uri(url.clone().into_string())
                .header(USER_AGENT, self.hub._user_agent.clone())
                .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));
            let http_request = builder.body(hyper::body::Body::empty());
            client.borrow_mut().request(http_request.unwrap()).await
        };

        match request_outcome {
            Err(err) => {
                // Transport-level error: let the delegate decide whether to retry.
                if let client::Retry::After(d) = delegate.http_error(&err) {
                    sleep(d);
                    continue;
                }
                delegate.finished(false);
                return Err(client::Error::HttpError(err));
            }
            Ok(response) => {
                let (parts, body) = response.into_parts();
                let body_text: String = String::from_utf8(
                    hyper::body::to_bytes(body)
                        .await
                        .unwrap()
                        .into_iter()
                        .collect(),
                ).unwrap();
                let rebuilt_response =
                    hyper::Response::from_parts(parts, body_text.clone().into());

                if !rebuilt_response.status().is_success() {
                    let json_server_error =
                        json::from_str::<client::JsonServerError>(&body_text).ok();
                    let server_error = json::from_str::<client::ServerError>(&body_text)
                        .or_else(|_| json::from_str::<client::ErrorResponse>(&body_text).map(|r| r.error))
                        .ok();
                    if let client::Retry::After(d) =
                        delegate.http_failure(&rebuilt_response, json_server_error, server_error) {
                        sleep(d);
                        continue;
                    }
                    delegate.finished(false);
                    return match json::from_str::<client::ErrorResponse>(&body_text) {
                        Err(_) => Err(client::Error::Failure(rebuilt_response)),
                        Ok(serr) => Err(client::Error::BadRequest(serr)),
                    };
                }
                let outcome = match json::from_str(&body_text) {
                    Ok(decoded) => (rebuilt_response, decoded),
                    Err(err) => {
                        delegate.response_json_decode_error(&body_text, &err);
                        return Err(client::Error::JsonDecodeError(body_text, err));
                    }
                };
                delegate.finished(true);
                return Ok(outcome);
            }
        }
    }
}

/// Required. The task name. For example:
/// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
///
/// Sets the *name* path property to the given value.
///
/// Even though the property has already been set when instantiating this call,
/// we provide this method for API completeness.
pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskGetCall<'a, C> {
    self._name = new_value.to_owned();
    self
}
/// The response_view specifies which subset of the Task will be
/// returned.
///
/// By default response_view is BASIC; not all
/// information is retrieved by default because some data, such as
/// payloads, might be desirable to return only when needed because
/// of its large size or because of the sensitivity of data that it
/// contains.
///
/// Authorization for FULL requires
/// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
/// permission on the Task resource.
///
/// Sets the *response view* query property to the given value.
pub fn response_view(mut self, new_value: &str) -> ProjectLocationQueueTaskGetCall<'a, C> {
    self._response_view = Some(new_value.to_owned());
    self
}
/// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
/// while executing the actual API request.
///
/// It should be used to handle progress information, and to implement a certain level of resilience.
///
/// Sets the *delegate* property to the given value.
pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskGetCall<'a, C> {
    self._delegate = Some(new_value);
    self
}
/// Set any additional parameter of the query string used in the request.
/// It should be used to set parameters which are not yet available through their own
/// setters.
///
/// Please note that this method must not be used to set any of the known parameters
/// which have their own setter method. If done anyway, the request will fail.
///
/// # Additional Parameters
///
/// * *$.xgafv* (query-string) - V1 error format.
/// * *access_token* (query-string) - OAuth access token.
/// * *alt* (query-string) - Data format for response.
/// * *callback* (query-string) - JSONP
/// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
/// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
/// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
/// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
/// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
/// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskGetCall<'a, C>
    where T: AsRef<str> {
    // Stored verbatim; appended to the query string when `doit()` builds the request.
    self._additional_params
        .insert(name.as_ref().to_owned(), value.as_ref().to_owned());
    self
}
/// Identifies the authorization scope for the method you are building.
///
/// Use this method to actively specify which scope should be used, instead of relying on
/// the default `Scope::CloudPlatform` variant.
///
/// The `scope` is added to the set of scopes already configured on this call. This is
/// important as one can maintain access tokens for more than one scope.
/// Passing `None` leaves the scope set unchanged (no scope is added).
///
/// Usually there is more than one suitable scope to authorize an operation, some of which may
/// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
/// sufficient, a read-write scope will do as well.
pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskGetCall<'a, C>
    where T: Into<Option<S>>,
          S: AsRef<str> {
    if let Some(scope) = scope.into() {
        self._scopes.insert(scope.as_ref().to_owned(), ());
    }
    self
}
}

/// Leases tasks from a pull queue for
/// lease_duration.
///
/// This method is invoked by the worker to obtain a lease. The
/// worker must acknowledge the task via
/// AcknowledgeTask after they have
/// performed the work associated with the task.
///
/// The payload is intended to store data that
/// the worker needs to perform the work associated with the task. To
/// return the payloads in the response, set
/// response_view to
/// FULL.
///
/// A maximum of 10 qps of LeaseTasks
/// requests are allowed per
/// queue. RESOURCE_EXHAUSTED
/// is returned when this limit is
/// exceeded. RESOURCE_EXHAUSTED
/// is also returned when
/// max_tasks_dispatched_per_second
/// is exceeded.
///
/// A builder for the *locations.queues.tasks.lease* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::LeaseTasksRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = LeaseTasksRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_lease(req, "parent")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskLeaseCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: LeaseTasksRequest,
    _parent: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskLeaseCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskLeaseCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    ///
    /// Assembles the URL and query string, serializes the request body to JSON,
    /// authorizes via the hub's authenticator, and retries as long as the
    /// delegate answers `Retry::After(..)` to transport or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, LeaseTasksResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.lease",
                               http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        // Known parameters must not be duplicated via `param()` — fail fast on a clash.
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+parent}/tasks:lease";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the URI template; `{+...}` (reserved expansion) values are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above and must not also
        // appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request once into an in-memory cursor; it is rewound and
        // re-sent on every retry iteration below.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        loop {
            // A fresh token is requested each attempt; the delegate may supply a
            // fallback token when the authenticator fails.
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is consumed into a String, then the response is
                    // reconstructed so both the caller and the error path can use it.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: LeaseTasksRequest) -> ProjectLocationQueueTaskLeaseCall<'a, C> {
        self._request = new_value;
        self
    }

    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectLocationQueueTaskLeaseCall<'a, C> {
        self._parent = new_value.to_string();
        self
    }

    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskLeaseCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskLeaseCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskLeaseCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Lists the tasks in a queue.
///
/// By default, only the BASIC view is retrieved
/// due to performance considerations;
/// response_view controls the
/// subset of information which is returned.
///
/// The tasks may be returned in any order. The ordering may change at any
/// time.
///
/// A builder for the *locations.queues.tasks.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_list("parent")
///              .response_view("Lorem")
///              .page_token("gubergren")
///              .page_size(-75)
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskListCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _parent: String,
    _response_view: Option<String>,
    _page_token: Option<String>,
    _page_size: Option<i32>,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskListCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskListCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    ///
    /// Issues an authorized GET request (no request body), retrying as long as
    /// the delegate answers `Retry::After(..)` to transport or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListTasksResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.list",
                               http_method: hyper::Method::GET });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        // Optional query parameters are only sent when the caller set them.
        if let Some(value) = self._response_view {
            params.push(("responseView", value.to_string()));
        }
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        // Known parameters must not be duplicated via `param()` — fail fast on a clash.
        for &field in ["alt", "parent", "responseView", "pageToken", "pageSize"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+parent}/tasks";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the URI template; `{+...}` (reserved expansion) values are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above and must not also
        // appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();



        loop {
            // A fresh token is requested each attempt; the delegate may supply a
            // fallback token when the authenticator fails.
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                    .body(hyper::body::Body::empty());

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is consumed into a String, then the response is
                    // reconstructed so both the caller and the error path can use it.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectLocationQueueTaskListCall<'a, C> {
        self._parent = new_value.to_string();
        self
    }

    /// The response_view specifies which subset of the Task will be
    /// returned.
    ///
    /// By default response_view is BASIC; not all
    /// information is retrieved by default because some data, such as
    /// payloads, might be desirable to return only when needed because
    /// of its large size or because of the sensitivity of data that it
    /// contains.
    ///
    /// Authorization for FULL requires
    /// `cloudtasks.tasks.fullView` [Google IAM](https://cloud.google.com/iam/)
    /// permission on the Task resource.
    ///
    /// Sets the *response view* query property to the given value.
    pub fn response_view(mut self, new_value: &str) -> ProjectLocationQueueTaskListCall<'a, C> {
        self._response_view = Some(new_value.to_string());
        self
    }

    /// A token identifying the page of results to return.
    ///
    /// To request the first page results, page_token must be empty. To
    /// request the next page of results, page_token must be the value of
    /// next_page_token returned
    /// from the previous call to ListTasks
    /// method.
    ///
    /// The page token is valid for only 2 hours.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectLocationQueueTaskListCall<'a, C> {
        self._page_token = Some(new_value.to_string());
        self
    }

    /// Maximum page size.
    ///
    /// Fewer tasks than requested might be returned, even if more tasks exist; use
    /// next_page_token in the response to
    /// determine if more tasks exist.
    ///
    /// The maximum page size is 1000. If unspecified, the page size will be the
    /// maximum.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectLocationQueueTaskListCall<'a, C> {
        self._page_size = Some(new_value);
        self
    }

    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskListCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskListCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskListCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Renew the current lease of a pull task.
///
/// The worker can use this method to extend the lease by a new
/// duration, starting from now.
/// The new task lease will be
/// returned in the task's schedule_time.
///
/// A builder for the *locations.queues.tasks.renewLease* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::RenewLeaseRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = RenewLeaseRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_renew_lease(req, "name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskRenewLeaseCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: RenewLeaseRequest,
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskRenewLeaseCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskRenewLeaseCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    ///
    /// Assembles the URL, serializes the request body to JSON, authorizes via the
    /// hub's authenticator, and retries as long as the delegate answers
    /// `Retry::After(..)` to transport or HTTP failures.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Task)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.renewLease",
                               http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Known parameters must not be duplicated via `param()` — fail fast on a clash.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:renewLease";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the URI template; `{+...}` (reserved expansion) values are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template above and must not also
        // appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request once into an in-memory cursor; it is rewound and
        // re-sent on every retry iteration below.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        loop {
            // A fresh token is requested each attempt; the delegate may supply a
            // fallback token when the authenticator fails.
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                    .header(USER_AGENT, self.hub._user_agent.clone())
                    .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                    .header(CONTENT_TYPE, format!("{}", json_mime_type))
                    .header(CONTENT_LENGTH, request_size as u64)
                    .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate decides whether to retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is consumed into a String, then the response is
                    // reconstructed so both the caller and the error path can use it.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: RenewLeaseRequest) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C> {
        self._request = new_value;
        self
    }

    /// Required. The task name. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C> {
        self._name = new_value.to_string();
        self
    }

    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskRenewLeaseCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Forces a task to run now.
///
/// When this method is called, Cloud Tasks will dispatch the task, even if
/// the task is already running, the queue has reached its RateLimits or
/// is PAUSED.
///
/// This command is meant to be used for manual debugging. For
/// example, RunTask can be used to retry a failed
/// task after a fix has been made or to manually force a task to be
/// dispatched now.
///
/// The dispatched task is returned.
That is, the task that is returned /// contains the status after the task is dispatched but /// before the task is received by its target. /// /// If Cloud Tasks receives a successful response from the task's /// target, then the task will be deleted; otherwise the task's /// schedule_time will be reset to the time that /// RunTask was called plus the retry delay specified /// in the queue's RetryConfig. /// /// RunTask returns /// NOT_FOUND when it is called on a /// task that has already succeeded or permanently failed. /// /// RunTask cannot be called on a /// pull task. /// /// A builder for the *locations.queues.tasks.run* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. /// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// use cloudtasks2_beta2::api::RunTaskRequest; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2; /// # use cloudtasks2_beta2::CloudTasks; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // As the method needs a request, you would usually fill it with the desired information /// // into the respective structure. Some of the parts shown here might not be applicable ! /// // Values shown here are possibly random and not representative ! /// let mut req = RunTaskRequest::default(); /// /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. 
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_tasks_run(req, "name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueTaskRunCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: RunTaskRequest,
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTaskRunCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTaskRunCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Task)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if one was set, otherwise a throwaway default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.tasks.run",
                                       http_method: hyper::Method::POST });
        // Assemble query parameters; reserved names must not also appear in the
        // caller-supplied additional parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:run";
        // Fall back to the default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URL template from the collected parameters;
        // reserved expansions (leading '+') are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body to JSON once, stripping serde-produced nulls;
        // the cursor is rewound before every (re)try below.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Request/retry loop: the delegate may ask for a retry after token or HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token before failing.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind so a retry re-sends the full payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed and handed back
                    // to the caller as part of the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed result.
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: RunTaskRequest) -> ProjectLocationQueueTaskRunCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Required. The task name. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID/tasks/TASK_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueueTaskRunCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTaskRunCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTaskRunCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTaskRunCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Creates a queue.
///
/// Queues created with this method allow tasks to live for a maximum of 31
/// days. After a task is 31 days old, the task will be deleted regardless of whether
/// it was dispatched or not.
///
/// WARNING: Using this method may have unintended side effects if you are
/// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
/// Read
/// [Overview of Queue Management and
/// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
/// this method.
///
/// A builder for the *locations.queues.create* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::Queue;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Queue::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_create(req, "parent")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueCreateCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: Queue,
    _parent: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueCreateCall<'a, C> {}

impl<'a, C> ProjectLocationQueueCreateCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if one was set, otherwise a throwaway default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.create",
                                       http_method: hyper::Method::POST });
        // Assemble query parameters; reserved names must not also appear in the
        // caller-supplied additional parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        for &field in ["alt", "parent"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+parent}/queues";
        // Fall back to the default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+parent}` URL template from the collected parameters;
        // reserved expansions (leading '+') are percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body to JSON once, stripping serde-produced nulls;
        // the cursor is rewound before every (re)try below.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Request/retry loop: the delegate may ask for a retry after token or HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token before failing.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            // Rewind so a retry re-sends the full payload.
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed and handed back
                    // to the caller as part of the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed result.
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Queue) -> ProjectLocationQueueCreateCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Required. The location name in which the queue will be created.
    /// For example: `projects/PROJECT_ID/locations/LOCATION_ID`
    ///
    /// The list of allowed locations can be obtained by calling Cloud
    /// Tasks' implementation of
    /// ListLocations.
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectLocationQueueCreateCall<'a, C> {
        self._parent = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueCreateCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueCreateCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueCreateCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Deletes a queue.
///
/// This command will delete the queue even if it has tasks in it.
///
/// Note: If you delete a queue, a queue with the same name can't be created
/// for 7 days.
///
/// WARNING: Using this method may have unintended side effects if you are
/// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
/// Read
/// [Overview of Queue Management and
/// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
/// this method.
///
/// A builder for the *locations.queues.delete* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_delete("name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueDeleteCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueDeleteCall<'a, C> {}

impl<'a, C> ProjectLocationQueueDeleteCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Empty)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Use the caller-supplied delegate if one was set, otherwise a throwaway default.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.delete",
                                       http_method: hyper::Method::DELETE });
        // Assemble query parameters; reserved names must not also appear in the
        // caller-supplied additional parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}";
        // Fall back to the default scope when the caller selected none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URL template from the collected parameters;
        // reserved expansions (leading '+') are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // The path parameter was consumed by the template above; drop it from the query.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Request/retry loop: no request body is sent for DELETE; the delegate may
        // ask for a retry after token or HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    // Give the delegate a chance to supply a token before failing.
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::DELETE).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    // Transport-level error: the delegate may request a delayed retry.
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed and handed back
                    // to the caller as part of the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        // Try the known server-error shapes; the delegate may still retry.
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    // Success: decode the JSON payload into the typed result.
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueueDeleteCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueDeleteCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueDeleteCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueDeleteCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Gets a queue.
///
/// A builder for the *locations.queues.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_get("name")
///              .read_mask("duo")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueGetCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _name: String,                                   // required `name` path parameter
    _read_mask: Option<String>,                      // optional `readMask` query parameter
    _delegate: Option<&'a mut dyn client::Delegate>, // progress/resilience hook, may be absent
    _additional_params: HashMap<String, String>,     // free-form extra query parameters
    _scopes: BTreeMap<String, ()>                    // OAuth scopes the token is requested with
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueGetCall<'a, C> {}

impl<'a, C> ProjectLocationQueueGetCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not install one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.get",
                               http_method: hyper::Method::GET });
        // Gather path + query parameters; additional params must not clash
        // with the reserved/known parameter names.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._read_mask {
            params.push(("readMask", value.to_string()));
        }
        for &field in ["alt", "name", "readMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}";
        if self._scopes.len() == 0 {
            // No scope chosen explicitly: use the broadest default scope.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template expansion above;
        // remove them so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Retry loop: the delegate may ask to retry after auth/HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed here and
                    // handed back to the caller in the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The resource name of the queue. For example:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueueGetCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// Optional. Read mask is used for a more granular control over what the API returns. By
    /// default it includes all fields in Queue except for stats.
    ///
    /// Sets the *read mask* query property to the given value.
    pub fn read_mask(mut self, new_value: &str) -> ProjectLocationQueueGetCall<'a, C> {
        self._read_mask = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueGetCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueGetCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueGetCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Gets the access control policy for a Queue.
/// Returns an empty policy if the resource exists and does not have a policy
/// set.
///
/// Authorization requires the following
/// [Google IAM](https://cloud.google.com/iam) permission on the specified
/// resource parent:
///
/// * `cloudtasks.queues.getIamPolicy`
///
/// A builder for the *locations.queues.getIamPolicy* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::GetIamPolicyRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = GetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_get_iam_policy(req, "resource")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueGetIamPolicyCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: GetIamPolicyRequest,                   // JSON-serialized POST request body
    _resource: String,                               // required `resource` path parameter
    _delegate: Option<&'a mut dyn client::Delegate>, // progress/resilience hook, may be absent
    _additional_params: HashMap<String, String>,     // free-form extra query parameters
    _scopes: BTreeMap<String, ()>                    // OAuth scopes the token is requested with
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueGetIamPolicyCall<'a, C> {}

impl<'a, C> ProjectLocationQueueGetIamPolicyCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Policy)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not install one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.getIamPolicy",
                               http_method: hyper::Method::POST });
        // Gather path + query parameters; additional params must not clash
        // with the reserved/known parameter names.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("resource", self._resource.to_string()));
        for &field in ["alt", "resource"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+resource}:getIamPolicy";
        if self._scopes.len() == 0 {
            // No scope chosen explicitly: use the broadest default scope.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+resource}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template expansion above;
        // remove them so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["resource"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once (nulls stripped); the cursor is
        // rewound on every retry so each attempt sends the full payload.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();

        // Retry loop: the delegate may ask to retry after auth/HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed here and
                    // handed back to the caller in the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: GetIamPolicyRequest) -> ProjectLocationQueueGetIamPolicyCall<'a, C> {
        self._request = new_value;
        self
    }
    /// REQUIRED: The resource for which the policy is being requested.
    /// See the operation documentation for the appropriate value for this field.
    ///
    /// Sets the *resource* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn resource(mut self, new_value: &str) -> ProjectLocationQueueGetIamPolicyCall<'a, C> {
        self._resource = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueGetIamPolicyCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueGetIamPolicyCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueGetIamPolicyCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Lists queues.
///
/// Queues are returned in lexicographical order.
///
/// A builder for the *locations.queues.list* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_list("parent")
///              .page_token("ut")
///              .page_size(-12)
///              .filter("rebum.")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueueListCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _parent: String,                                 // required `parent` path parameter
    _page_token: Option<String>,                     // optional `pageToken` query parameter
    _page_size: Option<i32>,                         // optional `pageSize` query parameter
    _filter: Option<String>,                         // optional `filter` query parameter
    _delegate: Option<&'a mut dyn client::Delegate>, // progress/resilience hook, may be absent
    _additional_params: HashMap<String, String>,     // free-form extra query parameters
    _scopes: BTreeMap<String, ()>                    // OAuth scopes the token is requested with
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueListCall<'a, C> {}

impl<'a, C> ProjectLocationQueueListCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListQueuesResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Fall back to a no-op delegate when the caller did not install one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.list",
                               http_method: hyper::Method::GET });
        // Gather path + query parameters; additional params must not clash
        // with the reserved/known parameter names.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len());
        params.push(("parent", self._parent.to_string()));
        if let Some(value) = self._page_token {
            params.push(("pageToken", value.to_string()));
        }
        if let Some(value) = self._page_size {
            params.push(("pageSize", value.to_string()));
        }
        if let Some(value) = self._filter {
            params.push(("filter", value.to_string()));
        }
        for &field in ["alt", "parent", "pageToken", "pageSize", "filter"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+parent}/queues";
        if self._scopes.len() == 0 {
            // No scope chosen explicitly: use the broadest default scope.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+parent}` URI template; `+` templates get percent-encoded.
        for &(find_this, param_name) in [("{+parent}", "parent")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by the template expansion above;
        // remove them so they do not also appear in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["parent"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Retry loop: the delegate may ask to retry after auth/HTTP failures.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));

                let request = req_builder
                        .body(hyper::body::Body::empty());

                client.borrow_mut().request(request.unwrap()).await
            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body so it can both be parsed here and
                    // handed back to the caller in the reconstructed response.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    /// Required. The location name.
    /// For example: `projects/PROJECT_ID/locations/LOCATION_ID`
    ///
    /// Sets the *parent* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn parent(mut self, new_value: &str) -> ProjectLocationQueueListCall<'a, C> {
        self._parent = new_value.to_string();
        self
    }
    /// A token identifying the page of results to return.
    ///
    /// To request the first page results, page_token must be empty. To
    /// request the next page of results, page_token must be the value of
    /// next_page_token returned
    /// from the previous call to ListQueues
    /// method. It is an error to switch the value of the
    /// filter while iterating through pages.
    ///
    /// Sets the *page token* query property to the given value.
    pub fn page_token(mut self, new_value: &str) -> ProjectLocationQueueListCall<'a, C> {
        self._page_token = Some(new_value.to_string());
        self
    }
    /// Requested page size.
    ///
    /// The maximum page size is 9800. If unspecified, the page size will
    /// be the maximum. Fewer queues than requested might be returned,
    /// even if more queues exist; use the
    /// next_page_token in the
    /// response to determine if more queues exist.
    ///
    /// Sets the *page size* query property to the given value.
    pub fn page_size(mut self, new_value: i32) -> ProjectLocationQueueListCall<'a, C> {
        self._page_size = Some(new_value);
        self
    }
    /// `filter` can be used to specify a subset of queues. Any Queue
    /// field can be used as a filter and several operators are supported.
    /// For example: `<=, <, >=, >, !=, =, :`. The filter syntax is the same as
    /// described in
    /// [Stackdriver's Advanced Logs
    /// Filters](https://cloud.google.com/logging/docs/view/advanced_filters).
    ///
    /// Sample filter "app_engine_http_target: *".
    ///
    /// Note that using filters might cause fewer queues than the
    /// requested_page size to be returned.
    ///
    /// Sets the *filter* query property to the given value.
    pub fn filter(mut self, new_value: &str) -> ProjectLocationQueueListCall<'a, C> {
        self._filter = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueListCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueListCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead of the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueListCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
          Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
          None => None,
        };
        self
    }
}


/// Updates a queue.
///
/// This method creates the queue if it does not exist and updates
/// the queue if it does exist.
///
/// Queues created with this method allow tasks to live for a maximum of 31
/// days.
/// After a task is 31 days old, the task will be deleted regardless of whether
/// it was dispatched or not.
///
/// WARNING: Using this method may have unintended side effects if you are
/// using an App Engine `queue.yaml` or `queue.xml` file to manage your queues.
/// Read
/// [Overview of Queue Management and
/// queue.yaml](https://cloud.google.com/tasks/docs/queue-yaml) before using
/// this method.
///
/// A builder for the *locations.queues.patch* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::Queue;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = Queue::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_patch(req, "name")
///              .update_mask("ipsum")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueuePatchCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    // The request body to be PATCHed.
    _request: Queue,
    // Path parameter: the fully-qualified queue name.
    _name: String,
    // Optional field mask restricting which queue fields are updated.
    _update_mask: Option<String>,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    // Ordered set of OAuth scopes (value is always unit).
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueuePatchCall<'a, C> {}

impl<'a, C> ProjectLocationQueuePatchCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.patch",
                               http_method: hyper::Method::PATCH });
        // Assemble query/path parameters; known parameter names must not also
        // appear in `_additional_params`, otherwise the call is rejected.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(5 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        if let Some(value) = self._update_mask {
            params.push(("updateMask", value.to_string()));
        }
        for &field in ["alt", "name", "updateMask"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}";
        if self._scopes.len() == 0 {
            // No scope chosen explicitly: fall back to the broadest default.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URI template; `+` templates are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path parameters were consumed by template expansion; drop them so
        // they are not duplicated in the query string.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request once into a seekable cursor so it can be
        // re-sent on retries; nulls are stripped from the JSON value first.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop: each iteration fetches a token, sends the
        // request, and either returns or retries per the delegate's decision.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::PATCH).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the whole body as a string so it can be parsed
                    // multiple ways, then rebuild the response around it.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: Queue) -> ProjectLocationQueuePatchCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Caller-specified and required in CreateQueue,
    /// after which it becomes output only.
    ///
    /// The queue name.
    ///
    /// The queue name must have the following format:
    /// `projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// * `PROJECT_ID` can contain letters ([A-Za-z]), numbers ([0-9]),
    ///   hyphens (-), colons (:), or periods (.).
    ///   For more information, see
    ///   [Identifying
    ///   projects](https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects)
    /// * `LOCATION_ID` is the canonical ID for the queue's location.
    ///   The list of available locations can be obtained by calling
    ///   ListLocations.
    ///   For more information, see https://cloud.google.com/about/locations/.
    /// * `QUEUE_ID` can contain letters ([A-Za-z]), numbers ([0-9]), or
    ///   hyphens (-). The maximum length is 100 characters.
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueuePatchCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// A mask used to specify which fields of the queue are being updated.
    ///
    /// If empty, then all fields will be updated.
    ///
    /// Sets the *update mask* query property to the given value.
    pub fn update_mask(mut self, new_value: &str) -> ProjectLocationQueuePatchCall<'a, C> {
        self._update_mask = Some(new_value.to_string());
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueuePatchCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueuePatchCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueuePatchCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Pauses the queue.
///
/// If a queue is paused then the system will stop dispatching tasks
/// until the queue is resumed via
/// ResumeQueue. Tasks can still be added
/// when the queue is paused. A queue is paused if its
/// state is PAUSED.
///
/// A builder for the *locations.queues.pause* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::PauseQueueRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure.
/// Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = PauseQueueRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_pause(req, "name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueuePauseCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    // The (empty-bodied) pause request payload.
    _request: PauseQueueRequest,
    // Path parameter: the fully-qualified queue name.
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    // Ordered set of OAuth scopes (value is always unit).
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueuePauseCall<'a, C> {}

impl<'a, C> ProjectLocationQueuePauseCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.pause",
                               http_method: hyper::Method::POST });
        // Assemble parameters; known names must not clash with user-supplied
        // additional parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:pause";
        if self._scopes.len() == 0 {
            // Default to the broadest scope when none was chosen explicitly.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URI template; `+` templates are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Remove path parameters consumed by the template expansion.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request once into a seekable cursor so retries can
        // re-send it without re-serializing.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop driven by the delegate.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the body so it can be parsed several ways, then
                    // rebuild the response around the buffered string.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: PauseQueueRequest) -> ProjectLocationQueuePauseCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueuePauseCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueuePauseCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueuePauseCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueuePauseCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Purges a queue by deleting all of its tasks.
///
/// All tasks created before this method is called are permanently deleted.
///
/// Purge operations can take up to one minute to take effect. Tasks
/// might be dispatched before the purge takes effect. A purge is irreversible.
///
/// A builder for the *locations.queues.purge* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::PurgeQueueRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = PurgeQueueRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_purge(req, "name")
///              .doit();
/// # }
/// ```
pub struct ProjectLocationQueuePurgeCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    // The (empty-bodied) purge request payload.
    _request: PurgeQueueRequest,
    // Path parameter: the fully-qualified queue name.
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    // Ordered set of OAuth scopes (value is always unit).
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueuePurgeCall<'a, C> {}

impl<'a, C> ProjectLocationQueuePurgeCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have built so far.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.purge",
                               http_method: hyper::Method::POST });
        // Assemble parameters; known names must not clash with user-supplied
        // additional parameters.
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }

        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:purge";
        if self._scopes.len() == 0 {
            // Default to the broadest scope when none was chosen explicitly.
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Expand the `{+name}` URI template; `+` templates are percent-encoded.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Remove path parameters consumed by the template expansion.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        // Serialize the request once into a seekable cursor so retries can
        // re-send it without re-serializing.
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop driven by the delegate.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // Buffer the body so it can be parsed several ways, then
                    // rebuild the response around the buffered string.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: PurgeQueueRequest) -> ProjectLocationQueuePurgeCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property has already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueuePurgeCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueuePurgeCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }
    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueuePurgeCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }
    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueuePurgeCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Resume a queue.
///
/// This method resumes a queue after it has been
/// PAUSED or
/// DISABLED. The state of a queue is stored
/// in the queue's state; after calling this method it
/// will be set to RUNNING.
///
/// WARNING: Resuming many high-QPS queues at the same time can
/// lead to target overloading. If you are resuming high-QPS
/// queues, follow the 500/50/5 pattern described in
/// [Managing Cloud Tasks Scaling
/// Risks](https://cloud.google.com/tasks/docs/manage-cloud-task-scaling).
///
/// A builder for the *locations.queues.resume* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::ResumeQueueRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = ResumeQueueRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_resume(req, "name")
///              .doit();
/// # }
/// ```
// NOTE(review): generated call builder. Fields mirror the setters below and are
// consumed by `doit()`; callers obtain an instance via `ProjectMethods`.
pub struct ProjectLocationQueueResumeCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: ResumeQueueRequest,
    _name: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueResumeCall<'a, C> {}

impl<'a, C> ProjectLocationQueueResumeCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have build so far.
    ///
    /// Serializes `_request` as JSON, POSTs it to `v2beta2/{+name}:resume` and
    /// decodes the response into a `Queue`. Delegate callbacks are invoked at
    /// each stage; HTTP/auth failures may be retried when the delegate returns
    /// `client::Retry::After(..)`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Queue)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Resolve the effective delegate: caller-supplied, or a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.resume",
                               http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("name", self._name.to_string()));
        // Reject caller-supplied additional params that clash with reserved names.
        for &field in ["alt", "name"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+name}:resume";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Substitute the `{+name}` URL template; '+'-style variables are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+name}", "name")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path params were consumed by the URL template above; drop them from
        // the query-string list.
        // NOTE(review): indices are removed in ascending order, which only
        // stays correct while at most one index is collected — confirm if more
        // path params are ever added.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["name"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once up front (nulls stripped); the
        // cursor is rewound before every retry attempt below.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop: the delegate decides (via `token`, `http_error`,
        // `http_failure`) whether a failed attempt is retried.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is drained into a String so it can be parsed
                    // several ways; the response is then reconstructed for the
                    // delegate and the returned value.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: ResumeQueueRequest) -> ProjectLocationQueueResumeCall<'a, C> {
        self._request = new_value;
        self
    }
    /// Required. The queue name. For example:
    /// `projects/PROJECT_ID/location/LOCATION_ID/queues/QUEUE_ID`
    ///
    /// Sets the *name* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn name(mut self, new_value: &str) -> ProjectLocationQueueResumeCall<'a, C> {
        self._name = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueResumeCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueResumeCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueResumeCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Sets the access control policy for a Queue. Replaces any existing
/// policy.
///
/// Note: The Cloud Console does not check queue-level IAM permissions yet.
/// Project-level permissions are required to use the Cloud Console.
///
/// Authorization requires the following
/// [Google IAM](https://cloud.google.com/iam) permission on the specified
/// resource parent:
///
/// * `cloudtasks.queues.setIamPolicy`
///
/// A builder for the *locations.queues.setIamPolicy* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::SetIamPolicyRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = SetIamPolicyRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_set_iam_policy(req, "resource")
///              .doit();
/// # }
/// ```
// NOTE(review): generated call builder. Fields mirror the setters below and are
// consumed by `doit()`; callers obtain an instance via `ProjectMethods`.
pub struct ProjectLocationQueueSetIamPolicyCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: SetIamPolicyRequest,
    _resource: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueSetIamPolicyCall<'a, C> {}

impl<'a, C> ProjectLocationQueueSetIamPolicyCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have build so far.
    ///
    /// Serializes `_request` as JSON, POSTs it to `v2beta2/{+resource}:setIamPolicy`
    /// and decodes the response into a `Policy`. Delegate callbacks are invoked
    /// at each stage; HTTP/auth failures may be retried when the delegate
    /// returns `client::Retry::After(..)`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Policy)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Resolve the effective delegate: caller-supplied, or a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.setIamPolicy",
                               http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("resource", self._resource.to_string()));
        // Reject caller-supplied additional params that clash with reserved names.
        for &field in ["alt", "resource"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+resource}:setIamPolicy";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Substitute the `{+resource}` URL template; '+'-style variables are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path params were consumed by the URL template above; drop them from
        // the query-string list.
        // NOTE(review): indices are removed in ascending order, which only
        // stays correct while at most one index is collected — confirm if more
        // path params are ever added.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["resource"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once up front (nulls stripped); the
        // cursor is rewound before every retry attempt below.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop: the delegate decides (via `token`, `http_error`,
        // `http_failure`) whether a failed attempt is retried.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is drained into a String so it can be parsed
                    // several ways; the response is then reconstructed for the
                    // delegate and the returned value.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: SetIamPolicyRequest) -> ProjectLocationQueueSetIamPolicyCall<'a, C> {
        self._request = new_value;
        self
    }
    /// REQUIRED: The resource for which the policy is being specified.
    /// See the operation documentation for the appropriate value for this field.
    ///
    /// Sets the *resource* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn resource(mut self, new_value: &str) -> ProjectLocationQueueSetIamPolicyCall<'a, C> {
        self._resource = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueSetIamPolicyCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueSetIamPolicyCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueSetIamPolicyCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Returns permissions that a caller has on a Queue.
/// If the resource does not exist, this will return an empty set of
/// permissions, not a NOT_FOUND error.
///
/// Note: This operation is designed to be used for building permission-aware
/// UIs and command-line tools, not for authorization checking. This operation
/// may "fail open" without warning.
///
/// A builder for the *locations.queues.testIamPermissions* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
///
/// # Example
///
/// Instantiate a resource method builder
///
/// ```test_harness,no_run
/// # extern crate hyper;
/// # extern crate hyper_rustls;
/// # extern crate yup_oauth2 as oauth2;
/// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2;
/// use cloudtasks2_beta2::api::TestIamPermissionsRequest;
/// # #[test] fn egal() {
/// # use std::default::Default;
/// # use oauth2;
/// # use cloudtasks2_beta2::CloudTasks;
///
/// # let secret: ApplicationSecret = Default::default();
/// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder(
/// #     secret,
/// #     yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect,
/// # ).build().await.unwrap();
/// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth);
/// // As the method needs a request, you would usually fill it with the desired information
/// // into the respective structure. Some of the parts shown here might not be applicable !
/// // Values shown here are possibly random and not representative !
/// let mut req = TestIamPermissionsRequest::default();
///
/// // You can configure optional parameters by calling the respective setters at will, and
/// // execute the final call using `doit()`.
/// // Values shown here are possibly random and not representative !
/// let result = hub.projects().locations_queues_test_iam_permissions(req, "resource")
///              .doit();
/// # }
/// ```
// NOTE(review): generated call builder. Fields mirror the setters below and are
// consumed by `doit()`; callers obtain an instance via `ProjectMethods`.
pub struct ProjectLocationQueueTestIamPermissionCall<'a, C>
    where C: 'a {

    hub: &'a CloudTasks<C>,
    _request: TestIamPermissionsRequest,
    _resource: String,
    _delegate: Option<&'a mut dyn client::Delegate>,
    _additional_params: HashMap<String, String>,
    _scopes: BTreeMap<String, ()>
}

impl<'a, C> client::CallBuilder for ProjectLocationQueueTestIamPermissionCall<'a, C> {}

impl<'a, C> ProjectLocationQueueTestIamPermissionCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> {


    /// Perform the operation you have build so far.
    ///
    /// Serializes `_request` as JSON, POSTs it to
    /// `v2beta2/{+resource}:testIamPermissions` and decodes the response into a
    /// `TestIamPermissionsResponse`. Delegate callbacks are invoked at each
    /// stage; HTTP/auth failures may be retried when the delegate returns
    /// `client::Retry::After(..)`.
    pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, TestIamPermissionsResponse)> {
        use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET};
        use std::io::{Read, Seek};
        use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION};
        use client::ToParts;
        // Resolve the effective delegate: caller-supplied, or a default no-op one.
        let mut dd = client::DefaultDelegate;
        let mut dlg: &mut dyn client::Delegate = match self._delegate {
            Some(d) => d,
            None => &mut dd
        };
        dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.queues.testIamPermissions",
                               http_method: hyper::Method::POST });
        let mut params: Vec<(&str, String)> = Vec::with_capacity(4 + self._additional_params.len());
        params.push(("resource", self._resource.to_string()));
        // Reject caller-supplied additional params that clash with reserved names.
        for &field in ["alt", "resource"].iter() {
            if self._additional_params.contains_key(field) {
                dlg.finished(false);
                return Err(client::Error::FieldClash(field));
            }
        }
        for (name, value) in self._additional_params.iter() {
            params.push((&name, value.clone()));
        }
        params.push(("alt", "json".to_string()));

        let mut url = self.hub._base_url.clone() + "v2beta2/{+resource}:testIamPermissions";
        // Fall back to the default scope when the caller configured none.
        if self._scopes.len() == 0 {
            self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ());
        }

        // Substitute the `{+resource}` URL template; '+'-style variables are
        // percent-encoded before substitution.
        for &(find_this, param_name) in [("{+resource}", "resource")].iter() {
            let mut replace_with = String::new();
            for &(name, ref value) in params.iter() {
                if name == param_name {
                    replace_with = value.to_string();
                    break;
                }
            }
            if find_this.as_bytes()[1] == '+' as u8 {
                replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string();
            }
            url = url.replace(find_this, &replace_with);
        }
        // Path params were consumed by the URL template above; drop them from
        // the query-string list.
        // NOTE(review): indices are removed in ascending order, which only
        // stays correct while at most one index is collected — confirm if more
        // path params are ever added.
        {
            let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1);
            for param_name in ["resource"].iter() {
                if let Some(index) = params.iter().position(|t| &t.0 == param_name) {
                    indices_for_removal.push(index);
                }
            }
            for &index in indices_for_removal.iter() {
                params.remove(index);
            }
        }

        let url = url::Url::parse_with_params(&url, params).unwrap();

        // Serialize the request body once up front (nulls stripped); the
        // cursor is rewound before every retry attempt below.
        let mut json_mime_type: mime::Mime = "application/json".parse().unwrap();
        let mut request_value_reader =
            {
                let mut value = json::value::to_value(&self._request).expect("serde to work");
                client::remove_json_null_values(&mut value);
                let mut dst = io::Cursor::new(Vec::with_capacity(128));
                json::to_writer(&mut dst, &value).unwrap();
                dst
            };
        let request_size = request_value_reader.seek(io::SeekFrom::End(0)).unwrap();
        request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();


        // Request/retry loop: the delegate decides (via `token`, `http_error`,
        // `http_failure`) whether a failed attempt is retried.
        loop {
            let authenticator = self.hub.auth.borrow_mut();
            let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await {
                Ok(token) => token.clone(),
                Err(err) => {
                    match  dlg.token(&err) {
                        Some(token) => token,
                        None => {
                            dlg.finished(false);
                            return Err(client::Error::MissingToken(err))
                        }
                    }
                }
            };
            request_value_reader.seek(io::SeekFrom::Start(0)).unwrap();
            let mut req_result = {
                let mut client = &mut *self.hub.client.borrow_mut();
                dlg.pre_request();
                let mut req_builder = hyper::Request::builder().method(hyper::Method::POST).uri(url.clone().into_string())
                        .header(USER_AGENT, self.hub._user_agent.clone())
                        .header(AUTHORIZATION, format!("Bearer {}", token.as_str()));


                        let request = req_builder
                        .header(CONTENT_TYPE, format!("{}", json_mime_type))
                        .header(CONTENT_LENGTH, request_size as u64)
                        .body(hyper::body::Body::from(request_value_reader.get_ref().clone()));

                client.borrow_mut().request(request.unwrap()).await

            };

            match req_result {
                Err(err) => {
                    if let client::Retry::After(d) = dlg.http_error(&err) {
                        sleep(d);
                        continue;
                    }
                    dlg.finished(false);
                    return Err(client::Error::HttpError(err))
                }
                Ok(mut res) => {
                    // The body is drained into a String so it can be parsed
                    // several ways; the response is then reconstructed for the
                    // delegate and the returned value.
                    let (res_parts, res_body) = res.into_parts();
                    let res_body_string: String = String::from_utf8(
                        hyper::body::to_bytes(res_body)
                            .await
                            .unwrap()
                            .into_iter()
                            .collect(),
                    )
                    .unwrap();
                    let reconstructed_result =
                        hyper::Response::from_parts(res_parts, res_body_string.clone().into());

                    if !reconstructed_result.status().is_success() {
                        let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok();
                        let server_error = json::from_str::<client::ServerError>(&res_body_string)
                            .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error))
                            .ok();

                        if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result,
                                                              json_server_error,
                                                              server_error) {
                            sleep(d);
                            continue;
                        }
                        dlg.finished(false);
                        return match json::from_str::<client::ErrorResponse>(&res_body_string){
                            Err(_) => Err(client::Error::Failure(reconstructed_result)),
                            Ok(serr) => Err(client::Error::BadRequest(serr))
                        }
                    }
                    let result_value = {
                        match json::from_str(&res_body_string) {
                            Ok(decoded) => (reconstructed_result, decoded),
                            Err(err) => {
                                dlg.response_json_decode_error(&res_body_string, &err);
                                return Err(client::Error::JsonDecodeError(res_body_string, err));
                            }
                        }
                    };

                    dlg.finished(true);
                    return Ok(result_value)
                }
            }
        }
    }


    ///
    /// Sets the *request* property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn request(mut self, new_value: TestIamPermissionsRequest) -> ProjectLocationQueueTestIamPermissionCall<'a, C> {
        self._request = new_value;
        self
    }
    /// REQUIRED: The resource for which the policy detail is being requested.
    /// See the operation documentation for the appropriate value for this field.
    ///
    /// Sets the *resource* path property to the given value.
    ///
    /// Even though the property as already been set when instantiating this call,
    /// we provide this method for API completeness.
    pub fn resource(mut self, new_value: &str) -> ProjectLocationQueueTestIamPermissionCall<'a, C> {
        self._resource = new_value.to_string();
        self
    }
    /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong
    /// while executing the actual API request.
    ///
    /// It should be used to handle progress information, and to implement a certain level of resilience.
    ///
    /// Sets the *delegate* property to the given value.
    pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationQueueTestIamPermissionCall<'a, C> {
        self._delegate = Some(new_value);
        self
    }

    /// Set any additional parameter of the query string used in the request.
    /// It should be used to set parameters which are not yet available through their own
    /// setters.
    ///
    /// Please note that this method must not be used to set any of the known parameters
    /// which have their own setter method. If done anyway, the request will fail.
    ///
    /// # Additional Parameters
    ///
    /// * *$.xgafv* (query-string) - V1 error format.
    /// * *access_token* (query-string) - OAuth access token.
    /// * *alt* (query-string) - Data format for response.
    /// * *callback* (query-string) - JSONP
    /// * *fields* (query-string) - Selector specifying which fields to include in a partial response.
    /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
    /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user.
    /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks.
    /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
    /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart").
    /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart").
    pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationQueueTestIamPermissionCall<'a, C>
                                                        where T: AsRef<str> {
        self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string());
        self
    }

    /// Identifies the authorization scope for the method you are building.
    ///
    /// Use this method to actively specify which scope should be used, instead the default `Scope` variant
    /// `Scope::CloudPlatform`.
    ///
    /// The `scope` will be added to a set of scopes. This is important as one can maintain access
    /// tokens for more than one scope.
    /// If `None` is specified, then all scopes will be removed and no default scope will be used either.
    /// In that case, you have to specify your API-key using the `key` parameter (see the `param()`
    /// function for details).
    ///
    /// Usually there is more than one suitable scope to authorize an operation, some of which may
    /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be
    /// sufficient, a read-write scope will do as well.
    pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationQueueTestIamPermissionCall<'a, C>
                                                        where T: Into<Option<S>>,
                                                              S: AsRef<str> {
        match scope.into() {
            Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()),
            None => None,
        };
        self
    }
}


/// Gets information about a location.
///
/// A builder for the *locations.get* method supported by a *project* resource.
/// It is not used directly, but through a `ProjectMethods` instance.
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2; /// # use cloudtasks2_beta2::CloudTasks; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_get("name") /// .doit(); /// # } /// ``` pub struct ProjectLocationGetCall<'a, C> where C: 'a { hub: &'a CloudTasks<C>, _name: String, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C> client::CallBuilder for ProjectLocationGetCall<'a, C> {} impl<'a, C> ProjectLocationGetCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> { /// Perform the operation you have build so far. 
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, Location)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.get", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(3 + self._additional_params.len()); params.push(("name", self._name.to_string())); for &field in ["alt", "name"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v2beta2/{+name}"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string(); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 == param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let authenticator = self.hub.auth.borrow_mut(); let token = match 
authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .body(hyper::body::Body::empty()); client.borrow_mut().request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { let (res_parts, res_body) = res.into_parts(); let res_body_string: String = String::from_utf8( hyper::body::to_bytes(res_body) .await .unwrap() .into_iter() .collect(), ) .unwrap(); let reconstructed_result = hyper::Response::from_parts(res_parts, res_body_string.clone().into()); if !reconstructed_result.status().is_success() { let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => Err(client::Error::Failure(reconstructed_result)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { match json::from_str(&res_body_string) { Ok(decoded) => (reconstructed_result, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return 
Err(client::Error::JsonDecodeError(res_body_string, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// Resource name for the location. /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationGetCall<'a, C> { self._name = new_value.to_string(); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationGetCall<'a, C> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. /// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known parameters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *$.xgafv* (query-string) - V1 error format. /// * *access_token* (query-string) - OAuth access token. /// * *alt* (query-string) - Data format for response. /// * *callback* (query-string) - JSONP /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. 
/// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationGetCall<'a, C> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. /// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationGetCall<'a, C> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } } /// Lists information about the supported locations for this service. /// /// A builder for the *locations.list* method supported by a *project* resource. /// It is not used directly, but through a `ProjectMethods` instance. 
/// /// # Example /// /// Instantiate a resource method builder /// /// ```test_harness,no_run /// # extern crate hyper; /// # extern crate hyper_rustls; /// # extern crate yup_oauth2 as oauth2; /// # extern crate google_cloudtasks2_beta2 as cloudtasks2_beta2; /// # #[test] fn egal() { /// # use std::default::Default; /// # use oauth2; /// # use cloudtasks2_beta2::CloudTasks; /// /// # let secret: ApplicationSecret = Default::default(); /// # let auth = yup_oauth2::InstalledFlowAuthenticator::builder( /// # secret, /// # yup_oauth2::InstalledFlowReturnMethod::HTTPRedirect, /// # ).build().await.unwrap(); /// # let mut hub = CloudTasks::new(hyper::Client::with_connector(hyper::net::HttpsConnector::new(hyper_rustls::TlsClient::new())), auth); /// // You can configure optional parameters by calling the respective setters at will, and /// // execute the final call using `doit()`. /// // Values shown here are possibly random and not representative ! /// let result = hub.projects().locations_list("name") /// .page_token("labore") /// .page_size(-43) /// .filter("duo") /// .doit(); /// # } /// ``` pub struct ProjectLocationListCall<'a, C> where C: 'a { hub: &'a CloudTasks<C>, _name: String, _page_token: Option<String>, _page_size: Option<i32>, _filter: Option<String>, _delegate: Option<&'a mut dyn client::Delegate>, _additional_params: HashMap<String, String>, _scopes: BTreeMap<String, ()> } impl<'a, C> client::CallBuilder for ProjectLocationListCall<'a, C> {} impl<'a, C> ProjectLocationListCall<'a, C> where C: BorrowMut<hyper::Client<hyper_rustls::HttpsConnector<hyper::client::connect::HttpConnector>, hyper::body::Body>> { /// Perform the operation you have build so far. 
pub async fn doit(mut self) -> client::Result<(hyper::Response<hyper::body::Body>, ListLocationsResponse)> { use url::percent_encoding::{percent_encode, DEFAULT_ENCODE_SET}; use std::io::{Read, Seek}; use hyper::header::{CONTENT_TYPE, CONTENT_LENGTH, AUTHORIZATION, USER_AGENT, LOCATION}; use client::ToParts; let mut dd = client::DefaultDelegate; let mut dlg: &mut dyn client::Delegate = match self._delegate { Some(d) => d, None => &mut dd }; dlg.begin(client::MethodInfo { id: "cloudtasks.projects.locations.list", http_method: hyper::Method::GET }); let mut params: Vec<(&str, String)> = Vec::with_capacity(6 + self._additional_params.len()); params.push(("name", self._name.to_string())); if let Some(value) = self._page_token { params.push(("pageToken", value.to_string())); } if let Some(value) = self._page_size { params.push(("pageSize", value.to_string())); } if let Some(value) = self._filter { params.push(("filter", value.to_string())); } for &field in ["alt", "name", "pageToken", "pageSize", "filter"].iter() { if self._additional_params.contains_key(field) { dlg.finished(false); return Err(client::Error::FieldClash(field)); } } for (name, value) in self._additional_params.iter() { params.push((&name, value.clone())); } params.push(("alt", "json".to_string())); let mut url = self.hub._base_url.clone() + "v2beta2/{+name}/locations"; if self._scopes.len() == 0 { self._scopes.insert(Scope::CloudPlatform.as_ref().to_string(), ()); } for &(find_this, param_name) in [("{+name}", "name")].iter() { let mut replace_with = String::new(); for &(name, ref value) in params.iter() { if name == param_name { replace_with = value.to_string(); break; } } if find_this.as_bytes()[1] == '+' as u8 { replace_with = percent_encode(replace_with.as_bytes(), DEFAULT_ENCODE_SET).to_string(); } url = url.replace(find_this, &replace_with); } { let mut indices_for_removal: Vec<usize> = Vec::with_capacity(1); for param_name in ["name"].iter() { if let Some(index) = params.iter().position(|t| &t.0 
== param_name) { indices_for_removal.push(index); } } for &index in indices_for_removal.iter() { params.remove(index); } } let url = url::Url::parse_with_params(&url, params).unwrap(); loop { let authenticator = self.hub.auth.borrow_mut(); let token = match authenticator.token(&self._scopes.keys().collect::<Vec<_>>()[..]).await { Ok(token) => token.clone(), Err(err) => { match dlg.token(&err) { Some(token) => token, None => { dlg.finished(false); return Err(client::Error::MissingToken(err)) } } } }; let mut req_result = { let mut client = &mut *self.hub.client.borrow_mut(); dlg.pre_request(); let mut req_builder = hyper::Request::builder().method(hyper::Method::GET).uri(url.clone().into_string()) .header(USER_AGENT, self.hub._user_agent.clone()) .header(AUTHORIZATION, format!("Bearer {}", token.as_str())); let request = req_builder .body(hyper::body::Body::empty()); client.borrow_mut().request(request.unwrap()).await }; match req_result { Err(err) => { if let client::Retry::After(d) = dlg.http_error(&err) { sleep(d); continue; } dlg.finished(false); return Err(client::Error::HttpError(err)) } Ok(mut res) => { let (res_parts, res_body) = res.into_parts(); let res_body_string: String = String::from_utf8( hyper::body::to_bytes(res_body) .await .unwrap() .into_iter() .collect(), ) .unwrap(); let reconstructed_result = hyper::Response::from_parts(res_parts, res_body_string.clone().into()); if !reconstructed_result.status().is_success() { let json_server_error = json::from_str::<client::JsonServerError>(&res_body_string).ok(); let server_error = json::from_str::<client::ServerError>(&res_body_string) .or_else(|_| json::from_str::<client::ErrorResponse>(&res_body_string).map(|r| r.error)) .ok(); if let client::Retry::After(d) = dlg.http_failure(&reconstructed_result, json_server_error, server_error) { sleep(d); continue; } dlg.finished(false); return match json::from_str::<client::ErrorResponse>(&res_body_string){ Err(_) => 
Err(client::Error::Failure(reconstructed_result)), Ok(serr) => Err(client::Error::BadRequest(serr)) } } let result_value = { match json::from_str(&res_body_string) { Ok(decoded) => (reconstructed_result, decoded), Err(err) => { dlg.response_json_decode_error(&res_body_string, &err); return Err(client::Error::JsonDecodeError(res_body_string, err)); } } }; dlg.finished(true); return Ok(result_value) } } } } /// The resource that owns the locations collection, if applicable. /// /// Sets the *name* path property to the given value. /// /// Even though the property as already been set when instantiating this call, /// we provide this method for API completeness. pub fn name(mut self, new_value: &str) -> ProjectLocationListCall<'a, C> { self._name = new_value.to_string(); self } /// The standard list page token. /// /// Sets the *page token* query property to the given value. pub fn page_token(mut self, new_value: &str) -> ProjectLocationListCall<'a, C> { self._page_token = Some(new_value.to_string()); self } /// The standard list page size. /// /// Sets the *page size* query property to the given value. pub fn page_size(mut self, new_value: i32) -> ProjectLocationListCall<'a, C> { self._page_size = Some(new_value); self } /// The standard list filter. /// /// Sets the *filter* query property to the given value. pub fn filter(mut self, new_value: &str) -> ProjectLocationListCall<'a, C> { self._filter = Some(new_value.to_string()); self } /// The delegate implementation is consulted whenever there is an intermediate result, or if something goes wrong /// while executing the actual API request. /// /// It should be used to handle progress information, and to implement a certain level of resilience. /// /// Sets the *delegate* property to the given value. pub fn delegate(mut self, new_value: &'a mut dyn client::Delegate) -> ProjectLocationListCall<'a, C> { self._delegate = Some(new_value); self } /// Set any additional parameter of the query string used in the request. 
/// It should be used to set parameters which are not yet available through their own /// setters. /// /// Please note that this method must not be used to set any of the known parameters /// which have their own setter method. If done anyway, the request will fail. /// /// # Additional Parameters /// /// * *$.xgafv* (query-string) - V1 error format. /// * *access_token* (query-string) - OAuth access token. /// * *alt* (query-string) - Data format for response. /// * *callback* (query-string) - JSONP /// * *fields* (query-string) - Selector specifying which fields to include in a partial response. /// * *key* (query-string) - API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. /// * *oauth_token* (query-string) - OAuth 2.0 token for the current user. /// * *prettyPrint* (query-boolean) - Returns response with indentations and line breaks. /// * *quotaUser* (query-string) - Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. /// * *uploadType* (query-string) - Legacy upload protocol for media (e.g. "media", "multipart"). /// * *upload_protocol* (query-string) - Upload protocol for media (e.g. "raw", "multipart"). pub fn param<T>(mut self, name: T, value: T) -> ProjectLocationListCall<'a, C> where T: AsRef<str> { self._additional_params.insert(name.as_ref().to_string(), value.as_ref().to_string()); self } /// Identifies the authorization scope for the method you are building. /// /// Use this method to actively specify which scope should be used, instead the default `Scope` variant /// `Scope::CloudPlatform`. /// /// The `scope` will be added to a set of scopes. This is important as one can maintain access /// tokens for more than one scope. /// If `None` is specified, then all scopes will be removed and no default scope will be used either. 
/// In that case, you have to specify your API-key using the `key` parameter (see the `param()` /// function for details). /// /// Usually there is more than one suitable scope to authorize an operation, some of which may /// encompass more rights than others. For example, for listing resources, a *read-only* scope will be /// sufficient, a read-write scope will do as well. pub fn add_scope<T, S>(mut self, scope: T) -> ProjectLocationListCall<'a, C> where T: Into<Option<S>>, S: AsRef<str> { match scope.into() { Some(scope) => self._scopes.insert(scope.as_ref().to_string(), ()), None => None, }; self } }
44.700011
781
0.582941
1c8bfe23b6d4d01bde38426abc4603be123d8b80
4,214
#![recursion_limit = "128"] extern crate proc_macro; extern crate proc_macro2; extern crate syn; #[macro_use] extern crate quote; use proc_macro2::Span; #[proc_macro_derive(Uniform)] pub fn uniform(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = proc_macro2::TokenStream::from(input); let ast = syn::parse2(input).unwrap(); proc_macro::TokenStream::from(impl_uniform(&ast)) } fn impl_uniform(ast: &syn::DeriveInput) -> proc_macro2::TokenStream { let name = &ast.ident; let rname = format_ident!("LayoutStd140{}", name); let fields = match &ast.data { syn::Data::Struct(syn::DataStruct { fields: syn::Fields::Named(syn::FieldsNamed { named, .. }), .. }) => named, _ => panic!(), }; let aligned_fields = fields.iter().flat_map(|field| { let (a, f) = aligned_field(field); vec![a, f] }); let field_names = fields.iter().map(|field| field.ident.as_ref().unwrap()); let field_names2 = fields.iter().map(|field| field.ident.as_ref().unwrap()); let dummy = format_ident!("_GLSL_LAYOUT_{}", name); quote! 
{ #[allow(bad_style)] const #dummy: () = { #[repr(C, align(16))] #[derive(Clone, Copy, Debug, Default)] pub struct #rname {#( #aligned_fields, )*} unsafe impl glsl_layout::Std140 for #rname {} impl glsl_layout::Uniform for #rname { type Align = glsl_layout::align::Align16; type Std140 = #rname; fn std140(&self) -> #rname { self.clone() } } impl glsl_layout::Uniform for #name { type Align = glsl_layout::align::Align16; type Std140 = #rname; fn std140(&self) -> #rname { #rname { #(#field_names: self.#field_names2.std140(),)* ..Default::default() } } } }; } } fn aligned_field(field: &syn::Field) -> (syn::Field, syn::Field) { let name = field.ident.as_ref().unwrap(); let align = syn::Field { ty: syn::Type::Path(align_type_for(&field.ty)), ident: Some(format_ident!("_align_{}", name)), attrs: Vec::new(), vis: syn::Visibility::Inherited, colon_token: Some(Default::default()), }; let std140 = syn::Field { ty: syn::Type::Path(std140_type_for(&field.ty)), ..field.clone() }; (align, std140) } fn align_type_for(aligned: &syn::Type) -> syn::TypePath { use std::iter::once; syn::TypePath { qself: Some(syn::QSelf { lt_token: Default::default(), ty: Box::new(aligned.clone()), position: 2, as_token: Some(Default::default()), gt_token: Default::default(), }), path: syn::Path { leading_colon: None, segments: once(syn::PathSegment::from(syn::Ident::new( "glsl_layout", Span::call_site(), ))) .chain(once(syn::Ident::new("Uniform", Span::call_site()).into())) .chain(once(syn::Ident::new("Align", Span::call_site()).into())) .collect(), }, } } fn std140_type_for(aligned: &syn::Type) -> syn::TypePath { use std::iter::once; syn::TypePath { qself: Some(syn::QSelf { lt_token: Default::default(), ty: Box::new(aligned.clone()), position: 2, as_token: Some(Default::default()), gt_token: Default::default(), }), path: syn::Path { leading_colon: None, segments: once(syn::PathSegment::from(syn::Ident::new( "glsl_layout", Span::call_site(), ))) .chain(once( syn::Ident::new("Uniform".into(), 
Span::call_site()).into(), )) .chain(once( syn::Ident::new("Std140".into(), Span::call_site()).into(), )) .collect(), }, } }
29.062069
80
0.507594
5ddfff25081df197db39ff2a3e247ac072beb837
77
mod control; pub use control::*; mod segmentation; pub use segmentation::*;
12.833333
24
0.727273
5be860ef0675962f4eb4b51c5bb4ffc3de609e40
2,420
/* Project Euler Problem 18 Solution * * Problem statement: * By starting at the top of the triangle below and moving * to adjacent numbers on the row below, the maximum total * from top to bottom is 23. * 3 * 7 4 * 2 4 6 * 8 5 9 3 * That is, 3 + 7 + 4 + 9 = 23. Find the maximum total from * top to bottom in `inputs/problem067.in', a 15K text file * containing a triangle with one-hundred rows. * * NOTE: This is a much more difficult version of Problem 18. * It is not possible to try every route to solve this problem, * as there are 2^99 altogether! If you could check one trillion * (10^12) routes every second it would take over twenty billion * years to check them all. * * Solution description: * Brute-force. * * Authors: Daniel Schuette * Date: 2019/03/09 * License: MIT (see ../LICENSE.md) */ use std::fs; // Returns the largest sum of numbers in the triangle located // at `path'. See the problem statement above for additional // information. pub fn find_triangle_sum(path: &str) -> i64 { let file = fs::read_to_string(path).expect("Cannot read file"); let mut data: Vec<Vec<i64>> = vec![]; // read data from file into vector for line in file.split('\n') { let mut item: Vec<i64> = vec![]; for num in line.split(' ') { if let Ok(parsed) = num.parse() { item.push(parsed); } else { continue; } } if !item.is_empty() { data.push(item); } } // Traverse triangle bottom-up and calculate sums. The // approach calculates new rows based on the sums from // the previous row to get to the maximum sum in O(n) // time (where n is the number of items in the triangle). 
let mut last_sums: Vec<i64> = data[data.len() - 1].clone(); let mut new_sums: Vec<i64>; for row in (0..(data.len() - 1)).rev() { new_sums = vec![]; /* reset vector to collect new sums */ for item in 0..data[row].len() { if last_sums[item] > last_sums[item + 1] { new_sums.push(data[row][item] + last_sums[item]); } else { new_sums.push(data[row][item] + last_sums[item + 1]); } } last_sums = new_sums.clone(); } // finally, only the largest some is left last_sums[0] }
32.702703
69
0.583884
69239f9f0159895d7addf3ea48fdfac15a30a630
199
fn main() { let args: Vec<String> = std::env::args().collect(); let query = &args[1]; let filename = &args[2]; println!("query: {}", query); println!("filename: {}", filename); }
24.875
55
0.542714
18ccd58f43cc7625ac15aa2150a56035d1cfc5d0
832
use { crate::append_vec::{StoredAccountMeta, StoredMeta}, solana_sdk::{account::AccountSharedData, clock::Slot}, std::sync::{Arc, RwLock}, }; pub trait AccountsUpdateNotifierInterface: std::fmt::Debug { /// Notified when an account is updated at runtime, due to transaction activities fn notify_account_update(&self, slot: Slot, meta: &StoredMeta, account: &AccountSharedData); /// Notified when the AccountsDb is initialized at start when restored /// from a snapshot. fn notify_account_restore_from_snapshot(&self, slot: Slot, account: &StoredAccountMeta); /// Notified when all accounts have been notified when restoring from a snapshot. fn notify_end_of_restore_from_snapshot(&self); } pub type AccountsUpdateNotifier = Arc<RwLock<dyn AccountsUpdateNotifierInterface + Sync + Send>>;
41.6
97
0.747596