hexsha
stringlengths
40
40
size
int64
2
1.05M
content
stringlengths
2
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
efb876f9603c353a224d9d266ac631df308dcba6
12,915
use crate::slicevec::SliceVec; #[inline] pub fn errno_get() -> i32 { errno::errno().0 } #[derive(Debug)] pub struct SymlinkCounter { max: u16, cur: u16, } impl SymlinkCounter { // This is Linux's limit (sysconf(_SC_SYMLOOP_MAX) always fails on glibc, so we need a fallback) const DEFAULT_SYMLOOP_MAX: u16 = 40; #[inline] pub fn new() -> Self { use core::convert::TryInto; Self { max: unsafe { libc::sysconf(libc::_SC_SYMLOOP_MAX) } .try_into() .unwrap_or(Self::DEFAULT_SYMLOOP_MAX), cur: 0, } } #[inline] pub fn advance(&mut self) -> Result<(), i32> { if self.cur >= self.max { Err(libc::ELOOP) } else { self.cur += 1; Ok(()) } } } #[derive(Debug)] pub struct ComponentStack<'a> { buf: &'a mut [u8], i: usize, } impl<'a> ComponentStack<'a> { #[inline] pub fn new(buf: &'a mut [u8]) -> Self { Self { i: buf.len(), buf } } #[inline] pub fn is_empty(&self) -> bool { self.i == self.buf.len() } pub unsafe fn push_readlink(&mut self, path: *const u8) -> Result<(), i32> { if self.i == 0 { return Err(libc::ENAMETOOLONG); } match libc::readlink( path as *const _, self.buf.as_mut_ptr() as *mut libc::c_char, self.i, ) { -1 => Err(errno_get()), len => { debug_assert!(len > 0); let len = len as usize; // POSIX doesn't specify whether or not the returned string is nul-terminated. // On OSes other than Linux/macOS/*BSD, it *might* be. Let's check. #[cfg(not(any( target_os = "linux", target_os = "android", target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd", target_os = "netbsd", target_os = "macos", target_os = "ios", )))] let len = if self.buf[len - 1] == 0 { len - 1 } else { len }; debug_assert_ne!(self.buf[len - 1], 0); if len >= self.i - 1 { Err(libc::ENAMETOOLONG) } else { self.i -= 1; self.buf[self.i] = 0; self.i -= len; self.buf.copy_within(0..len, self.i); Ok(()) } } } } pub fn next(&mut self) -> Option<&[u8]> { macro_rules! 
skip_slashes_nul { ($self:expr) => {{ while let Some((&b'/', _)) = $self.buf[$self.i..].split_first() { $self.i += 1; } if $self.buf.get($self.i) == Some(&0) { // We've exhausted the first path; advance to the next one $self.i += 1; debug_assert_ne!($self.buf.get($self.i), Some(&0)); } }}; } loop { match self.buf[self.i..].split_first() { // Empty -> nothing left to iterate over None => return None, // The first path starts with a slash Some((&b'/', rest)) => { // Trim leading slashes from the path self.i += 1; skip_slashes_nul!(self); if rest.first() == Some(&b'/') && rest.get(1) != Some(&b'/') { debug_assert!(rest.starts_with(b"/")); debug_assert!(!rest.starts_with(b"//")); return Some(b"//"); } else { debug_assert!(!rest.starts_with(b"/") || rest.starts_with(b"//")); return Some(b"/"); } } // So there's at least one path, and it doesn't start with a slash _ => { // The first byte should NOT be NUL; we remove trailing slashes when we hit the end debug_assert_ne!(self.buf[self.i], 0); let component; if let Some(offset) = self.buf[self.i..].iter().position(|&c| c == b'/' || c == 0) { component = &self.buf[self.i..self.i + offset]; self.i += offset; skip_slashes_nul!(self); } else { // The entire stack did not contain any slashes or NUL bytes. // This means we've got one component left. component = &self.buf[self.i..]; self.i = self.buf.len(); }; if component != b"" && component != b"." 
{ debug_assert!(!component.contains(&0)); break Some(component); } } } } } pub fn clear(&mut self) -> &mut [u8] { self.i = self.buf.len(); &mut self.buf } } #[derive(Clone, Debug)] pub struct ComponentIter<'a>(&'a [u8]); impl<'a> ComponentIter<'a> { #[inline] pub fn new(path: &'a [u8]) -> Result<Self, i32> { if path.is_empty() { Err(libc::ENOENT) } else if path.contains(&0) { Err(libc::EINVAL) } else { Ok(Self(path)) } } pub fn is_empty(&self) -> bool { match self.0.split_first() { None => true, Some((&b'/', _)) => false, _ => self.clone().next().is_none(), } } } impl<'a> Iterator for ComponentIter<'a> { type Item = &'a [u8]; fn next(&mut self) -> Option<Self::Item> { match self.0.split_first() { // Empty -> nothing left to iterate over None => None, // Starts with a slash Some((&b'/', rest)) => { // Trim leading slashes from the path self.0 = strip_leading_slashes(rest); if rest.len() - self.0.len() == 1 { // This means that at the start of this method, `self.0` started with exactly 2 // slashes. One was removed by split_first(), and the other by // strip_leading_slashes(). debug_assert!(rest.starts_with(b"/")); debug_assert!(!rest.starts_with(b"//")); Some(b"//") } else { debug_assert!(!rest.starts_with(b"/") || rest.starts_with(b"//")); Some(b"/") } } // So it's not empty and it doesn't start with a slash _ => loop { let component = match self.0.iter().position(|&c| c == b'/') { // We were able to find a slash inside it // Return the part up to the slash, then strip the rest Some(index) => { let (component, rest) = self.0.split_at(index); self.0 = strip_leading_slashes(rest); component } // Exhausted None if self.0.is_empty() => break None, // No slashes -> only one component left None => core::mem::take(&mut self.0), }; if component != b"." 
{ break Some(component); } }, } } } pub unsafe fn check_isdir(path: *const u8) -> Result<(), i32> { let mut buf = core::mem::MaybeUninit::uninit(); if libc::stat(path as *const _, buf.as_mut_ptr()) < 0 { Err(errno_get()) } else if buf.assume_init().st_mode & libc::S_IFMT == libc::S_IFDIR { Ok(()) } else { Err(libc::ENOTDIR) } } #[inline] pub unsafe fn readlink_empty(path: *const u8) -> Result<(), i32> { if libc::readlink(path as *const _, &mut 0, 1) < 0 { Err(errno_get()) } else { Ok(()) } } pub fn getcwd(buf: &mut SliceVec) -> Result<(), i32> { buf.set_len(buf.capacity()); if unsafe { libc::getcwd(buf.as_mut_ptr() as *mut _, buf.len()) }.is_null() { Err(match errno_get() { libc::EINVAL | libc::ERANGE => libc::ENAMETOOLONG, eno => eno, }) } else if buf[0] != b'/' { Err(libc::ENOENT) } else { buf.set_len(buf.iter().position(|&ch| ch == 0).unwrap()); Ok(()) } } pub fn strip_leading_slashes(mut s: &[u8]) -> &[u8] { while let Some((&b'/', rest)) = s.split_first() { s = rest; } s } #[cfg(test)] mod tests { use super::*; #[test] fn test_strip_leading_slashes() { assert_eq!(strip_leading_slashes(b""), b""); assert_eq!(strip_leading_slashes(b"/"), b""); assert_eq!(strip_leading_slashes(b"//"), b""); assert_eq!(strip_leading_slashes(b"abc"), b"abc"); assert_eq!(strip_leading_slashes(b"abc/"), b"abc/"); assert_eq!(strip_leading_slashes(b"abc/def"), b"abc/def"); assert_eq!(strip_leading_slashes(b"/abc"), b"abc"); assert_eq!(strip_leading_slashes(b"/abc/"), b"abc/"); assert_eq!(strip_leading_slashes(b"/abc/def/"), b"abc/def/"); assert_eq!(strip_leading_slashes(b"//abc/def/"), b"abc/def/"); } #[test] fn test_component_stack() { let mut buf = [0; 100]; let mut stack = ComponentStack::new(&mut buf); assert_eq!(stack.next(), None); let data = b"pqr/\0mno\0/jkl\0ghi/\0///abc/./def/\0//\0/\0."; stack.i = stack.buf.len() - data.len(); stack.buf[stack.i..].copy_from_slice(data); assert_eq!(stack.next().unwrap(), b"pqr"); assert_eq!(stack.next().unwrap(), b"mno"); 
assert_eq!(stack.next().unwrap(), b"/"); assert_eq!(stack.next().unwrap(), b"jkl"); assert_eq!(stack.next().unwrap(), b"ghi"); assert_eq!(stack.next().unwrap(), b"/"); assert_eq!(stack.next().unwrap(), b"abc"); assert_eq!(stack.next().unwrap(), b"def"); assert_eq!(stack.next().unwrap(), b"//"); assert_eq!(stack.next().unwrap(), b"/"); assert_eq!(stack.next(), None); let data = b"abc"; stack.i = stack.buf.len() - data.len(); stack.buf[stack.i..].copy_from_slice(data); assert_eq!(stack.next().unwrap(), b"abc"); assert_eq!(stack.next(), None); let mut stack = ComponentStack::new(&mut []); assert_eq!( unsafe { stack.push_readlink(b"/\0".as_ptr()) }.unwrap_err(), libc::ENAMETOOLONG, ); } #[test] fn test_component_iter() { fn check_it(mut it: ComponentIter, res: &[&[u8]]) { let mut res = res.iter(); while let Some(component) = it.next() { assert_eq!(res.next().cloned(), Some(component)); assert_eq!(it.is_empty(), res.len() == 0); } assert!(it.is_empty()); assert_eq!(res.len(), 0, "{:?}", res); } assert_eq!(ComponentIter::new(b"").unwrap_err(), libc::ENOENT); assert_eq!(ComponentIter::new(b"\0").unwrap_err(), libc::EINVAL); check_it(ComponentIter::new(b"/").unwrap(), &[b"/"]); check_it(ComponentIter::new(b"/abc").unwrap(), &[b"/", b"abc"]); check_it(ComponentIter::new(b"/abc/").unwrap(), &[b"/", b"abc"]); check_it(ComponentIter::new(b"./abc/").unwrap(), &[b"abc"]); check_it(ComponentIter::new(b"/./abc/.").unwrap(), &[b"/", b"abc"]); check_it( ComponentIter::new(b"/../abc/..").unwrap(), &[b"/", b"..", b"abc", b".."], ); } #[test] fn test_getcwd_toolong() { assert_eq!( getcwd(&mut SliceVec::empty(&mut [])).unwrap_err(), libc::ENAMETOOLONG ); assert_eq!( getcwd(&mut SliceVec::empty(&mut [0])).unwrap_err(), libc::ENAMETOOLONG ); } #[test] fn test_check_isdir() { unsafe { assert_eq!(check_isdir(b"\0".as_ptr()).unwrap_err(), libc::ENOENT); assert_eq!( check_isdir(b"/bin/sh\0".as_ptr()).unwrap_err(), libc::ENOTDIR ); check_isdir(b"/\0".as_ptr()).unwrap(); } } #[test] fn 
test_readlink_empty() { unsafe { assert_eq!(readlink_empty(b"\0".as_ptr()).unwrap_err(), libc::ENOENT); assert_eq!(readlink_empty(b"/\0".as_ptr()).unwrap_err(), libc::EINVAL); } } }
31.195652
103
0.459233
90e4958e63bd663d16e9d6e9504ba9d1505abeee
2,438
use std::io::Write; use byteorder::{BE, WriteBytesExt}; use crate::error::{parse_io, TychoStatus}; use crate::Number; use crate::types::ident::NumberIdent; use crate::write::func::write_byte; pub(crate) const NUM_LEN_1: u8 = 0x00; pub(crate) const NUM_LEN_8: u8 = 0x01; pub(crate) const NUM_LEN_16: u8 = 0x02; pub(crate) const NUM_LEN_32: u8 = 0x03; pub(crate) const NUM_LEN_64: u8 = 0x04; pub(crate) const NUM_LEN_128: u8 = 0x05; pub(crate) const NUM_FLOAT: u8 = 0x20; pub(crate) const NUM_SIGNED: u8 = 0x10; pub(crate) fn write_number_ident<W: Write>(writer: &mut W, ident: &NumberIdent) -> TychoStatus { let value = match ident { NumberIdent::Bit => NUM_LEN_1, NumberIdent::Unsigned8 => NUM_LEN_8, NumberIdent::Signed8 => NUM_LEN_8 | NUM_SIGNED, NumberIdent::Unsigned16 => NUM_LEN_16, NumberIdent::Signed16 => NUM_LEN_16 | NUM_SIGNED, NumberIdent::Unsigned32 => NUM_LEN_32, NumberIdent::Signed32 => NUM_LEN_32 | NUM_SIGNED, NumberIdent::Unsigned64 => NUM_LEN_64, NumberIdent::Signed64 => NUM_LEN_64 | NUM_SIGNED, NumberIdent::Unsigned128 => NUM_LEN_128, NumberIdent::Signed128 => NUM_LEN_128 | NUM_SIGNED, NumberIdent::Float32 => NUM_LEN_32 | NUM_FLOAT, NumberIdent::Float64 => NUM_LEN_64 | NUM_FLOAT, }; write_byte(writer, &value) } pub(crate) fn write_number<W: Write>(writer: &mut W, number: &Number) -> TychoStatus { match number { Number::Bit(x) => match x { true => write_byte(writer, &0x01), false => write_byte(writer, &0x00), }, Number::Unsigned8(x) => parse_io(writer.write_u8(*x)), Number::Signed8(x) => parse_io(writer.write_i8(*x)), Number::Unsigned16(x) => parse_io(writer.write_u16::<BE>(*x)), Number::Signed16(x) => parse_io(writer.write_i16::<BE>(*x)), Number::Unsigned32(x) => parse_io(writer.write_u32::<BE>(*x)), Number::Signed32(x) => parse_io(writer.write_i32::<BE>(*x)), Number::Unsigned64(x) => parse_io(writer.write_u64::<BE>(*x)), Number::Signed64(x) => parse_io(writer.write_i64::<BE>(*x)), Number::Unsigned128(x) => parse_io(writer.write_u128::<BE>(*x)), 
Number::Signed128(x) => parse_io(writer.write_i128::<BE>(*x)), Number::Float32(x) => parse_io(writer.write_f32::<BE>(*x)), Number::Float64(x) => parse_io(writer.write_f64::<BE>(*x)), } }
42.77193
96
0.640689
016d18e0d44188294ec231bee2f954fdb1f7f5f5
1,477
use bioinformatics_algorithms::alignment::pairwise::gotoh_space_efficient::GotohSpaceEfficientAligner; use bioinformatics_algorithms::alignment::pairwise::nw_se::NwSpaceEfficientAligner; use bioinformatics_algorithms::alignment::AlignmentResult; use bioinformatics_algorithms::alignment::MatchParams; use bioinformatics_algorithms::alignment::Scoring; use lazy_static::lazy_static; lazy_static! { static ref ALIGNER_GOTOH_SPACE_EFFICIENT: GotohSpaceEfficientAligner<MatchParams> = GotohSpaceEfficientAligner::new(Scoring::from_scores(-5, -1, 2, -1)); static ref ALIGNER_NW_SPACE_EFFICIENT: NwSpaceEfficientAligner<MatchParams> = NwSpaceEfficientAligner::new(Scoring::from_scores(0, -1, 2, -1)); } static X1: &[u8] = b"ATGATGATGATGATGATGATGCG"; // ATGATGATGATGATGATGATGCG static Y1: &[u8] = b"ATGAATGCG"; // ATGA--------------ATGCG (14G, 9M) static X2: &[u8] = b"AAAAAAAGGGTTTCCCCCCCCCC"; // AAAAAAAGGGTTTCCCCCCCCCC static Y2: &[u8] = b"AAAAGGGTTT"; // ---AAAAGGGTTT---------- pub fn show_alignment_result(res: AlignmentResult) { println!("Score, {}; {:?}", res.score, res.as_strings('-')); } pub fn test1() { show_alignment_result(ALIGNER_GOTOH_SPACE_EFFICIENT.global(X1, Y1)); show_alignment_result(ALIGNER_NW_SPACE_EFFICIENT.global(X1, Y1)); } pub fn test2() { show_alignment_result(ALIGNER_GOTOH_SPACE_EFFICIENT.global(X2, Y2)); show_alignment_result(ALIGNER_NW_SPACE_EFFICIENT.global(X2, Y2)); }
43.441176
102
0.743399
f75ab3f431c2a23455f6a471fa2f814e3711b506
5,239
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::fmt; use std::io; use externalfiles::ExternalHtml; #[derive(Clone)] pub struct Layout { pub logo: String, pub favicon: String, pub external_html: ExternalHtml, pub krate: String, pub playground_url: String, } pub struct Page<'a> { pub title: &'a str, pub ty: &'a str, pub root_path: &'a str, pub description: &'a str, pub keywords: &'a str } pub fn render<T: fmt::String, S: fmt::String>( dst: &mut io::Writer, layout: &Layout, page: &Page, sidebar: &S, t: &T) -> io::IoResult<()> { write!(dst, r##"<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <meta name="generator" content="rustdoc"> <meta name="description" content="{description}"> <meta name="keywords" content="{keywords}"> <title>{title}</title> <link rel="stylesheet" type="text/css" href="{root_path}main.css"> {favicon} {in_header} </head> <body class="rustdoc"> <!--[if lte IE 8]> <div class="warning"> This old browser is unsupported and will most likely display funky things. </div> <![endif]--> {before_content} <section class="sidebar"> {logo} {sidebar} </section> <nav class="sub"> <form class="search-form js-only"> <div class="search-container"> <input class="search-input" name="search" autocomplete="off" placeholder="Click or press 'S' to search, '?' for more options..." 
type="search"> </div> </form> </nav> <section id='main' class="content {ty}">{content}</section> <section id='search' class="content hidden"></section> <section class="footer"></section> <div id="help" class="hidden"> <div class="shortcuts"> <h1>Keyboard shortcuts</h1> <dl> <dt>?</dt> <dd>Show this help dialog</dd> <dt>S</dt> <dd>Focus the search field</dd> <dt>&larrb;</dt> <dd>Move up in search results</dd> <dt>&rarrb;</dt> <dd>Move down in search results</dd> <dt>&#9166;</dt> <dd>Go to active search result</dd> </dl> </div> <div class="infos"> <h1>Search tricks</h1> <p> Prefix searches with a type followed by a colon (e.g. <code>fn:</code>) to restrict the search to a given type. </p> <p> Accepted types are: <code>fn</code>, <code>mod</code>, <code>struct</code>, <code>enum</code>, <code>trait</code>, <code>typedef</code> (or <code>tdef</code>). </p> </div> </div> {after_content} <script> window.rootPath = "{root_path}"; window.currentCrate = "{krate}"; window.playgroundUrl = "{play_url}"; </script> <script src="{root_path}jquery.js"></script> <script src="{root_path}main.js"></script> {play_js} <script async src="{root_path}search-index.js"></script> </body> </html>"##, content = *t, root_path = page.root_path, ty = page.ty, logo = if layout.logo.len() == 0 { "".to_string() } else { format!("<a href='{}{}/index.html'>\ <img src='{}' alt='' width='100'></a>", page.root_path, layout.krate, layout.logo) }, title = page.title, description = page.description, keywords = page.keywords, favicon = if layout.favicon.len() == 0 { "".to_string() } else { format!(r#"<link rel="shortcut icon" href="{}">"#, layout.favicon) }, in_header = layout.external_html.in_header, before_content = layout.external_html.before_content, after_content = layout.external_html.after_content, sidebar = *sidebar, krate = layout.krate, play_url = layout.playground_url, play_js = if layout.playground_url.len() == 0 { "".to_string() } else { format!(r#"<script src="{}playpen.js"></script>"#, 
page.root_path) }, ) } pub fn redirect(dst: &mut io::Writer, url: &str) -> io::IoResult<()> { // <script> triggers a redirect before refresh, so this is fine. write!(dst, r##"<!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="refresh" content="0;URL={url}"> </head> <body> <p>Redirecting to <a href="{url}">{url}</a>...</p> <script>location.replace("{url}" + location.search + location.hash);</script> </body> </html>"##, url = url, ) }
29.432584
90
0.552586
d58dbeee37c7b1796cbecb5ffc92b259dcf6fc98
10,433
pub mod subscription { use crate::common::{CerberusError, CerberusResult, User}; pub async fn run( global: &clap::ArgMatches<'_>, params: &clap::ArgMatches<'_>, user_opt: Option<User<'_>>, ) -> CerberusResult<()> { let sub_info_opt = params.value_of("stream").and_then(|stream| { params .value_of("group-id") .map(|group_id| (stream, group_id)) }); let (stream_name, group_id) = sub_info_opt.expect("Both stream and group-id params are previously checked by Clap"); let connection = crate::common::create_connection_default(global).await?; let mut setts = eventstore::PersistentSubscriptionSettings::default(); setts.resolve_link_tos = params.is_present("resolve-link"); setts.extra_stats = params.is_present("extra-stats"); if let Some(param) = params.value_of("start-from") { let start_from = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --start-from number parameter: {}", e )) })?; setts.start_from = start_from; } if let Some(param) = params.value_of("message-timeout") { let millis = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --message-timeout number parameter: {}", e )) })?; setts.msg_timeout = std::time::Duration::from_millis(millis); } if let Some(param) = params.value_of("max-retry-count") { let count = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --max-retry-count number parameter: {}", e )) })?; setts.max_retry_count = count; } if let Some(param) = params.value_of("live-buffer-size") { let size = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --live-buffer-size number parameter: {}", e )) })?; setts.live_buf_size = size; } if let Some(param) = params.value_of("read-batch-size") { let size = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --read-batch-size number parameter: {}", e )) })?; setts.read_batch_size = size; } if let Some(param) = params.value_of("history-buffer-size") { let size = 
param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --history-buffer-size number parameter: {}", e )) })?; setts.history_buf_size = size; } if let Some(param) = params.value_of("checkpoint-after") { let millis = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --checkpoint-after number parameter: {}", e )) })?; setts.checkpoint_after = std::time::Duration::from_millis(millis); } if let Some(param) = params.value_of("min-checkpoint-count") { let count = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --min-checkpoint-count number parameter: {}", e )) })?; setts.min_checkpoint_count = count; } if let Some(param) = params.value_of("max-checkpoint-count") { let count = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --max-checkpoint-count number parameter: {}", e )) })?; setts.max_checkpoint_count = count; } if let Some(param) = params.value_of("max-subs-count") { let count = param.parse().map_err(|e| { CerberusError::user_fault(format!( "Failed to parse --max-subs-count number parameter: {}", e )) })?; setts.max_subs_count = count; } if let Some(param) = params.value_of("consumer-strategy") { let strategy = match param { "dispatch-to-single" => eventstore::SystemConsumerStrategy::DispatchToSingle, "round-robin" => eventstore::SystemConsumerStrategy::RoundRobin, "pinned" => eventstore::SystemConsumerStrategy::Pinned, wrong => { return Err(CerberusError::user_fault(format!( "Unknown --consumer-strategy value: [{}]", wrong ))); } }; setts.named_consumer_strategy = strategy; } let mut cmd = connection .create_persistent_subscription(stream_name, group_id) .settings(setts); if let Some(creds) = user_opt.map(|usr| usr.to_credentials()) { cmd = cmd.credentials(creds); } match cmd.execute().await { Err(e) => match e { // Very unlikely we got that error because it should be handle by // `eventstore::PersistActionError::AccessDenied`. 
eventstore::OperationError::AccessDenied(_) => { Err(CerberusError::user_fault(format!( "Your current credentials doesn't allow you to create \ a persistent subscription on [{}] stream.", stream_name ))) } eventstore::OperationError::StreamDeleted(_) => { Err(CerberusError::user_fault(format!( "You can't create a persistent subscription on [{}] stream because that strean got deleted", stream_name ))) } error => Err(CerberusError::user_fault(format!( "Can't create a persistent subscription on [{}] \ stream because: {}.", stream_name, error ))), }, Ok(result) => match result { eventstore::PersistActionResult::Failure(error) => match error { eventstore::PersistActionError::AccessDenied => { Err(CerberusError::user_fault(format!( "Your current credentials doesn't allow you to create \ a persistent subscription on [{}] stream.", stream_name ))) } eventstore::PersistActionError::AlreadyExists => { Err(CerberusError::user_fault(format!( "A persistent subscription already exists for the stream \ [{}] with the group [{}]", stream_name, group_id ))) } // TODO - Pretty sure that use-case can't exist when creating a persistent // subscription on a non existing stream. EventStore tends to not giving // crap about this. 
eventstore::PersistActionError::DoesNotExist => { Err(CerberusError::user_fault(format!( "You can't create a persistent subscription on stream [{}] \ because [{}] stream doesn't exist", stream_name, stream_name ))) } eventstore::PersistActionError::Fail => Err(CerberusError::user_fault(format!( "Failed to create a persistent subscription on stream \ [{}] with group [{}] but we don't have \ information on why", stream_name, group_id ))), }, _ => { println!("Persistent subscription created."); Ok(()) } }, } } } pub mod projection { use crate::common::{CerberusError, CerberusResult}; use crate::api::{Api, ProjectionConf}; pub async fn run( _: &clap::ArgMatches<'_>, params: &clap::ArgMatches<'_>, api: Api<'_>, ) -> CerberusResult<()> { let script_filepath = params .value_of("SCRIPT") .expect("SCRIPT was check by clap already"); let script = std::fs::read_to_string(script_filepath).map_err(|e| { CerberusError::UserFault(format!( "There was an issue with the script's filepath you submitted: {}", e )) })?; let conf = ProjectionConf { name: params.value_of("name"), kind: params .value_of("kind") .expect("Kind was check by clap already"), enabled: params.is_present("enabled"), emit: params.is_present("emit"), checkpoints: params.is_present("checkpoints"), track_emitted_streams: params.is_present("track-emitted-streams"), script, }; let result = api.create_projection(conf).await?; let info = api.projection_cropped_info(&result.name).await?; if info.status == "Faulted" { let reason = info .reason .unwrap_or_else(|| "<unavailable faulted reason>".to_owned()); return Err(CerberusError::user_fault(format!( "Unsuccessful projection [{}] creation:\n>> {}", result.name, reason ))); } println!("Projection [{}] created", result.name); Ok(()) } }
36.865724
98
0.483849
382f1fcbe82515d6b22d012bfff27d70451f6a3b
15,254
use fluvio_controlplane::UpdateDerivedStreamRequest; use fluvio_controlplane_metadata::message::SmartModuleMsg; use fluvio_controlplane_metadata::partition::Replica; use fluvio_controlplane_metadata::smartmodule::SmartModuleSpec; use fluvio_controlplane_metadata::derivedstream::DerivedStreamSpec; use fluvio_future::timer::sleep; use fluvio_service::ConnectInfo; use std::sync::Arc; use std::io::Error as IoError; use std::io::ErrorKind; use std::time::Duration; use tracing::error; use tracing::{debug, info, trace, instrument}; use async_trait::async_trait; use futures_util::stream::Stream; use fluvio_types::SpuId; use dataplane::api::RequestMessage; use fluvio_controlplane_metadata::spu::store::SpuLocalStorePolicy; use fluvio_service::{FluvioService, wait_for_request}; use fluvio_socket::{FluvioSocket, SocketError, FluvioSink}; use fluvio_controlplane::{ InternalScRequest, InternalScKey, RegisterSpuResponse, UpdateLrsRequest, UpdateReplicaRequest, UpdateSpuRequest, ReplicaRemovedRequest, UpdateSmartModuleRequest, }; use fluvio_controlplane_metadata::message::{ReplicaMsg, Message, SpuMsg}; use crate::core::SharedContext; use crate::stores::{K8ChangeListener}; use crate::stores::partition::{PartitionSpec, PartitionStatus, PartitionResolution}; use crate::stores::spu::SpuSpec; use crate::stores::actions::WSAction; const HEALTH_DURATION: u64 = 90; #[derive(Debug)] pub struct ScInternalService {} impl ScInternalService { pub fn new() -> Self { Self {} } } #[async_trait] impl FluvioService for ScInternalService { type Context = SharedContext; type Request = InternalScRequest; #[instrument(skip(self, context))] async fn respond( self: Arc<Self>, context: SharedContext, socket: FluvioSocket, _connection: ConnectInfo, ) -> Result<(), SocketError> { let (mut sink, mut stream) = socket.split(); let mut api_stream = stream.api_stream::<InternalScRequest, InternalScKey>(); // every SPU need to be validated and registered let spu_id = wait_for_request!(api_stream, 
InternalScRequest::RegisterSpuRequest(req_msg) => { let spu_id = req_msg.request.spu(); let mut status = true; debug!(spu_id,"registration req"); let register_res = if context.spus().store().validate_spu_for_registered(spu_id).await { RegisterSpuResponse::ok() } else { status = false; debug!(spu_id,"spu validation failed"); RegisterSpuResponse::failed_registration() }; let response = req_msg.new_response(register_res); sink.send_response(&response,req_msg.header.api_version()).await?; if !status { return Ok(()) } spu_id } ); info!(spu_id, "SPU connected"); let health_check = context.health().clone(); health_check.update(spu_id, true).await; if let Err(err) = dispatch_loop(context, spu_id, api_stream, sink).await { error!("error with SPU <{}>, error: {}", spu_id, err); } info!(spu_id, "Terminating connection to SPU"); health_check.update(spu_id, false).await; Ok(()) } } // perform internal dispatch #[instrument(name = "ScInternalService", skip(context, api_stream))] async fn dispatch_loop( context: SharedContext, spu_id: SpuId, mut api_stream: impl Stream<Item = Result<InternalScRequest, SocketError>> + Unpin, mut sink: FluvioSink, ) -> Result<(), SocketError> { let mut spu_spec_listener = context.spus().change_listener(); let mut partition_spec_listener = context.partitions().change_listener(); let mut sm_spec_listener = context.smartmodules().change_listener(); let mut ss_spec_listener = context.derivedstreams().change_listener(); // send initial changes let mut health_check_timer = sleep(Duration::from_secs(HEALTH_DURATION)); loop { use tokio::select; use futures_util::stream::StreamExt; send_spu_spec_changes(&mut spu_spec_listener, &mut sink, spu_id).await?; send_replica_spec_changes(&mut partition_spec_listener, &mut sink, spu_id).await?; send_smartmodule_changes(&mut sm_spec_listener, &mut sink, spu_id).await?; send_derivedstream_changes(&mut ss_spec_listener, &mut sink, spu_id).await?; trace!(spu_id, "waiting for SPU channel"); select! 
{ _ = &mut health_check_timer => { debug!("health check timer expired. ending"); break; }, spu_request_msg = api_stream.next() => { if let Some(spu_request) = spu_request_msg { if let Ok(req_message) = spu_request { match req_message { InternalScRequest::UpdateLrsRequest(msg) => { receive_lrs_update(&context,msg.request).await; }, InternalScRequest::RegisterSpuRequest(msg) => { error!("registration req only valid during initialization: {:#?}",msg); return Err(IoError::new(ErrorKind::InvalidData,"register spu request is only valid at init").into()) }, InternalScRequest::ReplicaRemovedRequest(msg) => { receive_replica_remove(&context,msg.request).await; } } // reset timer health_check_timer = sleep(Duration::from_secs(HEALTH_DURATION)); trace!("health check reset"); } else { debug!(spu_id,"no message content, ending processing loop"); break; } } else { debug!(spu_id,"detected end of stream, ending processing loop"); break; } }, _ = spu_spec_listener.listen() => { debug!("spec lister changed"); }, _ = partition_spec_listener.listen() => { debug!("partition lister changed"); } } } Ok(()) } /// send lrs update to metadata stores #[instrument(skip(ctx, requests))] async fn receive_lrs_update(ctx: &SharedContext, requests: UpdateLrsRequest) { let requests = requests.into_requests(); if requests.is_empty() { trace!("no requests, just health check"); return; } else { debug!(?requests, "received lr requests"); } let mut actions = vec![]; let read_guard = ctx.partitions().store().read().await; for lrs_req in requests.into_iter() { if let Some(partition) = read_guard.get(&lrs_req.id) { let mut current_status = partition.inner().status().clone(); let key = lrs_req.id.clone(); let new_status = PartitionStatus::new2( lrs_req.leader, lrs_req.replicas, lrs_req.size, PartitionResolution::Online, ); current_status.merge(new_status); actions.push(WSAction::UpdateStatus::<PartitionSpec>(( key, current_status, ))); } else { error!( "trying to update replica: {}, that doesn't exist", 
lrs_req.id ); return; } } drop(read_guard); for action in actions.into_iter() { ctx.partitions().send_action(action).await; } } #[instrument( skip(ctx,request), fields(replica=%request.id) )] async fn receive_replica_remove(ctx: &SharedContext, request: ReplicaRemovedRequest) { debug!(request=?request); // create action inside to optimize read locking let read_guard = ctx.partitions().store().read().await; let delete_action = if read_guard.contains_key(&request.id) { // force to delete partition regardless if confirm if request.confirm { debug!("force delete"); Some(WSAction::DeleteFinal::<PartitionSpec>(request.id)) } else { debug!("no delete"); None } } else { error!("replica doesn't exist"); None }; drop(read_guard); if let Some(action) = delete_action { ctx.partitions().send_action(action).await; } } /// send spu spec changes only #[instrument(skip(sink))] async fn send_spu_spec_changes( listener: &mut K8ChangeListener<SpuSpec>, sink: &mut FluvioSink, spu_id: SpuId, ) -> Result<(), SocketError> { if !listener.has_change() { return Ok(()); } let changes = listener.sync_spec_changes().await; if changes.is_empty() { return Ok(()); } let epoch = changes.epoch; let is_sync_all = changes.is_sync_all(); let (updates, deletes) = changes.parts(); let request = if is_sync_all { UpdateSpuRequest::with_all(epoch, updates.into_iter().map(|u| u.spec).collect()) } else { let mut changes: Vec<SpuMsg> = updates .into_iter() .map(|v| Message::update(v.spec)) .collect(); let mut deletes = deletes .into_iter() .map(|d| Message::delete(d.spec)) .collect(); changes.append(&mut deletes); UpdateSpuRequest::with_changes(epoch, changes) }; let mut message = RequestMessage::new_request(request); message.get_mut_header().set_client_id("sc"); debug!( spu_id, all = message.request.all.len(), changes = message.request.changes.len(), "sending to spu", ); sink.send_request(&message).await?; Ok(()) } #[instrument(level = "trace", skip(sink))] async fn send_replica_spec_changes( listener: &mut 
K8ChangeListener<PartitionSpec>, sink: &mut FluvioSink, spu_id: SpuId, ) -> Result<(), SocketError> { use crate::stores::ChangeFlag; if !listener.has_change() { trace!("changes is empty, skipping"); return Ok(()); } // we are only interested in spec changes or metadata changes // not rely on partition status deleted because partition status contains offset changes which we don't want let changes = listener .sync_changes_with_filter(&ChangeFlag { spec: true, status: false, meta: true, }) .await; if changes.is_empty() { trace!("spec changes is empty, skipping"); return Ok(()); } let epoch = changes.epoch; let is_sync_all = changes.is_sync_all(); let (updates, deletes) = changes.parts(); let request = if is_sync_all { UpdateReplicaRequest::with_all( epoch, updates .into_iter() .map(|partition| { let replica: Replica = partition.into(); replica }) .collect(), ) } else { let mut changes: Vec<ReplicaMsg> = updates .into_iter() .map(|partition| { let replica: Replica = partition.into(); Message::update(replica) }) .collect(); let mut deletes = deletes .into_iter() .map(|partition| { let replica: Replica = partition.into(); Message::delete(replica) }) .collect(); changes.append(&mut deletes); UpdateReplicaRequest::with_changes(epoch, changes) }; debug!(?request, "sending replica to spu"); let mut message = RequestMessage::new_request(request); message.get_mut_header().set_client_id("sc"); sink.send_request(&message).await?; Ok(()) } #[instrument(level = "trace", skip(sink))] async fn send_smartmodule_changes( listener: &mut K8ChangeListener<SmartModuleSpec>, sink: &mut FluvioSink, spu_id: SpuId, ) -> Result<(), SocketError> { use crate::stores::ChangeFlag; if !listener.has_change() { trace!("changes is empty, skipping"); return Ok(()); } let changes = listener .sync_changes_with_filter(&ChangeFlag { spec: true, status: false, meta: true, }) .await; if changes.is_empty() { trace!("spec changes is empty, skipping"); return Ok(()); } let epoch = changes.epoch; let is_sync_all 
= changes.is_sync_all(); let (updates, deletes) = changes.parts(); let request = if is_sync_all { UpdateSmartModuleRequest::with_all(epoch, updates.into_iter().map(|sm| sm.into()).collect()) } else { let mut changes: Vec<SmartModuleMsg> = updates .into_iter() .map(|sm| Message::update(sm.into())) .collect(); let mut deletes = deletes .into_iter() .map(|sm| Message::delete(sm.into())) .collect(); changes.append(&mut deletes); UpdateSmartModuleRequest::with_changes(epoch, changes) }; debug!(?request, "sending sm to spu"); let mut message = RequestMessage::new_request(request); message.get_mut_header().set_client_id("sc"); sink.send_request(&message).await?; Ok(()) } #[instrument(level = "trace", skip(sink))] async fn send_derivedstream_changes( listener: &mut K8ChangeListener<DerivedStreamSpec>, sink: &mut FluvioSink, spu_id: SpuId, ) -> Result<(), SocketError> { use fluvio_controlplane_metadata::message::{DerivedStreamMsg}; use crate::stores::ChangeFlag; if !listener.has_change() { trace!("changes is empty, skipping"); return Ok(()); } let changes = listener .sync_changes_with_filter(&ChangeFlag { spec: true, status: true, meta: true, }) .await; if changes.is_empty() { trace!("spec changes is empty, skipping"); return Ok(()); } let epoch = changes.epoch; let is_sync_all = changes.is_sync_all(); let (updates, deletes) = changes.parts(); let request = if is_sync_all { UpdateDerivedStreamRequest::with_all( epoch, updates.into_iter().map(|sm| sm.into()).collect(), ) } else { let mut changes: Vec<DerivedStreamMsg> = updates .into_iter() .map(|sm| Message::update(sm.into())) .collect(); let mut deletes = deletes .into_iter() .map(|sm| Message::delete(sm.into())) .collect(); changes.append(&mut deletes); UpdateDerivedStreamRequest::with_changes(epoch, changes) }; debug!(?request, "sending ss to spu"); let mut message = RequestMessage::new_request(request); message.get_mut_header().set_client_id("sc"); sink.send_request(&message).await?; Ok(()) }
31.004065
132
0.585748
ab3afc73025af5474aaf67dcb35c3944390a7e44
2,594
//! Timers. #![feature(proc_macro_hygiene)] #![warn(missing_docs)] #![warn(clippy::pedantic)] #![allow(clippy::type_repetition_in_bounds, clippy::wildcard_imports)] #![no_std] #[cfg(any( stm32_mcu = "stm32f401", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f411", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", stm32_mcu = "stm32l4x1", stm32_mcu = "stm32l4x2", stm32_mcu = "stm32l4x3", stm32_mcu = "stm32l4x5", stm32_mcu = "stm32l4x6", stm32_mcu = "stm32l4r5", stm32_mcu = "stm32l4r7", stm32_mcu = "stm32l4r9", stm32_mcu = "stm32l4s5", stm32_mcu = "stm32l4s7", stm32_mcu = "stm32l4s9" ))] pub mod advanced; #[cfg(any( stm32_mcu = "stm32f100", stm32_mcu = "stm32f101", stm32_mcu = "stm32f103", stm32_mcu = "stm32f107", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", stm32_mcu = "stm32l4x1", stm32_mcu = "stm32l4x2", stm32_mcu = "stm32l4x3", stm32_mcu = "stm32l4x5", stm32_mcu = "stm32l4x6", stm32_mcu = "stm32l4r5", stm32_mcu = "stm32l4r7", stm32_mcu = "stm32l4r9", stm32_mcu = "stm32l4s5", stm32_mcu = "stm32l4s7", stm32_mcu = "stm32l4s9" ))] pub mod basic; #[cfg(any( stm32_mcu = "stm32f401", stm32_mcu = "stm32f405", stm32_mcu = "stm32f407", stm32_mcu = "stm32f410", stm32_mcu = "stm32f411", stm32_mcu = "stm32f412", stm32_mcu = "stm32f413", stm32_mcu = "stm32f427", stm32_mcu = "stm32f429", stm32_mcu = "stm32f446", stm32_mcu = "stm32f469", stm32_mcu = "stm32l4x1", stm32_mcu = "stm32l4x2", stm32_mcu = "stm32l4x3", stm32_mcu = "stm32l4x5", stm32_mcu = "stm32l4x6", stm32_mcu = "stm32l4r5", stm32_mcu = "stm32l4r7", stm32_mcu = "stm32l4r9", stm32_mcu = "stm32l4s5", stm32_mcu = "stm32l4s7", stm32_mcu = "stm32l4s9" ))] pub mod general; #[cfg(any( stm32_mcu = 
"stm32l4x1", stm32_mcu = "stm32l4x2", stm32_mcu = "stm32l4x3", stm32_mcu = "stm32l4x5", stm32_mcu = "stm32l4x6", stm32_mcu = "stm32l4r5", stm32_mcu = "stm32l4r7", stm32_mcu = "stm32l4r9", stm32_mcu = "stm32l4s5", stm32_mcu = "stm32l4s7", stm32_mcu = "stm32l4s9" ))] pub mod low_power;
25.94
70
0.627602
e99b37c8bf13ad59eb7ee5e6e2ffae7de5141876
7,277
use std::ffi::OsStr; use std::{io, ptr}; use widestring::WideCString; use winapi::um::winsvc; use crate::sc_handle::ScHandle; use crate::service::{to_wide, RawServiceInfo, Service, ServiceAccess, ServiceInfo}; use crate::{Error, Result}; bitflags::bitflags! { /// Flags describing access permissions for [`ServiceManager`]. pub struct ServiceManagerAccess: u32 { /// Can connect to service control manager. const CONNECT = winsvc::SC_MANAGER_CONNECT; /// Can create services. const CREATE_SERVICE = winsvc::SC_MANAGER_CREATE_SERVICE; /// Can enumerate services or receive notifications. const ENUMERATE_SERVICE = winsvc::SC_MANAGER_ENUMERATE_SERVICE; } } /// Service manager. pub struct ServiceManager { manager_handle: ScHandle, } impl ServiceManager { /// Private initializer. /// /// # Arguments /// /// * `machine` - The name of machine. Pass `None` to connect to local machine. /// * `database` - The name of database to connect to. Pass `None` to connect to active /// database. fn new( machine: Option<impl AsRef<OsStr>>, database: Option<impl AsRef<OsStr>>, request_access: ServiceManagerAccess, ) -> Result<Self> { let machine_name = to_wide(machine).map_err(Error::InvalidMachineName)?; let database_name = to_wide(database).map_err(Error::InvalidDatabaseName)?; let handle = unsafe { winsvc::OpenSCManagerW( machine_name.map_or(ptr::null(), |s| s.as_ptr()), database_name.map_or(ptr::null(), |s| s.as_ptr()), request_access.bits(), ) }; if handle.is_null() { Err(Error::Winapi(io::Error::last_os_error())) } else { Ok(ServiceManager { manager_handle: unsafe { ScHandle::new(handle) }, }) } } /// Connect to local services database. /// /// # Arguments /// /// * `database` - The name of database to connect to. Pass `None` to connect to active /// database. /// * `request_access` - Desired access permissions. 
pub fn local_computer( database: Option<impl AsRef<OsStr>>, request_access: ServiceManagerAccess, ) -> Result<Self> { ServiceManager::new(None::<&OsStr>, database, request_access) } /// Connect to remote services database. /// /// # Arguments /// /// * `machine` - The name of remote machine. /// * `database` - The name of database to connect to. Pass `None` to connect to active /// database. /// * `request_access` - desired access permissions. pub fn remote_computer( machine: impl AsRef<OsStr>, database: Option<impl AsRef<OsStr>>, request_access: ServiceManagerAccess, ) -> Result<Self> { ServiceManager::new(Some(machine), database, request_access) } /// Create a service. /// /// # Arguments /// /// * `service_info` - The service information that will be saved to the system services /// registry. /// * `service_access` - Desired access permissions for the returned [`Service`] instance. /// /// # Example /// /// ```rust,no_run /// use std::ffi::OsString; /// use std::path::PathBuf; /// use windows_service::service::{ /// ServiceAccess, ServiceErrorControl, ServiceInfo, ServiceStartType, ServiceType, /// }; /// use windows_service::service_manager::{ServiceManager, ServiceManagerAccess}; /// /// fn main() -> windows_service::Result<()> { /// let manager = /// ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CREATE_SERVICE)?; /// /// let my_service_info = ServiceInfo { /// name: OsString::from("my_service"), /// display_name: OsString::from("My service"), /// service_type: ServiceType::OWN_PROCESS, /// start_type: ServiceStartType::OnDemand, /// error_control: ServiceErrorControl::Normal, /// executable_path: PathBuf::from(r"C:\path\to\my\service.exe"), /// launch_arguments: vec![], /// dependencies: vec![], /// account_name: None, // run as System /// account_password: None, /// }; /// /// let my_service = manager.create_service(&my_service_info, ServiceAccess::QUERY_STATUS)?; /// Ok(()) /// } /// ``` pub fn create_service( &self, service_info: 
&ServiceInfo, service_access: ServiceAccess, ) -> Result<Service> { let raw_info = RawServiceInfo::new(service_info)?; let service_handle = unsafe { winsvc::CreateServiceW( self.manager_handle.raw_handle(), raw_info.name.as_ptr(), raw_info.display_name.as_ptr(), service_access.bits(), raw_info.service_type, raw_info.start_type, raw_info.error_control, raw_info.launch_command.as_ptr(), ptr::null(), // load ordering group ptr::null_mut(), // tag id within the load ordering group raw_info .dependencies .as_ref() .map_or(ptr::null(), |s| s.as_ptr()), raw_info .account_name .as_ref() .map_or(ptr::null(), |s| s.as_ptr()), raw_info .account_password .as_ref() .map_or(ptr::null(), |s| s.as_ptr()), ) }; if service_handle.is_null() { Err(Error::Winapi(io::Error::last_os_error())) } else { Ok(Service::new(unsafe { ScHandle::new(service_handle) })) } } /// Open an existing service. /// /// # Arguments /// /// * `name` - The service name. /// * `request_access` - Desired permissions for the returned [`Service`] instance. /// /// # Example /// /// ```rust,no_run /// use windows_service::service::ServiceAccess; /// use windows_service::service_manager::{ServiceManager, ServiceManagerAccess}; /// /// # fn main() -> windows_service::Result<()> { /// let manager = ServiceManager::local_computer(None::<&str>, ServiceManagerAccess::CONNECT)?; /// let my_service = manager.open_service("my_service", ServiceAccess::QUERY_STATUS)?; /// # Ok(()) /// # } /// ``` pub fn open_service( &self, name: impl AsRef<OsStr>, request_access: ServiceAccess, ) -> Result<Service> { let service_name = WideCString::from_os_str(name).map_err(Error::InvalidServiceName)?; let service_handle = unsafe { winsvc::OpenServiceW( self.manager_handle.raw_handle(), service_name.as_ptr(), request_access.bits(), ) }; if service_handle.is_null() { Err(Error::Winapi(io::Error::last_os_error())) } else { Ok(Service::new(unsafe { ScHandle::new(service_handle) })) } } }
34.488152
100
0.564793
031576dd30830480655873f2b3d10179d6f4abb7
743
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Test use of const fn from another crate without a feature gate. // compile-pass // skip-codegen #![allow(unused_variables)] // aux-build:const_fn_lib.rs extern crate const_fn_lib; use const_fn_lib::foo; fn main() { let x = foo(); // use outside a constant is ok }
28.576923
68
0.728129
09d59061243af9cf940e58aba743b5794eeaa8b0
114
use ink_lang as ink; const INPUT: &str = "test"; const _: u32 = ink::selector_bytes!(INPUT); fn main() {}
16.285714
44
0.614035
91fa6273198e0712f5085b7f6ae196d2c20adb6f
1,757
mod shapes; mod operations; use glam::{Mat2, Vec2}; use crate::operations::{Intersection, Invert, Rotation, Subtraction, Translation, Union}; use crate::shapes::{Circle, Hexagon, Horseshoe, Rectangle}; pub trait Sdf : Sized{ fn density(&self, pos: Vec2) -> f32; } pub struct Shapes; impl Shapes { pub fn circle(radius: f32) -> Circle { Circle { radius } } pub fn hexagon(radius: f32) -> Hexagon { Hexagon { radius } } pub fn rectangle(width: f32, height: f32) -> Rectangle { Rectangle { width, height } } pub fn horseshoe(angle: f32, radius: f32, length: f32, width: f32) -> Horseshoe { Horseshoe { angle, radius, length, width, } } } pub trait Ops where Self: Sdf { fn invert(self) -> Invert<Self> { Invert(self) } fn translate(self, x: f32, y: f32) -> Translation<Self> { Translation(self, -Vec2::new(x, y)) } fn rotate(self, angle: f32) -> Rotation<Self> { Rotation(self, Mat2::from_angle(-angle)) } fn subtract<T: Sdf>(self, other: T) -> Subtraction<Self, T> { Subtraction(self, other) } fn union<T: Sdf>(self, other: T) -> Union<Self, T> { Union(self, other) } fn intersection<T: Sdf>(self, other: T) -> Intersection<Self, T> { Intersection(self, other) } } impl<T: Sdf> Ops for T {} #[derive(Debug, Copy, Clone)] pub enum Constant { Full, Empty } impl Sdf for Constant { fn density(&self, pos: Vec2) -> f32 { match self { Constant::Full => f32::NEG_INFINITY, Constant::Empty => f32::INFINITY } } }
23.426667
89
0.54354
ffc73f4c947c5c10b24182713ba0c48338496b73
58,377
/// Video annotation request. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnnotateVideoRequest { /// Input video location. Currently, only /// [Cloud Storage](<https://cloud.google.com/storage/>) URIs are /// supported. URIs must be specified in the following format: /// `gs://bucket-id/object-id` (other URI formats return /// \[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT\]). For /// more information, see [Request /// URIs](<https://cloud.google.com/storage/docs/request-endpoints>). To identify /// multiple videos, a video URI may include wildcards in the `object-id`. /// Supported wildcards: '*' to match 0 or more characters; /// '?' to match 1 character. If unset, the input video should be embedded /// in the request as `input_content`. If set, `input_content` must be unset. #[prost(string, tag="1")] pub input_uri: ::prost::alloc::string::String, /// The video data bytes. /// If unset, the input video(s) should be specified via the `input_uri`. /// If set, `input_uri` must be unset. #[prost(bytes="bytes", tag="6")] pub input_content: ::prost::bytes::Bytes, /// Required. Requested video annotation features. #[prost(enumeration="Feature", repeated, packed="false", tag="2")] pub features: ::prost::alloc::vec::Vec<i32>, /// Additional video context and/or feature-specific parameters. #[prost(message, optional, tag="3")] pub video_context: ::core::option::Option<VideoContext>, /// Optional. Location where the output (in JSON format) should be stored. /// Currently, only [Cloud Storage](<https://cloud.google.com/storage/>) /// URIs are supported. These must be specified in the following format: /// `gs://bucket-id/object-id` (other URI formats return /// \[google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT\]). For /// more information, see [Request /// URIs](<https://cloud.google.com/storage/docs/request-endpoints>). #[prost(string, tag="4")] pub output_uri: ::prost::alloc::string::String, /// Optional. 
Cloud region where annotation should take place. Supported cloud /// regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no /// region is specified, the region will be determined based on video file /// location. #[prost(string, tag="5")] pub location_id: ::prost::alloc::string::String, } /// Video context and/or feature-specific parameters. #[derive(Clone, PartialEq, ::prost::Message)] pub struct VideoContext { /// Video segments to annotate. The segments may overlap and are not required /// to be contiguous or span the whole video. If unspecified, each video is /// treated as a single segment. #[prost(message, repeated, tag="1")] pub segments: ::prost::alloc::vec::Vec<VideoSegment>, /// Config for LABEL_DETECTION. #[prost(message, optional, tag="2")] pub label_detection_config: ::core::option::Option<LabelDetectionConfig>, /// Config for SHOT_CHANGE_DETECTION. #[prost(message, optional, tag="3")] pub shot_change_detection_config: ::core::option::Option<ShotChangeDetectionConfig>, /// Config for EXPLICIT_CONTENT_DETECTION. #[prost(message, optional, tag="4")] pub explicit_content_detection_config: ::core::option::Option<ExplicitContentDetectionConfig>, /// Config for FACE_DETECTION. #[prost(message, optional, tag="5")] pub face_detection_config: ::core::option::Option<FaceDetectionConfig>, /// Config for SPEECH_TRANSCRIPTION. #[prost(message, optional, tag="6")] pub speech_transcription_config: ::core::option::Option<SpeechTranscriptionConfig>, /// Config for TEXT_DETECTION. #[prost(message, optional, tag="8")] pub text_detection_config: ::core::option::Option<TextDetectionConfig>, /// Config for PERSON_DETECTION. #[prost(message, optional, tag="11")] pub person_detection_config: ::core::option::Option<PersonDetectionConfig>, /// Config for OBJECT_TRACKING. #[prost(message, optional, tag="13")] pub object_tracking_config: ::core::option::Option<ObjectTrackingConfig>, } /// Config for LABEL_DETECTION. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct LabelDetectionConfig { /// What labels should be detected with LABEL_DETECTION, in addition to /// video-level labels or segment-level labels. /// If unspecified, defaults to `SHOT_MODE`. #[prost(enumeration="LabelDetectionMode", tag="1")] pub label_detection_mode: i32, /// Whether the video has been shot from a stationary (i.e., non-moving) /// camera. When set to true, might improve detection accuracy for moving /// objects. Should be used with `SHOT_AND_FRAME_MODE` enabled. #[prost(bool, tag="2")] pub stationary_camera: bool, /// Model to use for label detection. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". #[prost(string, tag="3")] pub model: ::prost::alloc::string::String, /// The confidence threshold we perform filtering on the labels from /// frame-level detection. If not set, it is set to 0.4 by default. The valid /// range for this threshold is [0.1, 0.9]. Any value set outside of this /// range will be clipped. /// Note: For best results, follow the default threshold. We will update /// the default threshold everytime when we release a new model. #[prost(float, tag="4")] pub frame_confidence_threshold: f32, /// The confidence threshold we perform filtering on the labels from /// video-level and shot-level detections. If not set, it's set to 0.3 by /// default. The valid range for this threshold is [0.1, 0.9]. Any value set /// outside of this range will be clipped. /// Note: For best results, follow the default threshold. We will update /// the default threshold everytime when we release a new model. #[prost(float, tag="5")] pub video_confidence_threshold: f32, } /// Config for SHOT_CHANGE_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ShotChangeDetectionConfig { /// Model to use for shot change detection. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". 
#[prost(string, tag="1")] pub model: ::prost::alloc::string::String, } /// Config for OBJECT_TRACKING. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObjectTrackingConfig { /// Model to use for object tracking. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". #[prost(string, tag="1")] pub model: ::prost::alloc::string::String, } /// Config for EXPLICIT_CONTENT_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExplicitContentDetectionConfig { /// Model to use for explicit content detection. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". #[prost(string, tag="1")] pub model: ::prost::alloc::string::String, } /// Config for FACE_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FaceDetectionConfig { /// Model to use for face detection. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". #[prost(string, tag="1")] pub model: ::prost::alloc::string::String, /// Whether bounding boxes are included in the face annotation output. #[prost(bool, tag="2")] pub include_bounding_boxes: bool, /// Whether to enable face attributes detection, such as glasses, dark_glasses, /// mouth_open etc. Ignored if 'include_bounding_boxes' is set to false. #[prost(bool, tag="5")] pub include_attributes: bool, } /// Config for PERSON_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PersonDetectionConfig { /// Whether bounding boxes are included in the person detection annotation /// output. #[prost(bool, tag="1")] pub include_bounding_boxes: bool, /// Whether to enable pose landmarks detection. Ignored if /// 'include_bounding_boxes' is set to false. #[prost(bool, tag="2")] pub include_pose_landmarks: bool, /// Whether to enable person attributes detection, such as cloth color (black, /// blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair, /// etc. 
/// Ignored if 'include_bounding_boxes' is set to false. #[prost(bool, tag="3")] pub include_attributes: bool, } /// Config for TEXT_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TextDetectionConfig { /// Language hint can be specified if the language to be detected is known a /// priori. It can increase the accuracy of the detection. Language hint must /// be language code in BCP-47 format. /// /// Automatic language detection is performed if no hint is provided. #[prost(string, repeated, tag="1")] pub language_hints: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, /// Model to use for text detection. /// Supported values: "builtin/stable" (the default if unset) and /// "builtin/latest". #[prost(string, tag="2")] pub model: ::prost::alloc::string::String, } /// Video segment. #[derive(Clone, PartialEq, ::prost::Message)] pub struct VideoSegment { /// Time-offset, relative to the beginning of the video, /// corresponding to the start of the segment (inclusive). #[prost(message, optional, tag="1")] pub start_time_offset: ::core::option::Option<::prost_types::Duration>, /// Time-offset, relative to the beginning of the video, /// corresponding to the end of the segment (inclusive). #[prost(message, optional, tag="2")] pub end_time_offset: ::core::option::Option<::prost_types::Duration>, } /// Video segment level annotation results for label detection. #[derive(Clone, PartialEq, ::prost::Message)] pub struct LabelSegment { /// Video segment where a label was detected. #[prost(message, optional, tag="1")] pub segment: ::core::option::Option<VideoSegment>, /// Confidence that the label is accurate. Range: [0, 1]. #[prost(float, tag="2")] pub confidence: f32, } /// Video frame level annotation results for label detection. #[derive(Clone, PartialEq, ::prost::Message)] pub struct LabelFrame { /// Time-offset, relative to the beginning of the video, corresponding to the /// video frame for this location. 
#[prost(message, optional, tag="1")] pub time_offset: ::core::option::Option<::prost_types::Duration>, /// Confidence that the label is accurate. Range: [0, 1]. #[prost(float, tag="2")] pub confidence: f32, } /// Detected entity from video analysis. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Entity { /// Opaque entity ID. Some IDs may be available in /// [Google Knowledge Graph Search /// API](<https://developers.google.com/knowledge-graph/>). #[prost(string, tag="1")] pub entity_id: ::prost::alloc::string::String, /// Textual description, e.g., `Fixed-gear bicycle`. #[prost(string, tag="2")] pub description: ::prost::alloc::string::String, /// Language code for `description` in BCP-47 format. #[prost(string, tag="3")] pub language_code: ::prost::alloc::string::String, } /// Label annotation. #[derive(Clone, PartialEq, ::prost::Message)] pub struct LabelAnnotation { /// Detected entity. #[prost(message, optional, tag="1")] pub entity: ::core::option::Option<Entity>, /// Common categories for the detected entity. /// For example, when the label is `Terrier`, the category is likely `dog`. And /// in some cases there might be more than one categories e.g., `Terrier` could /// also be a `pet`. #[prost(message, repeated, tag="2")] pub category_entities: ::prost::alloc::vec::Vec<Entity>, /// All video segments where a label was detected. #[prost(message, repeated, tag="3")] pub segments: ::prost::alloc::vec::Vec<LabelSegment>, /// All video frames where a label was detected. #[prost(message, repeated, tag="4")] pub frames: ::prost::alloc::vec::Vec<LabelFrame>, } /// Video frame level annotation results for explicit content. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExplicitContentFrame { /// Time-offset, relative to the beginning of the video, corresponding to the /// video frame for this location. #[prost(message, optional, tag="1")] pub time_offset: ::core::option::Option<::prost_types::Duration>, /// Likelihood of the pornography content.. 
#[prost(enumeration="Likelihood", tag="2")] pub pornography_likelihood: i32, } /// Explicit content annotation (based on per-frame visual signals only). /// If no explicit content has been detected in a frame, no annotations are /// present for that frame. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ExplicitContentAnnotation { /// All video frames where explicit content was detected. #[prost(message, repeated, tag="1")] pub frames: ::prost::alloc::vec::Vec<ExplicitContentFrame>, } /// Normalized bounding box. /// The normalized vertex coordinates are relative to the original image. /// Range: [0, 1]. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NormalizedBoundingBox { /// Left X coordinate. #[prost(float, tag="1")] pub left: f32, /// Top Y coordinate. #[prost(float, tag="2")] pub top: f32, /// Right X coordinate. #[prost(float, tag="3")] pub right: f32, /// Bottom Y coordinate. #[prost(float, tag="4")] pub bottom: f32, } /// For tracking related features. /// An object at time_offset with attributes, and located with /// normalized_bounding_box. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TimestampedObject { /// Normalized Bounding box in a frame, where the object is located. #[prost(message, optional, tag="1")] pub normalized_bounding_box: ::core::option::Option<NormalizedBoundingBox>, /// Time-offset, relative to the beginning of the video, /// corresponding to the video frame for this object. #[prost(message, optional, tag="2")] pub time_offset: ::core::option::Option<::prost_types::Duration>, /// Optional. The attributes of the object in the bounding box. #[prost(message, repeated, tag="3")] pub attributes: ::prost::alloc::vec::Vec<DetectedAttribute>, /// Optional. The detected landmarks. #[prost(message, repeated, tag="4")] pub landmarks: ::prost::alloc::vec::Vec<DetectedLandmark>, } /// A track of an object instance. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Track { /// Video segment of a track. 
#[prost(message, optional, tag="1")] pub segment: ::core::option::Option<VideoSegment>, /// The object with timestamp and attributes per frame in the track. #[prost(message, repeated, tag="2")] pub timestamped_objects: ::prost::alloc::vec::Vec<TimestampedObject>, /// Optional. Attributes in the track level. #[prost(message, repeated, tag="3")] pub attributes: ::prost::alloc::vec::Vec<DetectedAttribute>, /// Optional. The confidence score of the tracked object. #[prost(float, tag="4")] pub confidence: f32, } /// A generic detected attribute represented by name in string format. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DetectedAttribute { /// The name of the attribute, for example, glasses, dark_glasses, mouth_open. /// A full list of supported type names will be provided in the document. #[prost(string, tag="1")] pub name: ::prost::alloc::string::String, /// Detected attribute confidence. Range [0, 1]. #[prost(float, tag="2")] pub confidence: f32, /// Text value of the detection result. For example, the value for "HairColor" /// can be "black", "blonde", etc. #[prost(string, tag="3")] pub value: ::prost::alloc::string::String, } /// Celebrity definition. #[derive(Clone, PartialEq, ::prost::Message)] pub struct Celebrity { /// The resource name of the celebrity. Have the format /// `video-intelligence/kg-mid` indicates a celebrity from preloaded gallery. /// kg-mid is the id in Google knowledge graph, which is unique for the /// celebrity. #[prost(string, tag="1")] pub name: ::prost::alloc::string::String, /// The celebrity name. #[prost(string, tag="2")] pub display_name: ::prost::alloc::string::String, /// Textual description of additional information about the celebrity, if /// applicable. #[prost(string, tag="3")] pub description: ::prost::alloc::string::String, } /// The annotation result of a celebrity face track. RecognizedCelebrity field /// could be empty if the face track does not have any matched celebrities. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct CelebrityTrack { /// Top N match of the celebrities for the face in this track. #[prost(message, repeated, tag="1")] pub celebrities: ::prost::alloc::vec::Vec<celebrity_track::RecognizedCelebrity>, /// A track of a person's face. #[prost(message, optional, tag="3")] pub face_track: ::core::option::Option<Track>, } /// Nested message and enum types in `CelebrityTrack`. pub mod celebrity_track { /// The recognized celebrity with confidence score. #[derive(Clone, PartialEq, ::prost::Message)] pub struct RecognizedCelebrity { /// The recognized celebrity. #[prost(message, optional, tag="1")] pub celebrity: ::core::option::Option<super::Celebrity>, /// Recognition confidence. Range [0, 1]. #[prost(float, tag="2")] pub confidence: f32, } } /// Celebrity recognition annotation per video. #[derive(Clone, PartialEq, ::prost::Message)] pub struct CelebrityRecognitionAnnotation { /// The tracks detected from the input video, including recognized celebrities /// and other detected faces in the video. #[prost(message, repeated, tag="1")] pub celebrity_tracks: ::prost::alloc::vec::Vec<CelebrityTrack>, } /// A generic detected landmark represented by name in string format and a 2D /// location. #[derive(Clone, PartialEq, ::prost::Message)] pub struct DetectedLandmark { /// The name of this landmark, for example, left_hand, right_shoulder. #[prost(string, tag="1")] pub name: ::prost::alloc::string::String, /// The 2D point of the detected landmark using the normalized image /// coordindate system. The normalized coordinates have the range from 0 to 1. #[prost(message, optional, tag="2")] pub point: ::core::option::Option<NormalizedVertex>, /// The confidence score of the detected landmark. Range [0, 1]. #[prost(float, tag="3")] pub confidence: f32, } /// Face detection annotation. #[derive(Clone, PartialEq, ::prost::Message)] pub struct FaceDetectionAnnotation { /// The face tracks with attributes. 
#[prost(message, repeated, tag="3")] pub tracks: ::prost::alloc::vec::Vec<Track>, /// The thumbnail of a person's face. #[prost(bytes="bytes", tag="4")] pub thumbnail: ::prost::bytes::Bytes, } /// Person detection annotation per video. #[derive(Clone, PartialEq, ::prost::Message)] pub struct PersonDetectionAnnotation { /// The detected tracks of a person. #[prost(message, repeated, tag="1")] pub tracks: ::prost::alloc::vec::Vec<Track>, } /// Annotation results for a single video. #[derive(Clone, PartialEq, ::prost::Message)] pub struct VideoAnnotationResults { /// Video file location in /// [Cloud Storage](<https://cloud.google.com/storage/>). #[prost(string, tag="1")] pub input_uri: ::prost::alloc::string::String, /// Video segment on which the annotation is run. #[prost(message, optional, tag="10")] pub segment: ::core::option::Option<VideoSegment>, /// Topical label annotations on video level or user-specified segment level. /// There is exactly one element for each unique label. #[prost(message, repeated, tag="2")] pub segment_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Presence label annotations on video level or user-specified segment level. /// There is exactly one element for each unique label. Compared to the /// existing topical `segment_label_annotations`, this field presents more /// fine-grained, segment-level labels detected in video content and is made /// available only when the client sets `LabelDetectionConfig.model` to /// "builtin/latest" in the request. #[prost(message, repeated, tag="23")] pub segment_presence_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Topical label annotations on shot level. /// There is exactly one element for each unique label. #[prost(message, repeated, tag="3")] pub shot_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Presence label annotations on shot level. There is exactly one element for /// each unique label. 
Compared to the existing topical /// `shot_label_annotations`, this field presents more fine-grained, shot-level /// labels detected in video content and is made available only when the client /// sets `LabelDetectionConfig.model` to "builtin/latest" in the request. #[prost(message, repeated, tag="24")] pub shot_presence_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Label annotations on frame level. /// There is exactly one element for each unique label. #[prost(message, repeated, tag="4")] pub frame_label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Face detection annotations. #[prost(message, repeated, tag="13")] pub face_detection_annotations: ::prost::alloc::vec::Vec<FaceDetectionAnnotation>, /// Shot annotations. Each shot is represented as a video segment. #[prost(message, repeated, tag="6")] pub shot_annotations: ::prost::alloc::vec::Vec<VideoSegment>, /// Explicit content annotation. #[prost(message, optional, tag="7")] pub explicit_annotation: ::core::option::Option<ExplicitContentAnnotation>, /// Speech transcription. #[prost(message, repeated, tag="11")] pub speech_transcriptions: ::prost::alloc::vec::Vec<SpeechTranscription>, /// OCR text detection and tracking. /// Annotations for list of detected text snippets. Each will have list of /// frame information associated with it. #[prost(message, repeated, tag="12")] pub text_annotations: ::prost::alloc::vec::Vec<TextAnnotation>, /// Annotations for list of objects detected and tracked in video. #[prost(message, repeated, tag="14")] pub object_annotations: ::prost::alloc::vec::Vec<ObjectTrackingAnnotation>, /// Annotations for list of logos detected, tracked and recognized in video. #[prost(message, repeated, tag="19")] pub logo_recognition_annotations: ::prost::alloc::vec::Vec<LogoRecognitionAnnotation>, /// Person detection annotations. 
#[prost(message, repeated, tag="20")] pub person_detection_annotations: ::prost::alloc::vec::Vec<PersonDetectionAnnotation>, /// Celebrity recognition annotations. #[prost(message, optional, tag="21")] pub celebrity_recognition_annotations: ::core::option::Option<CelebrityRecognitionAnnotation>, /// If set, indicates an error. Note that for a single `AnnotateVideoRequest` /// some videos may succeed and some may fail. #[prost(message, optional, tag="9")] pub error: ::core::option::Option<super::super::super::rpc::Status>, } /// Video annotation response. Included in the `response` /// field of the `Operation` returned by the `GetOperation` /// call of the `google::longrunning::Operations` service. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnnotateVideoResponse { /// Annotation results for all videos specified in `AnnotateVideoRequest`. #[prost(message, repeated, tag="1")] pub annotation_results: ::prost::alloc::vec::Vec<VideoAnnotationResults>, } /// Annotation progress for a single video. #[derive(Clone, PartialEq, ::prost::Message)] pub struct VideoAnnotationProgress { /// Video file location in /// [Cloud Storage](<https://cloud.google.com/storage/>). #[prost(string, tag="1")] pub input_uri: ::prost::alloc::string::String, /// Approximate percentage processed thus far. Guaranteed to be /// 100 when fully processed. #[prost(int32, tag="2")] pub progress_percent: i32, /// Time when the request was received. #[prost(message, optional, tag="3")] pub start_time: ::core::option::Option<::prost_types::Timestamp>, /// Time of the most recent update. #[prost(message, optional, tag="4")] pub update_time: ::core::option::Option<::prost_types::Timestamp>, /// Specifies which feature is being tracked if the request contains more than /// one feature. #[prost(enumeration="Feature", tag="5")] pub feature: i32, /// Specifies which segment is being tracked if the request contains more than /// one segment. 
#[prost(message, optional, tag="6")] pub segment: ::core::option::Option<VideoSegment>, } /// Video annotation progress. Included in the `metadata` /// field of the `Operation` returned by the `GetOperation` /// call of the `google::longrunning::Operations` service. #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnnotateVideoProgress { /// Progress metadata for all videos specified in `AnnotateVideoRequest`. #[prost(message, repeated, tag="1")] pub annotation_progress: ::prost::alloc::vec::Vec<VideoAnnotationProgress>, } /// Config for SPEECH_TRANSCRIPTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SpeechTranscriptionConfig { /// Required. *Required* The language of the supplied audio as a /// \[BCP-47\](<https://www.rfc-editor.org/rfc/bcp/bcp47.txt>) language tag. /// Example: "en-US". /// See [Language Support](<https://cloud.google.com/speech/docs/languages>) /// for a list of the currently supported language codes. #[prost(string, tag="1")] pub language_code: ::prost::alloc::string::String, /// Optional. Maximum number of recognition hypotheses to be returned. /// Specifically, the maximum number of `SpeechRecognitionAlternative` messages /// within each `SpeechTranscription`. The server may return fewer than /// `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will /// return a maximum of one. If omitted, will return a maximum of one. #[prost(int32, tag="2")] pub max_alternatives: i32, /// Optional. If set to `true`, the server will attempt to filter out /// profanities, replacing all but the initial character in each filtered word /// with asterisks, e.g. "f***". If set to `false` or omitted, profanities /// won't be filtered out. #[prost(bool, tag="3")] pub filter_profanity: bool, /// Optional. A means to provide context to assist the speech recognition. #[prost(message, repeated, tag="4")] pub speech_contexts: ::prost::alloc::vec::Vec<SpeechContext>, /// Optional. 
If 'true', adds punctuation to recognition result hypotheses. /// This feature is only available in select languages. Setting this for /// requests in other languages has no effect at all. The default 'false' value /// does not add punctuation to result hypotheses. NOTE: "This is currently /// offered as an experimental service, complimentary to all users. In the /// future this may be exclusively available as a premium feature." #[prost(bool, tag="5")] pub enable_automatic_punctuation: bool, /// Optional. For file formats, such as MXF or MKV, supporting multiple audio /// tracks, specify up to two tracks. Default: track 0. #[prost(int32, repeated, packed="false", tag="6")] pub audio_tracks: ::prost::alloc::vec::Vec<i32>, /// Optional. If 'true', enables speaker detection for each recognized word in /// the top alternative of the recognition result using a speaker_tag provided /// in the WordInfo. /// Note: When this is true, we send all the words from the beginning of the /// audio for the top alternative in every consecutive response. /// This is done in order to improve our speaker tags as our models learn to /// identify the speakers in the conversation over time. #[prost(bool, tag="7")] pub enable_speaker_diarization: bool, /// Optional. If set, specifies the estimated number of speakers in the /// conversation. If not set, defaults to '2'. Ignored unless /// enable_speaker_diarization is set to true. #[prost(int32, tag="8")] pub diarization_speaker_count: i32, /// Optional. If `true`, the top result includes a list of words and the /// confidence for those words. If `false`, no word-level confidence /// information is returned. The default is `false`. #[prost(bool, tag="9")] pub enable_word_confidence: bool, } /// Provides "hints" to the speech recognizer to favor specific words and phrases /// in the results. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SpeechContext { /// Optional. 
A list of strings containing words and phrases "hints" so that /// the speech recognition is more likely to recognize them. This can be used /// to improve the accuracy for specific words and phrases, for example, if /// specific commands are typically spoken by the user. This can also be used /// to add additional words to the vocabulary of the recognizer. See /// [usage limits](<https://cloud.google.com/speech/limits#content>). #[prost(string, repeated, tag="1")] pub phrases: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } /// A speech recognition result corresponding to a portion of the audio. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SpeechTranscription { /// May contain one or more recognition hypotheses (up to the maximum specified /// in `max_alternatives`). These alternatives are ordered in terms of /// accuracy, with the top (first) alternative being the most probable, as /// ranked by the recognizer. #[prost(message, repeated, tag="1")] pub alternatives: ::prost::alloc::vec::Vec<SpeechRecognitionAlternative>, /// Output only. The \[BCP-47\](<https://www.rfc-editor.org/rfc/bcp/bcp47.txt>) /// language tag of the language in this result. This language code was /// detected to have the most likelihood of being spoken in the audio. #[prost(string, tag="2")] pub language_code: ::prost::alloc::string::String, } /// Alternative hypotheses (a.k.a. n-best list). #[derive(Clone, PartialEq, ::prost::Message)] pub struct SpeechRecognitionAlternative { /// Transcript text representing the words that the user spoke. #[prost(string, tag="1")] pub transcript: ::prost::alloc::string::String, /// Output only. The confidence estimate between 0.0 and 1.0. A higher number /// indicates an estimated greater likelihood that the recognized words are /// correct. This field is set only for the top alternative. /// This field is not guaranteed to be accurate and users should not rely on it /// to be always provided. 
/// The default of 0.0 is a sentinel value indicating `confidence` was not set. #[prost(float, tag="2")] pub confidence: f32, /// Output only. A list of word-specific information for each recognized word. /// Note: When `enable_speaker_diarization` is set to true, you will see all /// the words from the beginning of the audio. #[prost(message, repeated, tag="3")] pub words: ::prost::alloc::vec::Vec<WordInfo>, } /// Word-specific information for recognized words. Word information is only /// included in the response when certain request parameters are set, such /// as `enable_word_time_offsets`. #[derive(Clone, PartialEq, ::prost::Message)] pub struct WordInfo { /// Time offset relative to the beginning of the audio, and /// corresponding to the start of the spoken word. This field is only set if /// `enable_word_time_offsets=true` and only in the top hypothesis. This is an /// experimental feature and the accuracy of the time offset can vary. #[prost(message, optional, tag="1")] pub start_time: ::core::option::Option<::prost_types::Duration>, /// Time offset relative to the beginning of the audio, and /// corresponding to the end of the spoken word. This field is only set if /// `enable_word_time_offsets=true` and only in the top hypothesis. This is an /// experimental feature and the accuracy of the time offset can vary. #[prost(message, optional, tag="2")] pub end_time: ::core::option::Option<::prost_types::Duration>, /// The word corresponding to this set of information. #[prost(string, tag="3")] pub word: ::prost::alloc::string::String, /// Output only. The confidence estimate between 0.0 and 1.0. A higher number /// indicates an estimated greater likelihood that the recognized words are /// correct. This field is set only for the top alternative. /// This field is not guaranteed to be accurate and users should not rely on it /// to be always provided. /// The default of 0.0 is a sentinel value indicating `confidence` was not set. 
#[prost(float, tag="4")] pub confidence: f32, /// Output only. A distinct integer value is assigned for every speaker within /// the audio. This field specifies which one of those speakers was detected to /// have spoken this word. Value ranges from 1 up to diarization_speaker_count, /// and is only set if speaker diarization is enabled. #[prost(int32, tag="5")] pub speaker_tag: i32, } /// A vertex represents a 2D point in the image. /// NOTE: the normalized vertex coordinates are relative to the original image /// and range from 0 to 1. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NormalizedVertex { /// X coordinate. #[prost(float, tag="1")] pub x: f32, /// Y coordinate. #[prost(float, tag="2")] pub y: f32, } /// Normalized bounding polygon for text (that might not be aligned with axis). /// Contains list of the corner points in clockwise order starting from /// top-left corner. For example, for a rectangular bounding box: /// When the text is horizontal it might look like: /// 0----1 /// | | /// 3----2 /// /// When it's clockwise rotated 180 degrees around the top-left corner it /// becomes: /// 2----3 /// | | /// 1----0 /// /// and the vertex order will still be (0, 1, 2, 3). Note that values can be less /// than 0, or greater than 1 due to trignometric calculations for location of /// the box. #[derive(Clone, PartialEq, ::prost::Message)] pub struct NormalizedBoundingPoly { /// Normalized vertices of the bounding polygon. #[prost(message, repeated, tag="1")] pub vertices: ::prost::alloc::vec::Vec<NormalizedVertex>, } /// Video segment level annotation results for text detection. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TextSegment { /// Video segment where a text snippet was detected. #[prost(message, optional, tag="1")] pub segment: ::core::option::Option<VideoSegment>, /// Confidence for the track of detected text. It is calculated as the highest /// over all frames where OCR detected text appears. 
#[prost(float, tag="2")] pub confidence: f32, /// Information related to the frames where OCR detected text appears. #[prost(message, repeated, tag="3")] pub frames: ::prost::alloc::vec::Vec<TextFrame>, } /// Video frame level annotation results for text annotation (OCR). /// Contains information regarding timestamp and bounding box locations for the /// frames containing detected OCR text snippets. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TextFrame { /// Bounding polygon of the detected text for this frame. #[prost(message, optional, tag="1")] pub rotated_bounding_box: ::core::option::Option<NormalizedBoundingPoly>, /// Timestamp of this frame. #[prost(message, optional, tag="2")] pub time_offset: ::core::option::Option<::prost_types::Duration>, } /// Annotations related to one detected OCR text snippet. This will contain the /// corresponding text, confidence value, and frame level information for each /// detection. #[derive(Clone, PartialEq, ::prost::Message)] pub struct TextAnnotation { /// The detected text. #[prost(string, tag="1")] pub text: ::prost::alloc::string::String, /// All video segments where OCR detected text appears. #[prost(message, repeated, tag="2")] pub segments: ::prost::alloc::vec::Vec<TextSegment>, } /// Video frame level annotations for object detection and tracking. This field /// stores per frame location, time offset, and confidence. #[derive(Clone, PartialEq, ::prost::Message)] pub struct ObjectTrackingFrame { /// The normalized bounding box location of this object track for the frame. #[prost(message, optional, tag="1")] pub normalized_bounding_box: ::core::option::Option<NormalizedBoundingBox>, /// The timestamp of the frame in microseconds. #[prost(message, optional, tag="2")] pub time_offset: ::core::option::Option<::prost_types::Duration>, } /// Annotations corresponding to one tracked object. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct ObjectTrackingAnnotation { /// Entity to specify the object category that this track is labeled as. #[prost(message, optional, tag="1")] pub entity: ::core::option::Option<Entity>, /// Object category's labeling confidence of this track. #[prost(float, tag="4")] pub confidence: f32, /// Information corresponding to all frames where this object track appears. /// Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame /// messages in frames. /// Streaming mode: it can only be one ObjectTrackingFrame message in frames. #[prost(message, repeated, tag="2")] pub frames: ::prost::alloc::vec::Vec<ObjectTrackingFrame>, /// Different representation of tracking info in non-streaming batch /// and streaming modes. #[prost(oneof="object_tracking_annotation::TrackInfo", tags="3, 5")] pub track_info: ::core::option::Option<object_tracking_annotation::TrackInfo>, } /// Nested message and enum types in `ObjectTrackingAnnotation`. pub mod object_tracking_annotation { /// Different representation of tracking info in non-streaming batch /// and streaming modes. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum TrackInfo { /// Non-streaming batch mode ONLY. /// Each object track corresponds to one video segment where it appears. #[prost(message, tag="3")] Segment(super::VideoSegment), /// Streaming mode ONLY. /// In streaming mode, we do not know the end time of a tracked object /// before it is completed. Hence, there is no VideoSegment info returned. /// Instead, we provide a unique identifiable integer track_id so that /// the customers can correlate the results of the ongoing /// ObjectTrackAnnotation of the same track_id over time. #[prost(int64, tag="5")] TrackId(i64), } } /// Annotation corresponding to one detected, tracked and recognized logo class. 
#[derive(Clone, PartialEq, ::prost::Message)] pub struct LogoRecognitionAnnotation { /// Entity category information to specify the logo class that all the logo /// tracks within this LogoRecognitionAnnotation are recognized as. #[prost(message, optional, tag="1")] pub entity: ::core::option::Option<Entity>, /// All logo tracks where the recognized logo appears. Each track corresponds /// to one logo instance appearing in consecutive frames. #[prost(message, repeated, tag="2")] pub tracks: ::prost::alloc::vec::Vec<Track>, /// All video segments where the recognized logo appears. There might be /// multiple instances of the same logo class appearing in one VideoSegment. #[prost(message, repeated, tag="3")] pub segments: ::prost::alloc::vec::Vec<VideoSegment>, } /// The top-level message sent by the client for the `StreamingAnnotateVideo` /// method. Multiple `StreamingAnnotateVideoRequest` messages are sent. /// The first message must only contain a `StreamingVideoConfig` message. /// All subsequent messages must only contain `input_content` data. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingAnnotateVideoRequest { /// *Required* The streaming request, which is either a streaming config or /// video content. #[prost(oneof="streaming_annotate_video_request::StreamingRequest", tags="1, 2")] pub streaming_request: ::core::option::Option<streaming_annotate_video_request::StreamingRequest>, } /// Nested message and enum types in `StreamingAnnotateVideoRequest`. pub mod streaming_annotate_video_request { /// *Required* The streaming request, which is either a streaming config or /// video content. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum StreamingRequest { /// Provides information to the annotator, specifing how to process the /// request. The first `AnnotateStreamingVideoRequest` message must only /// contain a `video_config` message. #[prost(message, tag="1")] VideoConfig(super::StreamingVideoConfig), /// The video data to be annotated. 
Chunks of video data are sequentially /// sent in `StreamingAnnotateVideoRequest` messages. Except the initial /// `StreamingAnnotateVideoRequest` message containing only /// `video_config`, all subsequent `AnnotateStreamingVideoRequest` /// messages must only contain `input_content` field. /// Note: as with all bytes fields, protobuffers use a pure binary /// representation (not base64). #[prost(bytes, tag="2")] InputContent(::prost::bytes::Bytes), } } /// Provides information to the annotator that specifies how to process the /// request. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingVideoConfig { /// Requested annotation feature. #[prost(enumeration="StreamingFeature", tag="1")] pub feature: i32, /// Streaming storage option. By default: storage is disabled. #[prost(message, optional, tag="30")] pub storage_config: ::core::option::Option<StreamingStorageConfig>, /// Config for requested annotation feature. #[prost(oneof="streaming_video_config::StreamingConfig", tags="2, 3, 4, 5, 23, 21, 22")] pub streaming_config: ::core::option::Option<streaming_video_config::StreamingConfig>, } /// Nested message and enum types in `StreamingVideoConfig`. pub mod streaming_video_config { /// Config for requested annotation feature. #[derive(Clone, PartialEq, ::prost::Oneof)] pub enum StreamingConfig { /// Config for STREAMING_SHOT_CHANGE_DETECTION. #[prost(message, tag="2")] ShotChangeDetectionConfig(super::StreamingShotChangeDetectionConfig), /// Config for STREAMING_LABEL_DETECTION. #[prost(message, tag="3")] LabelDetectionConfig(super::StreamingLabelDetectionConfig), /// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. #[prost(message, tag="4")] ExplicitContentDetectionConfig(super::StreamingExplicitContentDetectionConfig), /// Config for STREAMING_OBJECT_TRACKING. #[prost(message, tag="5")] ObjectTrackingConfig(super::StreamingObjectTrackingConfig), /// Config for STREAMING_AUTOML_ACTION_RECOGNITION. 
#[prost(message, tag="23")] AutomlActionRecognitionConfig(super::StreamingAutomlActionRecognitionConfig), /// Config for STREAMING_AUTOML_CLASSIFICATION. #[prost(message, tag="21")] AutomlClassificationConfig(super::StreamingAutomlClassificationConfig), /// Config for STREAMING_AUTOML_OBJECT_TRACKING. #[prost(message, tag="22")] AutomlObjectTrackingConfig(super::StreamingAutomlObjectTrackingConfig), } } /// `StreamingAnnotateVideoResponse` is the only message returned to the client /// by `StreamingAnnotateVideo`. A series of zero or more /// `StreamingAnnotateVideoResponse` messages are streamed back to the client. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingAnnotateVideoResponse { /// If set, returns a \[google.rpc.Status][google.rpc.Status\] message that /// specifies the error for the operation. #[prost(message, optional, tag="1")] pub error: ::core::option::Option<super::super::super::rpc::Status>, /// Streaming annotation results. #[prost(message, optional, tag="2")] pub annotation_results: ::core::option::Option<StreamingVideoAnnotationResults>, /// Google Cloud Storage(GCS) URI that stores annotation results of one /// streaming session in JSON format. /// It is the annotation_result_storage_directory /// from the request followed by '/cloud_project_number-session_id'. #[prost(string, tag="3")] pub annotation_results_uri: ::prost::alloc::string::String, } /// Streaming annotation results corresponding to a portion of the video /// that is currently being processed. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingVideoAnnotationResults { /// Shot annotation results. Each shot is represented as a video segment. #[prost(message, repeated, tag="1")] pub shot_annotations: ::prost::alloc::vec::Vec<VideoSegment>, /// Label annotation results. #[prost(message, repeated, tag="2")] pub label_annotations: ::prost::alloc::vec::Vec<LabelAnnotation>, /// Explicit content annotation results. 
#[prost(message, optional, tag="3")] pub explicit_annotation: ::core::option::Option<ExplicitContentAnnotation>, /// Object tracking results. #[prost(message, repeated, tag="4")] pub object_annotations: ::prost::alloc::vec::Vec<ObjectTrackingAnnotation>, } /// Config for STREAMING_SHOT_CHANGE_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingShotChangeDetectionConfig { } /// Config for STREAMING_LABEL_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingLabelDetectionConfig { /// Whether the video has been captured from a stationary (i.e. non-moving) /// camera. When set to true, might improve detection accuracy for moving /// objects. Default: false. #[prost(bool, tag="1")] pub stationary_camera: bool, } /// Config for STREAMING_EXPLICIT_CONTENT_DETECTION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingExplicitContentDetectionConfig { } /// Config for STREAMING_OBJECT_TRACKING. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingObjectTrackingConfig { } /// Config for STREAMING_AUTOML_ACTION_RECOGNITION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingAutomlActionRecognitionConfig { /// Resource name of AutoML model. /// Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` #[prost(string, tag="1")] pub model_name: ::prost::alloc::string::String, } /// Config for STREAMING_AUTOML_CLASSIFICATION. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingAutomlClassificationConfig { /// Resource name of AutoML model. /// Format: /// `projects/{project_number}/locations/{location_id}/models/{model_id}` #[prost(string, tag="1")] pub model_name: ::prost::alloc::string::String, } /// Config for STREAMING_AUTOML_OBJECT_TRACKING. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingAutomlObjectTrackingConfig { /// Resource name of AutoML model. 
/// Format: `projects/{project_id}/locations/{location_id}/models/{model_id}` #[prost(string, tag="1")] pub model_name: ::prost::alloc::string::String, } /// Config for streaming storage option. #[derive(Clone, PartialEq, ::prost::Message)] pub struct StreamingStorageConfig { /// Enable streaming storage. Default: false. #[prost(bool, tag="1")] pub enable_storage_annotation_result: bool, /// Cloud Storage URI to store all annotation results for one client. Client /// should specify this field as the top-level storage directory. Annotation /// results of different sessions will be put into different sub-directories /// denoted by project_name and session_id. All sub-directories will be auto /// generated by program and will be made accessible to client in response /// proto. URIs must be specified in the following format: /// `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage /// bucket created by client and bucket permission shall also be configured /// properly. `object-id` can be arbitrary string that make sense to client. /// Other URI formats will return error and cause Cloud Storage write failure. #[prost(string, tag="3")] pub annotation_result_storage_directory: ::prost::alloc::string::String, } /// Label detection mode. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum LabelDetectionMode { /// Unspecified. Unspecified = 0, /// Detect shot-level labels. ShotMode = 1, /// Detect frame-level labels. FrameMode = 2, /// Detect both shot-level and frame-level labels. ShotAndFrameMode = 3, } /// Bucketized representation of likelihood. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Likelihood { /// Unspecified likelihood. Unspecified = 0, /// Very unlikely. VeryUnlikely = 1, /// Unlikely. Unlikely = 2, /// Possible. Possible = 3, /// Likely. Likely = 4, /// Very likely. 
VeryLikely = 5, } /// Streaming video annotation feature. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum StreamingFeature { /// Unspecified. Unspecified = 0, /// Label detection. Detect objects, such as dog or flower. StreamingLabelDetection = 1, /// Shot change detection. StreamingShotChangeDetection = 2, /// Explicit content detection. StreamingExplicitContentDetection = 3, /// Object detection and tracking. StreamingObjectTracking = 4, /// Action recognition based on AutoML model. StreamingAutomlActionRecognition = 23, /// Video classification based on AutoML model. StreamingAutomlClassification = 21, /// Object detection and tracking based on AutoML model. StreamingAutomlObjectTracking = 22, } /// Video annotation feature. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Feature { /// Unspecified. Unspecified = 0, /// Label detection. Detect objects, such as dog or flower. LabelDetection = 1, /// Shot change detection. ShotChangeDetection = 2, /// Explicit content detection. ExplicitContentDetection = 3, /// Human face detection. FaceDetection = 4, /// Speech transcription. SpeechTranscription = 6, /// OCR text detection and tracking. TextDetection = 7, /// Object detection and tracking. ObjectTracking = 9, /// Logo detection, tracking, and recognition. LogoRecognition = 12, /// Celebrity recognition. CelebrityRecognition = 13, /// Person detection. PersonDetection = 14, } /// Generated client implementations. pub mod video_intelligence_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Service that implements the Video Intelligence API. 
#[derive(Debug, Clone)] pub struct VideoIntelligenceServiceClient<T> { inner: tonic::client::Grpc<T>, } impl<T> VideoIntelligenceServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::Error: Into<StdError>, T::ResponseBody: Body<Data = Bytes> + Send + 'static, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> VideoIntelligenceServiceClient<InterceptedService<T, F>> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service< http::Request<tonic::body::BoxBody>, >>::Error: Into<StdError> + Send + Sync, { VideoIntelligenceServiceClient::new( InterceptedService::new(inner, interceptor), ) } /// Compress requests with `gzip`. /// /// This requires the server to support it otherwise it might respond with an /// error. #[must_use] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } /// Enable decompressing responses with `gzip`. #[must_use] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } /// Performs asynchronous video annotation. Progress and results can be /// retrieved through the `google.longrunning.Operations` interface. /// `Operation.metadata` contains `AnnotateVideoProgress` (progress). /// `Operation.response` contains `AnnotateVideoResponse` (results). 
pub async fn annotate_video( &mut self, request: impl tonic::IntoRequest<super::AnnotateVideoRequest>, ) -> Result< tonic::Response<super::super::super::super::longrunning::Operation>, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.videointelligence.v1p3beta1.VideoIntelligenceService/AnnotateVideo", ); self.inner.unary(request.into_request(), path, codec).await } } } /// Generated client implementations. pub mod streaming_video_intelligence_service_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; /// Service that implements streaming Video Intelligence API. #[derive(Debug, Clone)] pub struct StreamingVideoIntelligenceServiceClient<T> { inner: tonic::client::Grpc<T>, } impl<T> StreamingVideoIntelligenceServiceClient<T> where T: tonic::client::GrpcService<tonic::body::BoxBody>, T::Error: Into<StdError>, T::ResponseBody: Body<Data = Bytes> + Send + 'static, <T::ResponseBody as Body>::Error: Into<StdError> + Send, { pub fn new(inner: T) -> Self { let inner = tonic::client::Grpc::new(inner); Self { inner } } pub fn with_interceptor<F>( inner: T, interceptor: F, ) -> StreamingVideoIntelligenceServiceClient<InterceptedService<T, F>> where F: tonic::service::Interceptor, T::ResponseBody: Default, T: tonic::codegen::Service< http::Request<tonic::body::BoxBody>, Response = http::Response< <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody, >, >, <T as tonic::codegen::Service< http::Request<tonic::body::BoxBody>, >>::Error: Into<StdError> + Send + Sync, { StreamingVideoIntelligenceServiceClient::new( InterceptedService::new(inner, interceptor), ) } /// Compress requests with `gzip`. /// /// This requires the server to support it otherwise it might respond with an /// error. 
#[must_use] pub fn send_gzip(mut self) -> Self { self.inner = self.inner.send_gzip(); self } /// Enable decompressing responses with `gzip`. #[must_use] pub fn accept_gzip(mut self) -> Self { self.inner = self.inner.accept_gzip(); self } /// Performs video annotation with bidirectional streaming: emitting results /// while sending video/audio bytes. /// This method is only available via the gRPC API (not REST). pub async fn streaming_annotate_video( &mut self, request: impl tonic::IntoStreamingRequest< Message = super::StreamingAnnotateVideoRequest, >, ) -> Result< tonic::Response< tonic::codec::Streaming<super::StreamingAnnotateVideoResponse>, >, tonic::Status, > { self.inner .ready() .await .map_err(|e| { tonic::Status::new( tonic::Code::Unknown, format!("Service was not ready: {}", e.into()), ) })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( "/google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService/StreamingAnnotateVideo", ); self.inner.streaming(request.into_streaming_request(), path, codec).await } } }
46.851525
117
0.675454
e259b4dc0771aaf5214f90076228ab068ab51af5
2,657
use time; use std::collections::VecDeque; use std::fs::{File, OpenOptions}; use std::io::BufWriter; use std::io::Error as IOError; use std::io::prelude::*; use std::path::PathBuf; /// Debug is meant to output text to a text buffer and a file, printing /// messages generated by errors and to quantify other metrics for analysis. pub struct Debug { disp_length: usize, max_length: usize, logname: Option<PathBuf>, messages: VecDeque<String>, } pub fn default_log(logging: bool) -> Option<PathBuf> { if logging { let t = time::now(); let ts = match time::strftime("%y%m%d%H%M%S", &t) { Ok(s) => s, Err(f) => panic!(f) }; let p = PathBuf::from(&format!("logs/{}.log", ts)); match File::create(&p) { Ok(_) => {}, Err(f) => panic!(f) } Some(p) } else { None } } impl Debug { pub fn new(cap: usize, logging: bool) -> Debug { Debug { disp_length: cap, max_length: 1024, logname: default_log(logging), messages: VecDeque::with_capacity(1024) } } pub fn output(&self) -> String { let mut out = String::new(); let len = self.messages.len(); let d = self.disp_length; let start = if len > d { len - d } else { 0 }; for msg in self.messages.iter().skip(start).take(d) { out.push_str(msg); } out } pub fn input(&mut self, msg: &str) { if self.messages.len() >= (self.max_length as usize) { let n = (self.max_length as usize) - (self.disp_length as usize); match self.flush(n) { Ok(_) => {}, Err(f) => panic!(f) } } self.messages.push_back(format!("{}", msg)); } fn flush(&mut self, n: usize) -> Result<(), IOError> { if let Some(ref path) = self.logname { match OpenOptions::new().append(true).open(path) { Ok(f) => { let mut buf = BufWriter::new(f); for msg in self.messages.drain(0 .. n) { buf.write(msg.as_bytes())?; } buf.get_mut().sync_all()?; Ok(()) }, Err(f) => Err(f) } } else { self.messages.truncate(self.disp_length as usize); Ok(()) } } pub fn flush_all(&mut self) -> Result<(), IOError> { let n = self.messages.len(); self.flush(n) } }
27.391753
77
0.475725
acb9deebbf10a2d3dc8a3625da928a5a862a547e
2,046
use std::collections::HashSet;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::{self, BufRead};

/// Advent of Code 2020, day 10 part 1: chain every adapter from the outlet
/// (0 jolts) upward in steps of at most 3 jolts, then print the product of
/// the number of 1-jolt differences and 3-jolt differences.
fn main() -> Result<(), InputError> {
    let args: Vec<String> = env::args().collect();
    // Guard clause: the input file path is the single required argument.
    let filename = match args.get(1) {
        Some(name) => name,
        None => return Err(InputError::InvalidArguments),
    };

    let reader = io::BufReader::new(File::open(filename).expect("failed to open file"));
    let mut remaining: HashSet<u32> = reader
        .lines()
        .map(|line| line.unwrap().parse::<u32>().unwrap())
        .collect();

    let mut ones = 0u32;
    // Starts at 1: the device's built-in adapter is always 3 jolts above the
    // highest adapter in the bag.
    let mut threes = 1u32;
    let mut joltage = 0u32;

    while !remaining.is_empty() {
        // Prefer the smallest usable adapter (+1 before +2 before +3).
        let next = (1u32..=3).map(|d| joltage + d).find(|j| remaining.contains(j));
        match next {
            Some(chosen) => {
                remaining.remove(&chosen);
                match chosen - joltage {
                    1 => ones += 1,
                    3 => threes += 1,
                    _ => {}
                }
                joltage = chosen;
            }
            None => return Err(InputError::CouldNotFindCompatibleAdapter(joltage)),
        }
    }

    println!("{}", ones * threes);
    Ok(())
}

/// Errors this program can report to the shell.
#[derive(Debug)]
enum InputError {
    InvalidArguments,
    CouldNotFindCompatibleAdapter(u32),
}

impl fmt::Display for InputError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            InputError::InvalidArguments => {
                write!(f, "usage: day10part1 <path to input text file>")
            }
            InputError::CouldNotFindCompatibleAdapter(src) => write!(
                f,
                "could not find compatible adapter for source joltage {}",
                src
            ),
        }
    }
}

impl std::error::Error for InputError {}
30.088235
82
0.5435
233abc2b2b6fc66d45fdcf0f62e33a3158bc96b2
30,360
// Copyright (c) Microsoft. All rights reserved. #![deny(rust_2018_idioms)] #![warn(clippy::all, clippy::pedantic)] #![allow( clippy::default_trait_access, clippy::let_and_return, clippy::let_underscore_drop, clippy::let_unit_value, clippy::missing_errors_doc, clippy::module_name_repetitions, clippy::must_use_candidate, clippy::too_many_lines, clippy::type_complexity )] #![allow(dead_code)] use std::sync::Arc; use async_trait::async_trait; use aziot_identityd_config as config; pub mod auth; pub mod configext; pub mod error; mod http; pub mod identity; use config_common::watcher::UpdateConfig; pub use error::{Error, InternalError}; /// URI query parameter that identifies module identity type. const ID_TYPE_AZIOT: &str = "aziot"; /// URI query parameter that identifies local identity type. const ID_TYPE_LOCAL: &str = "local"; macro_rules! match_id_type { ($id_type:ident { $( $type:ident => $action:block ,)+ }) => { if let Some(id_type) = $id_type { match id_type { $( $type => $action, )+ _ => Err(Error::invalid_parameter("type", format!("invalid type: {}", id_type))), } } else { Err(Error::invalid_parameter("type", "missing parameter")) } }; } #[derive(Debug)] pub enum ReprovisionTrigger { ConfigurationFileUpdate, Api, Startup, } pub async fn main( settings: config::Settings, config_path: std::path::PathBuf, config_directory_path: std::path::PathBuf, ) -> Result<(http_common::Connector, http::Service), Box<dyn std::error::Error>> { let settings = settings.check().map_err(InternalError::BadSettings)?; let homedir_path = &settings.homedir; let connector = settings.endpoints.aziot_identityd.clone(); if !homedir_path.exists() { let () = std::fs::create_dir_all(&homedir_path).map_err(error::InternalError::CreateHomeDir)?; } let api = Api::new(settings.clone())?; let api = Arc::new(futures_util::lock::Mutex::new(api)); let api_startup = api.clone(); let mut api_ = api_startup.lock().await; let _ = api_ .update_config_inner(settings.clone(), ReprovisionTrigger::Startup) 
.await?; config_common::watcher::start_watcher(config_path, config_directory_path, api.clone()); let service = http::Service { api }; Ok((connector, service)) } pub struct Api { pub settings: config::Settings, pub authenticator: Box<dyn auth::authentication::Authenticator<Error = Error> + Send + Sync>, pub authorizer: Box<dyn auth::authorization::Authorizer<Error = Error> + Send + Sync>, pub id_manager: identity::IdentityManager, pub local_identities: std::collections::BTreeMap< aziot_identity_common::ModuleId, Option<aziot_identity_common::LocalIdOpts>, >, key_client: Arc<aziot_key_client_async::Client>, key_engine: Arc<futures_util::lock::Mutex<openssl2::FunctionalEngine>>, cert_client: Arc<aziot_cert_client_async::Client>, tpm_client: Arc<aziot_tpm_client_async::Client>, proxy_uri: Option<hyper::Uri>, } impl Api { pub fn new(settings: config::Settings) -> Result<Self, Error> { let key_service_connector = settings.endpoints.aziot_keyd.clone(); let key_client = { let key_client = aziot_key_client_async::Client::new( aziot_key_common_http::ApiVersion::V2021_05_01, key_service_connector.clone(), ); let key_client = Arc::new(key_client); key_client }; let key_engine = { let key_client = aziot_key_client::Client::new( aziot_key_common_http::ApiVersion::V2021_05_01, key_service_connector, ); let key_client = Arc::new(key_client); let key_engine = aziot_key_openssl_engine::load(key_client) .map_err(|err| Error::Internal(InternalError::LoadKeyOpensslEngine(err)))?; let key_engine = Arc::new(futures_util::lock::Mutex::new(key_engine)); key_engine }; let cert_client = { let cert_service_connector = settings.endpoints.aziot_certd.clone(); let cert_client = aziot_cert_client_async::Client::new( aziot_cert_common_http::ApiVersion::V2020_09_01, cert_service_connector, ); let cert_client = Arc::new(cert_client); cert_client }; let tpm_client = { let tpm_service_connector = settings.endpoints.aziot_tpmd.clone(); let tpm_client = aziot_tpm_client_async::Client::new( 
aziot_tpm_common_http::ApiVersion::V2020_09_01, tpm_service_connector, ); let tpm_client = Arc::new(tpm_client); tpm_client }; let proxy_uri = http_common::get_proxy_uri(None) .map_err(|err| Error::Internal(InternalError::InvalidProxyUri(Box::new(err))))?; let id_manager = identity::IdentityManager::new( settings.homedir.clone(), key_client.clone(), key_engine.clone(), cert_client.clone(), tpm_client.clone(), None, proxy_uri.clone(), ); Ok(Api { settings, authenticator: Box::new(auth::authentication::DefaultAuthenticator), authorizer: Box::new(auth::authorization::DefaultAuthorizer), id_manager, local_identities: Default::default(), key_client, key_engine, cert_client, tpm_client, proxy_uri, }) } pub async fn get_caller_identity( &self, auth_id: auth::AuthId, ) -> Result<aziot_identity_common::Identity, Error> { if self.authorizer.authorize(auth::Operation { auth_id: auth_id.clone(), op_type: auth::OperationType::GetDevice, })? { return self.id_manager.get_device_identity().await; } else if let crate::auth::AuthId::HostProcess(ref caller_principal) = auth_id { if self.authorizer.authorize(auth::Operation { auth_id: auth_id.clone(), op_type: auth::OperationType::GetModule(caller_principal.name.0.clone()), })? { return self .id_manager .get_module_identity(&caller_principal.name.0) .await; } } return Err(Error::Authorization); } pub async fn get_identity( &self, auth_id: auth::AuthId, id_type: Option<&str>, module_id: &str, ) -> Result<aziot_identity_common::Identity, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::GetModule(String::from(module_id)), })? { return Err(Error::Authorization); } match_id_type!(id_type { ID_TYPE_AZIOT => { self.id_manager.get_module_identity(module_id).await }, ID_TYPE_LOCAL => { // Callers of this API must have a local identity specified in the principals list. 
match self .local_identities .get(&aziot_identity_common::ModuleId(module_id.to_owned())) { Some(opts) => self.issue_local_identity(module_id, opts.as_ref()).await, None => Err( Error::invalid_parameter( "moduleId", format!("local identity for {} doesn't exist", module_id) ) ), } }, }) } pub async fn get_identities( &self, auth_id: auth::AuthId, id_type: Option<&str>, ) -> Result<Vec<aziot_identity_common::Identity>, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::GetAllHubModules, })? { return Err(Error::Authorization); } match_id_type!(id_type { ID_TYPE_AZIOT => { self.id_manager.get_module_identities().await }, }) } pub async fn get_device_identity( &self, auth_id: auth::AuthId, _idtype: &str, ) -> Result<aziot_identity_common::Identity, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::GetDevice, })? { return Err(Error::Authorization); } self.id_manager.get_device_identity().await } pub async fn create_identity( &self, auth_id: auth::AuthId, id_type: Option<&str>, module_id: &str, opts: Option<aziot_identity_common_http::create_module_identity::CreateModuleOpts>, ) -> Result<aziot_identity_common::Identity, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::CreateModule(String::from(module_id)), })? { return Err(Error::Authorization); } match_id_type!( id_type { ID_TYPE_AZIOT => { self.id_manager.create_module_identity(module_id).await }, ID_TYPE_LOCAL => { if self.local_identities .get(&aziot_identity_common::ModuleId(module_id.to_owned())) .is_some() { // Don't create a local identity for a module in the principals list. 
Err(Error::invalid_parameter( "moduleId", format!("local identity for {} already exists", module_id) )) } else { let opts = opts.map(|opts| { match opts { aziot_identity_common_http::create_module_identity::CreateModuleOpts::LocalIdOpts(opts) => opts, // Currently, the only supported opts variant is LocalIdOpts. // But if more variants are added in the future, they should be rejected here. } }); self.issue_local_identity(module_id, opts.as_ref()).await } }, }) } pub async fn update_identity( &self, auth_id: auth::AuthId, id_type: Option<&str>, module_id: &str, ) -> Result<aziot_identity_common::Identity, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::UpdateModule(String::from(module_id)), })? { return Err(Error::Authorization); } match_id_type!(id_type { ID_TYPE_AZIOT => { self.id_manager.update_module_identity(module_id).await }, }) } pub async fn delete_identity( &self, auth_id: auth::AuthId, id_type: Option<&str>, module_id: &str, ) -> Result<(), Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::DeleteModule(String::from(module_id)), })? { return Err(Error::Authorization); } match_id_type!(id_type { ID_TYPE_AZIOT => { self.id_manager.delete_module_identity(module_id).await }, }) } pub async fn get_trust_bundle( &self, auth_id: auth::AuthId, ) -> Result<aziot_cert_common_http::Pem, Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::GetTrustBundle, })? { return Err(Error::Authorization); } //TODO: invoke get trust bundle Ok(aziot_cert_common_http::Pem { 0: std::vec::Vec::default(), }) } pub async fn reprovision_device( &mut self, auth_id: auth::AuthId, trigger: ReprovisionTrigger, ) -> Result<(), Error> { if !self.authorizer.authorize(auth::Operation { auth_id, op_type: auth::OperationType::ReprovisionDevice, })? { return Err(Error::Authorization); } log::info!("Provisioning starting. 
Reason: {:?}", trigger); let _ = match trigger { ReprovisionTrigger::ConfigurationFileUpdate => { // For now, skip reprovisioning if there's a valid backup. This means config file // updates will only reconcile identities. self.id_manager .provision_device(self.settings.provisioning.clone(), true) .await? } ReprovisionTrigger::Api => { // Clear the backed up device state before reprovisioning. // If this fails, log a warning but continue with reprovisioning. let mut backup_file = self.settings.homedir.clone(); backup_file.push(identity::DEVICE_BACKUP_LOCATION); if let Err(err) = std::fs::remove_file(backup_file) { if err.kind() != std::io::ErrorKind::NotFound { log::warn!( "Failed to clear device state before reprovisioning: {}", err ); } } self.id_manager .provision_device(self.settings.provisioning.clone(), false) .await? } ReprovisionTrigger::Startup => { self.id_manager .provision_device(self.settings.provisioning.clone(), true) .await? } }; log::info!("Provisioning complete."); log::info!("Identity reconciliation started. Reason: {:?}", trigger); if let Err(err) = self .id_manager .reconcile_hub_identities(self.settings.clone()) .await { // For Hub client errors only, attempt to reprovision with Hub and retry reconciliation. match err { Error::HubClient(_) => match trigger { ReprovisionTrigger::Startup | ReprovisionTrigger::ConfigurationFileUpdate => { log::info!("Could not reconcile Identities with current device data. Reprovisioning."); self.id_manager .provision_device(self.settings.provisioning.clone(), false) .await?; log::info!("Successfully reprovisioned."); self.id_manager .reconcile_hub_identities(self.settings.clone()) .await?; } // Don't attempt to reprovision if this function was called by the reprovision API. // The reprovision API provided a fresh reprovision, so failing to reconcile in this // scenario should not be retried. 
ReprovisionTrigger::Api => { return Err(err); } }, _ => return Err(err), } } log::info!("Identity reconciliation complete."); Ok(()) } async fn issue_local_identity( &self, module_id: &str, opts: Option<&aziot_identity_common::LocalIdOpts>, ) -> Result<aziot_identity_common::Identity, Error> { let localid = self.settings.localid.as_ref().ok_or_else(|| { Error::Internal(InternalError::BadSettings(std::io::Error::new( std::io::ErrorKind::InvalidInput, "no local id settings specified", ))) })?; let local_identity = { let attributes = opts.as_ref() .map_or( aziot_identity_common::LocalIdAttr::default(), |opts| match opts { aziot_identity_common::LocalIdOpts::X509 { attributes } => *attributes, }, ); // Generate new private key for local identity. let rsa = openssl::rsa::Rsa::generate(2048) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let private_key = openssl::pkey::PKey::from_rsa(rsa) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let private_key_pem = private_key .private_key_to_pem_pkcs8() .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let private_key_pem = std::string::String::from_utf8(private_key_pem) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let public_key = private_key .public_key_to_pem() .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let public_key = openssl::pkey::PKey::public_key_from_pem(&public_key) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; // Create local identity CSR. 
let subject = format!( "{}.{}.{}", module_id, self.settings.hostname, localid.domain ); let csr = create_csr(&subject, &public_key, &private_key, Some(attributes)) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let certificate = self .cert_client .create_cert(&module_id, &csr, None) .await .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let certificate = String::from_utf8(certificate) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; // Parse certificate expiration time. let expiration = get_cert_expiration(&certificate)?; aziot_identity_common::Identity::Local(aziot_identity_common::LocalIdSpec { module_id: module_id.to_owned(), auth: aziot_identity_common::LocalAuthenticationInfo { private_key: private_key_pem, certificate, expiration, }, }) }; Ok(local_identity) } async fn update_config_inner( &mut self, settings: config::Settings, trigger: ReprovisionTrigger, ) -> Result<(), Error> { let (allowed_users, _, local_modules) = configext::prepare_authorized_principals(&settings.principal); let authorizer = Box::new(SettingsAuthorizer {}); self.authorizer = authorizer; // All uids in the principals are authenticated users to this service let authenticator = Box::new(SettingsAuthenticator { allowed_users: allowed_users.clone(), }); self.authenticator = authenticator; self.local_identities = local_modules; self.settings = settings; // Attempt to re-provision the device. Failures need to be logged and the device should // run offline. if let Err(err) = self .reprovision_device(auth::AuthId::LocalRoot, trigger) .await { log::warn!( "Failed to reprovision device. Running offline. Reprovisioning failure reason: {}. 
", err ); } Ok(()) } } #[async_trait] impl UpdateConfig for Api { type Config = config::Settings; type Error = Error; async fn update_config(&mut self, new_config: config::Settings) -> Result<(), Self::Error> { self.update_config_inner(new_config, ReprovisionTrigger::ConfigurationFileUpdate) .await } } pub(crate) fn create_csr( subject: &str, public_key: &openssl::pkey::PKeyRef<openssl::pkey::Public>, private_key: &openssl::pkey::PKeyRef<openssl::pkey::Private>, attributes: Option<aziot_identity_common::LocalIdAttr>, ) -> Result<Vec<u8>, openssl::error::ErrorStack> { let mut csr = openssl::x509::X509Req::builder()?; csr.set_version(0)?; if let Some(attr) = attributes { let mut extensions: openssl::stack::Stack<openssl::x509::X509Extension> = openssl::stack::Stack::new()?; // basicConstraints = critical, CA:FALSE let basic_constraints = openssl::x509::extension::BasicConstraints::new() .critical() .build()?; extensions.push(basic_constraints)?; // keyUsage = digitalSignature, nonRepudiation, keyEncipherment let key_usage = openssl::x509::extension::KeyUsage::new() .critical() .digital_signature() .non_repudiation() .key_encipherment() .build()?; extensions.push(key_usage)?; // extendedKeyUsage = critical, clientAuth // Always set (even for servers) because it's required for EST client certificate renewal. 
let mut extended_key_usage = openssl::x509::extension::ExtendedKeyUsage::new(); extended_key_usage.critical(); extended_key_usage.client_auth(); if attr == aziot_identity_common::LocalIdAttr::Server { // extendedKeyUsage = serverAuth (in addition to clientAuth) extended_key_usage.server_auth(); } let extended_key_usage = extended_key_usage.build()?; extensions.push(extended_key_usage)?; csr.add_extensions(&extensions)?; } let mut subject_name = openssl::x509::X509Name::builder()?; subject_name.append_entry_by_nid(openssl::nid::Nid::COMMONNAME, subject)?; let subject_name = subject_name.build(); csr.set_subject_name(&subject_name)?; csr.set_pubkey(public_key)?; csr.sign(private_key, openssl::hash::MessageDigest::sha256())?; let csr = csr.build(); let csr = csr.to_pem()?; Ok(csr) } pub struct SettingsAuthenticator { pub allowed_users: std::collections::BTreeMap<config::Uid, config::Principal>, } impl auth::authentication::Authenticator for SettingsAuthenticator { type Error = Error; fn authenticate(&self, credentials: config::Uid) -> Result<auth::AuthId, Self::Error> { //DEVNOTE: The authentication logic is ordered to lookup the principals first // so that a host process can be configured to run as root. 
if let Some(p) = self.allowed_users.get(&credentials) { if p.id_type.is_some() { Ok(auth::AuthId::HostProcess(p.clone())) } else { Ok(auth::AuthId::Daemon) } } else if credentials == config::Uid(0) { Ok(auth::AuthId::LocalRoot) } else { Ok(auth::AuthId::Unknown) } } } pub struct SettingsAuthorizer {} impl auth::authorization::Authorizer for SettingsAuthorizer { type Error = Error; fn authorize(&self, o: auth::Operation) -> Result<bool, Self::Error> { match o.auth_id { crate::auth::AuthId::LocalRoot | crate::auth::AuthId::Daemon => Ok(true), crate::auth::AuthId::HostProcess(p) => Ok(match o.op_type { auth::OperationType::GetModule(m) => { p.name.0 == m && p.id_type.map_or(false, |i| { i.contains(&aziot_identity_common::IdType::Module) }) } auth::OperationType::GetDevice => p .id_type .map_or(true, |i| i.contains(&aziot_identity_common::IdType::Device)), auth::OperationType::GetAllHubModules | auth::OperationType::CreateModule(_) | auth::OperationType::DeleteModule(_) | auth::OperationType::UpdateModule(_) | auth::OperationType::ReprovisionDevice => false, auth::OperationType::GetTrustBundle => true, }), crate::auth::AuthId::Unknown => { Ok(o.op_type == crate::auth::OperationType::GetTrustBundle) } } } } fn get_cert_expiration(cert: &str) -> Result<String, Error> { let cert = openssl::x509::X509::from_pem(cert.as_bytes()) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let epoch = openssl::asn1::Asn1Time::from_unix(0).expect("unix epoch must be valid"); let diff = epoch .diff(&cert.not_after()) .map_err(|err| Error::Internal(InternalError::CreateCertificate(Box::new(err))))?; let diff = i64::from(diff.secs) + i64::from(diff.days) * 86400; let expiration = chrono::NaiveDateTime::from_timestamp(diff, 0); let expiration = chrono::DateTime::<chrono::Utc>::from_utc(expiration, chrono::Utc).to_rfc3339(); Ok(expiration) } #[cfg(test)] mod tests { use std::path::Path; use aziot_identity_common::{IdType, LocalIdAttr, LocalIdOpts, ModuleId}; 
use aziot_identityd_config::{LocalId, Principal, Uid}; use crate::auth::authorization::Authorizer; use crate::auth::{AuthId, Operation, OperationType}; use crate::SettingsAuthorizer; use crate::configext::prepare_authorized_principals; #[test] fn convert_to_map_creates_principal_lookup() { let local_p: Principal = Principal { uid: Uid(1000), name: ModuleId(String::from("local1")), id_type: Some(vec![IdType::Local]), localid: None, }; let module_p: Principal = Principal { uid: Uid(1001), name: ModuleId(String::from("module1")), id_type: Some(vec![IdType::Module]), localid: None, }; let v = vec![module_p.clone(), local_p.clone()]; let (map, _, _) = prepare_authorized_principals(&v); assert!(map.contains_key(&Uid(1000))); assert_eq!(map.get(&Uid(1000)).unwrap(), &local_p); assert!(map.contains_key(&Uid(1001))); assert_eq!(map.get(&Uid(1001)).unwrap(), &module_p); } #[test] fn convert_to_map_module_sets() { let v = vec![ Principal { uid: Uid(1000), name: ModuleId("hubmodule".to_owned()), id_type: Some(vec![IdType::Module]), localid: None, }, Principal { uid: Uid(1001), name: ModuleId("localmodule".to_owned()), id_type: Some(vec![IdType::Local]), localid: None, }, Principal { uid: Uid(1002), name: ModuleId("globalmodule".to_owned()), id_type: Some(vec![IdType::Module, IdType::Local]), localid: None, }, ]; let (_, hub_modules, local_modules) = prepare_authorized_principals(&v); assert!(hub_modules.contains(&ModuleId("hubmodule".to_owned()))); assert!(hub_modules.contains(&ModuleId("globalmodule".to_owned()))); assert!(!hub_modules.contains(&ModuleId("localmodule".to_owned()))); assert!(local_modules.contains_key(&ModuleId("localmodule".to_owned()))); assert!(local_modules.contains_key(&ModuleId("globalmodule".to_owned()))); assert!(!local_modules.contains_key(&ModuleId("hubmodule".to_owned()))); } #[test] fn settings_test() { let settings = super::configext::load_file(Path::new("test/good_auth_settings.toml")).unwrap(); let localid = settings.localid.unwrap(); 
assert_eq!( localid, LocalId { domain: "example.com".to_owned(), } ); let (map, _, _) = prepare_authorized_principals(&settings.principal); assert_eq!(map.len(), 3); assert!(map.contains_key(&Uid(1003))); assert_eq!(map.get(&Uid(1003)).unwrap().uid, Uid(1003)); assert_eq!( map.get(&Uid(1003)).unwrap().name, ModuleId(String::from("hostprocess2")) ); assert_eq!( map.get(&Uid(1003)).unwrap().id_type, Some(vec![IdType::Module, IdType::Local]) ); } #[test] fn local_id_opts() { let s = super::configext::load_file(std::path::Path::new("test/good_local_opts.toml")).unwrap(); assert_eq!( &s.principal, &[ Principal { uid: Uid(1000), name: ModuleId("module1".to_owned()), id_type: Some(vec![IdType::Local]), localid: None, }, Principal { uid: Uid(1001), name: ModuleId("module2".to_owned()), id_type: Some(vec![IdType::Local]), localid: Some(LocalIdOpts::X509 { attributes: LocalIdAttr::default() }), }, Principal { uid: Uid(1002), name: ModuleId("module3".to_owned()), id_type: Some(vec![IdType::Local]), localid: Some(LocalIdOpts::X509 { attributes: LocalIdAttr::Server }), }, ] ); } #[test] fn empty_auth_settings_deny_any_action() { let auth = SettingsAuthorizer {}; let operation = Operation { auth_id: AuthId::Unknown, op_type: OperationType::CreateModule(String::default()), }; let res = auth.authorize(operation); match res { Ok(false) => (), _ => panic!("incorrect authorization returned"), } } }
35.971564
124
0.561726
c19819ded77aa45559cb14edabaf0b5ca6471168
979
use rpc::{commands, types, RpcClient}; use std::env; use url::Url; mod get_balance_from_block; mod get_blocks_in_chain; mod get_invalid_blocks_in_chain; fn get_rpc_client() -> RpcClient { let tezos_node_url = get_tezos_node_url_for_test(); RpcClient::new(tezos_node_url) } fn get_tezos_node_url_for_test() -> Url { match is_testing_on_cloud() { false => get_local_testnet_url(), true => get_public_testnet_url(), } } fn is_testing_on_cloud() -> bool { let is_deployed_env_key = "CI"; match env::var(is_deployed_env_key) { Ok(val) => val == "true", Err(_) => false, } } fn get_public_testnet_url() -> Url { Url::parse("https://tezos-florence.cryptonomic-infra.tech:443").unwrap() } fn get_local_testnet_url() -> Url { Url::parse("http://localhost:8090").unwrap() } fn get_main_chain_id_by_tag() -> types::Chain { types::Chain::Main } fn get_block_id_by_tag() -> types::Block { types::Block::Head }
23.309524
76
0.674157
72993e672195eb6ca9149039ec09ff17ca12be66
5,365
#[cfg(test)] pub mod helpers { use super::*; use crate::contract::query; use crate::contract::{instantiate, INITIAL_MIXNODE_BOND}; use crate::transactions::{try_add_gateway, try_add_mixnode}; use config::defaults::DENOM; use cosmwasm_std::coin; use cosmwasm_std::from_binary; use cosmwasm_std::testing::mock_dependencies; use cosmwasm_std::testing::mock_env; use cosmwasm_std::testing::mock_info; use cosmwasm_std::testing::MockApi; use cosmwasm_std::testing::MockQuerier; use cosmwasm_std::testing::MockStorage; use cosmwasm_std::Addr; use cosmwasm_std::Coin; use cosmwasm_std::OwnedDeps; use cosmwasm_std::{Empty, MemoryStorage}; use mixnet_contract::{ Gateway, GatewayBond, InstantiateMsg, Layer, MixNode, MixNodeBond, PagedGatewayResponse, PagedMixnodeResponse, QueryMsg, }; pub fn add_mixnode( sender: &str, stake: Vec<Coin>, deps: &mut OwnedDeps<MockStorage, MockApi, MockQuerier>, ) -> String { let info = mock_info(sender, &stake); let key = format!("{}mixnode", sender); try_add_mixnode( deps.as_mut(), info, MixNode { identity_key: key.clone(), ..helpers::mix_node_fixture() }, ) .unwrap(); key } pub fn get_mix_nodes( deps: &mut OwnedDeps<MockStorage, MockApi, MockQuerier>, ) -> Vec<MixNodeBond> { let result = query( deps.as_ref(), mock_env(), QueryMsg::GetMixNodes { start_after: None, limit: Option::from(2), }, ) .unwrap(); let page: PagedMixnodeResponse = from_binary(&result).unwrap(); page.nodes } pub fn add_gateway( sender: &str, stake: Vec<Coin>, deps: &mut OwnedDeps<MockStorage, MockApi, MockQuerier>, ) -> String { let info = mock_info(sender, &stake); let key = format!("{}gateway", sender); try_add_gateway( deps.as_mut(), info, Gateway { identity_key: key.clone(), ..helpers::gateway_fixture() }, ) .unwrap(); key } pub fn get_gateways( deps: &mut OwnedDeps<MockStorage, MockApi, MockQuerier>, ) -> Vec<GatewayBond> { let result = query( deps.as_ref(), mock_env(), QueryMsg::GetGateways { start_after: None, limit: None, }, ) .unwrap(); let page: PagedGatewayResponse = 
from_binary(&result).unwrap(); page.nodes } pub fn init_contract() -> OwnedDeps<MemoryStorage, MockApi, MockQuerier<Empty>> { let mut deps = mock_dependencies(&[]); let msg = InstantiateMsg {}; let env = mock_env(); let info = mock_info("creator", &[]); instantiate(deps.as_mut(), env.clone(), info, msg).unwrap(); return deps; } pub fn mix_node_fixture() -> MixNode { MixNode { host: "mix.node.org".to_string(), mix_port: 1789, verloc_port: 1790, http_api_port: 8000, sphinx_key: "sphinx".to_string(), identity_key: "identity".to_string(), version: "0.10.0".to_string(), } } pub fn mixnode_bond_fixture() -> MixNodeBond { let mix_node = MixNode { host: "1.1.1.1".to_string(), mix_port: 1789, verloc_port: 1790, http_api_port: 8000, sphinx_key: "1234".to_string(), identity_key: "aaaa".to_string(), version: "0.10.0".to_string(), }; MixNodeBond::new( coin(50, DENOM), Addr::unchecked("foo"), Layer::One, mix_node, ) } pub fn gateway_fixture() -> Gateway { Gateway { host: "1.1.1.1".to_string(), mix_port: 1789, clients_port: 9000, location: "Sweden".to_string(), sphinx_key: "sphinx".to_string(), identity_key: "identity".to_string(), version: "0.10.0".to_string(), } } pub fn gateway_bond_fixture() -> GatewayBond { let gateway = Gateway { host: "1.1.1.1".to_string(), mix_port: 1789, clients_port: 9000, location: "London".to_string(), sphinx_key: "sphinx".to_string(), identity_key: "identity".to_string(), version: "0.10.0".to_string(), }; GatewayBond::new(coin(50, DENOM), Addr::unchecked("foo"), gateway) } pub fn query_contract_balance( address: Addr, deps: OwnedDeps<MockStorage, MockApi, MockQuerier>, ) -> Vec<Coin> { let querier = deps.as_ref().querier; vec![querier.query_balance(address, DENOM).unwrap()] } pub fn good_mixnode_bond() -> Vec<Coin> { vec![Coin { denom: DENOM.to_string(), amount: INITIAL_MIXNODE_BOND, }] } pub fn good_gateway_bond() -> Vec<Coin> { vec![Coin { denom: DENOM.to_string(), amount: INITIAL_MIXNODE_BOND, }] } }
29.157609
96
0.534949
de84b039aceae49c0b0dcc01dbbed367bba79a39
1,991
// PNG Pong // // Copyright © 2019-2020 Jeron Aldaron Lau // Copyright © 2014-2017 Kornel Lesiński // Copyright © 2005-2016 Lode Vandevenne // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // https://apache.org/licenses/LICENSE-2.0>, or the Zlib License, <LICENSE-ZLIB // or http://opensource.org/licenses/Zlib>, at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::{ChunkDecoder, DecodeError, Format, Frame}; use std::{io::Read, marker::PhantomData}; /// Frame Encoder for PNG files. #[derive(Debug)] pub struct FrameDecoder<R: Read, F: Format> { decoder: ChunkDecoder<R>, _phantom: PhantomData<F>, // FIXME: This is a workaround for not supporting APNG yet. has_decoded: bool, } impl<R: Read, F: Format> FrameDecoder<R, F> { /// Create a new encoder. pub fn new(r: R) -> Self { FrameDecoder { decoder: ChunkDecoder::new(r), _phantom: PhantomData, has_decoded: false, } } } impl<R, F> Iterator for FrameDecoder<R, F> where R: Read, F: Format<Chan = pix::chan::Ch8>, // FIXME { type Item = Result<Frame<F>, DecodeError>; fn next(&mut self) -> Option<Self::Item> { if self.has_decoded { return None; } self.has_decoded = true; if cfg!(feature = "crc_checksums") { self.decoder.state.decoder.check_crc = true; } if cfg!(feature = "adler32_checksums") { self.decoder.state.decoder.zlibsettings.check_adler32 = true; } let mut bytes: Vec<u8> = vec![]; let raster = match self.decoder.bytes.read_to_end(&mut bytes) { Ok(_len) => match self.decoder.state.decode(bytes) { Ok(raster) => Ok(Frame { raster, delay: 0 }), Err(error) => Err(error), }, Err(e) => Err(DecodeError::Io(e.kind())), }; Some(raster) } }
29.279412
80
0.598192
8710fb5b87194b4c291d0b5188f59a8ef0ddbf5c
16,926
/* * Copyright 2019 DTAI Research Group - KU Leuven. * License: Apache License 2.0 * Author: Laurens Devos */ use num::{Integer, One}; use std::mem::{size_of}; use std::slice; use std::ops::{BitAnd, BitOr, Shr, Shl, Not, Deref, DerefMut}; use std::convert::From; // - Utilities ------------------------------------------------------------------------------------ pub fn get_bit<T>(bits: T, pos: u8) -> bool where T: Integer + One + Shr<Output=T> + BitAnd<Output=T> + From<u8> { T::one() & (bits >> T::from(pos)) == T::one() } pub fn enable_bit<T>(bits: T, pos: u8) -> T where T: Integer + One + Shl<Output=T> + BitOr<Output=T> + From<u8> { bits | T::one() << T::from(pos) } pub fn disable_bit<T>(bits: T, pos: u8) -> T where T: Integer + One + Not<Output=T> + Shl<Output=T> + BitAnd<Output=T> + From<u8> { bits & !(T::one() << T::from(pos)) } pub fn set_bit<T>(bits: T, pos: u8, value: bool) -> T where T: Integer + One + Not<Output=T> + Shl<Output=T> + BitAnd<Output=T> + BitOr<Output=T> + From<u8> { if value { enable_bit(bits, pos) } else { disable_bit(bits, pos) } } pub fn get_blockpos<T: Integer>(bit_index: usize) -> usize { bit_index / (size_of::<T>() * 8) } pub fn get_bitpos<T: Integer>(bit_index: usize) -> u8 { (bit_index % (size_of::<T>() * 8)) as u8 } // - Aligned bit block type ----------------------------------------------------------------------- const BITBLOCK_BYTES: usize = 32; // 32*8 = 256 bits per block /// Properly aligned bit blocks (cache boundary Intel 64B) #[repr(C, align(32))] #[derive(Clone)] pub struct BitBlock { bytes: [u8; BITBLOCK_BYTES], } impl BitBlock { pub fn nbytes() -> usize { size_of::<BitBlock>() } pub fn nbits() -> usize { Self::nbytes() * 8 } pub fn zeros() -> BitBlock { BitBlock { bytes: [0u8; BITBLOCK_BYTES] } } pub fn ones() -> BitBlock { BitBlock { bytes: [0xFFu8; BITBLOCK_BYTES] } } pub fn is_zero(&self) -> bool { let n = Self::nbytes() / size_of::<u128>(); let ptr = self.as_ptr() as *const u128; for i in 0..n { if unsafe { *ptr.add(i) 
!= 0 } { return false; } } return true; } /// Construct a BitBlock from an iterator of integers. Returns a tuple containing whether the /// iterator was fully consumed, and the constructed BitBlock. pub fn from_iter<T, I>(iter: &mut I) -> (bool, BitBlock) where T: Copy + Integer, I: Iterator<Item = T> { let mut iter_done = false; let mut bb = BitBlock::zeros(); { let bb_arr = bb.cast_mut::<T>(); for i in 0..bb_arr.len() { if let Some(v) = iter.next() { bb_arr[i] = v; } else { iter_done = true; break; } } } (iter_done, bb) } /// Construct a BitBlock from an iterator of bools. Returns a tuple containing whether the /// iterator was fully consumed, and the constructed BitBlock. pub fn from_bool_iter<I>(iter: &mut I) -> (bool, BitBlock) where I: Iterator<Item = bool> { let mut iter_done = false; let mut bb = BitBlock::zeros(); for i in 0..BitBlock::nbits() { if let Some(b) = iter.next() { bb.set_bit(i, b); } else { iter_done = true; break; } } (iter_done, bb) } /// Constructs a BitBlock from right to left pub fn from_slice<'a, T>(v: &'a [T]) -> BitBlock where T: 'a + Copy + Integer { let mut iter = v.into_iter().map(|x| *x); Self::from_iter(&mut iter).1 } pub fn from_4u64(a: u64, b: u64, c: u64, d: u64) -> BitBlock { let mut bb = BitBlock::zeros(); { let bb_arr = bb.cast_mut::<u64>(); bb_arr[0] = a; bb_arr[1] = b; bb_arr[2] = c; bb_arr[3] = d; } bb } pub fn get_bit(&self, index: usize) -> bool { let b = get_blockpos::<u8>(index); let i = get_bitpos::<u8>(index); get_bit(self.bytes[b], i) } pub fn enable_bit(&mut self, index: usize) { let b = get_blockpos::<u8>(index); let i = get_bitpos::<u8>(index); self.bytes[b] = enable_bit(self.bytes[b], i); } pub fn disable_bit(&mut self, index: usize) { let b = get_blockpos::<u8>(index); let i = get_bitpos::<u8>(index); self.bytes[b] = disable_bit(self.bytes[b], i); } pub fn set_bit(&mut self, index: usize, value: bool) { if value { self.enable_bit(index); } else { self.disable_bit(index); } } pub fn count_ones(&self) -> u32 { 
let mut count = 0u32; let ptr = self.as_ptr() as *const u64; for i in 0..(Self::nbytes() / size_of::<u64>()) { count += unsafe { (*ptr.add(i)).count_ones() }; } count } pub fn blocks_required_for(nbits: usize) -> usize { let w = Self::nbits(); let correction = if (nbits % w) == 0 { 0 } else { 1 }; nbits / w + correction } pub fn cast<T: Integer>(&self) -> &[T] { let sz = Self::nbytes() / size_of::<T>(); let ptr = self.bytes.as_ptr() as *const T; unsafe { slice::from_raw_parts(ptr, sz) } } pub fn cast_mut<T: Integer>(&mut self) -> &mut [T] { let sz = Self::nbytes() / size_of::<T>(); let ptr = self.bytes.as_mut_ptr() as *mut T; unsafe { slice::from_raw_parts_mut(ptr, sz) } } pub fn as_ptr(&self) -> *const u8 { self.bytes.as_ptr() } pub fn as_mut_ptr(&mut self) -> *mut u8 { self.bytes.as_mut_ptr() } } impl <T: Integer> From<T> for BitBlock { fn from(v: T) -> BitBlock { let mut bb = BitBlock::zeros(); bb.cast_mut::<T>()[0] = v; bb } } impl Default for BitBlock { fn default() -> Self { BitBlock::zeros() } } // - Array of BitBlocks --------------------------------------------------------------------------- pub struct BitBlocks { blocks: Vec<BitBlock>, } impl BitBlocks { pub fn empty() -> BitBlocks { BitBlocks { blocks: Vec::new(), } } pub fn is_empty(&self) -> bool { self.blocks.is_empty() } pub fn zero_blocks(nblocks: usize) -> BitBlocks { assert!(nblocks > 0); BitBlocks { blocks: vec![BitBlock::zeros(); nblocks], } } pub fn one_blocks(nblocks: usize) -> BitBlocks { assert!(nblocks > 0); BitBlocks { blocks: vec![BitBlock::ones(); nblocks], } } pub fn zero_bits(nbits: usize) -> BitBlocks { let nblocks = BitBlock::blocks_required_for(nbits); Self::zero_blocks(nblocks) } pub fn one_bits(nbits: usize) -> BitBlocks { let nblocks = BitBlock::blocks_required_for(nbits); let mut blocks = Self::one_blocks(nblocks); //if let Some(last) = blocks.get_slice_mut(range).last_mut() { if let Some(last) = blocks.deref_mut().last_mut() { let u64s = last.cast_mut::<u64>(); // zero out the 
last bits let mut zeros = nblocks * BitBlock::nbits() - nbits; let mut i = u64s.len()-1; loop { if zeros >= 64 { u64s[i] = 0; } else { u64s[i] >>= zeros; } if zeros > 64 { zeros -= 64; i -= 1; } else { break; } } } blocks } pub fn from_iter<T, I>(nvalues: usize, mut iter: I) -> BitBlocks where T: Integer + Copy, I: Iterator<Item = T>, { let nblocks = BitBlock::blocks_required_for(nvalues * size_of::<T>() * 8); let mut blocks = Self::zero_blocks(nblocks); for i in 0..nblocks { let (_, block) = BitBlock::from_iter(&mut iter); blocks[i] = block; } blocks } pub fn from_bool_iter<I>(nbits: usize, mut iter: I) -> BitBlocks where I: Iterator<Item = bool>, { let nblocks = BitBlock::blocks_required_for(nbits); let mut blocks = Self::zero_blocks(nblocks); for i in 0..nblocks { let (_, block) = BitBlock::from_bool_iter(&mut iter); blocks[i] = block; } blocks } pub fn block_len<T: Integer>(&self) -> usize { let nblocks = self.blocks.len(); nblocks * (BitBlock::nbytes() / size_of::<T>()) } pub fn cast<T: Integer>(&self) -> &[T] { let sz = self.block_len::<T>(); let ptr = self.as_ptr() as *const T; unsafe { slice::from_raw_parts(ptr, sz) } } pub fn cast_mut<T: Integer>(&mut self) -> &mut [T] { let ptr = self.as_mut_ptr() as *mut T; let sz = self.block_len::<T>(); unsafe { slice::from_raw_parts_mut(ptr, sz) } } pub fn get<T: Integer>(&self, index: usize) -> &T { &self.cast::<T>()[index] } pub unsafe fn get_unchecked<T: Integer>(&self, index: usize) -> &T { safety_check!(index < self.block_len::<T>()); let ptr = self.as_ptr() as *const T; &*ptr.add(index) } pub fn set<T: Integer + Copy>(&mut self, index: usize, value: T) { self.cast_mut::<T>()[index] = value; } pub unsafe fn set_unchecked<T: Integer + Copy>(&mut self, index: usize, value: T) { safety_check!(index < self.block_len::<T>()); let ptr = self.as_mut_ptr() as *mut T; *&mut *ptr.add(index) = value; } pub fn resize(&mut self, nblocks: usize) { assert!(nblocks > 0); self.blocks.resize(nblocks, BitBlock::zeros()); } pub fn 
reset(&mut self) { self.blocks.clear(); } } impl Deref for BitBlocks { type Target = [BitBlock]; fn deref(&self) -> &[BitBlock] { &self.blocks } } impl DerefMut for BitBlocks { fn deref_mut(&mut self) -> &mut [BitBlock] { &mut self.blocks } } // ------------------------------------------------------------------------------------------------ #[cfg(test)] mod test { use super::*; #[test] fn test_bitblock0() { let zeros = BitBlock::zeros(); for i in 0..BITBLOCK_BYTES { assert_eq!(0, zeros.cast::<u8>()[i]); } let ones = BitBlock::ones(); for i in 0..BITBLOCK_BYTES { assert_eq!(0xFF, ones.cast::<u8>()[i]); } } #[test] fn test_bitblock1() { let block1 = BitBlock::from_4u64(0b1001, 0, 0, 0); assert_eq!(BitBlock::nbits(), 256); assert_eq!(block1.get_bit(0), true); assert_eq!(block1.get_bit(1), false); assert_eq!(block1.get_bit(2), false); assert_eq!(block1.get_bit(3), true); assert_eq!(block1.get_bit(4), false); assert_eq!(block1.get_bit(63), false); let mut block2 = block1.clone(); block2.set_bit(63, true); assert_eq!(block2.cast::<u64>()[0], 0x8000000000000009); assert_eq!(block2.get_bit(63), true); } #[test] fn test_bitblock2() { let mut block = BitBlock::from_4u64(0x0807060504030201, 0x0807060504030201, 0x0807060504030201, 0x0807060504030201); for (i, u) in block.cast::<u8>().into_iter().enumerate() { assert_eq!((i as u8) % 8 + 1, *u); } block.enable_bit(129); // byte 16, was 0b01, now 0b11 assert_eq!(block.cast::<u8>()[16], 0b11); block.enable_bit(130); // byte 16, was 0b11, now 0b111 assert_eq!(block.cast::<u8>()[16], 0b111); } #[test] fn test_bitblock3() { let bits = [true, true, false, true, false, false, true]; let mut iter = bits.iter().map(|b| *b); let (done, bb) = BitBlock::from_bool_iter(&mut iter); assert!(done); assert_eq!(bb.cast::<u8>()[0], 0b1001011); // reverse order, bit 0 first! 
} #[test] fn test_bitblock4() { let mut bb = BitBlock::zeros(); bb.set_bit(10, true); assert!(bb.get_bit(10) == true); assert!(bb.cast::<u64>()[0] == 0x400); bb.set_bit(0, true); assert!(bb.get_bit(0) == true); bb.set_bit(1, true); assert!(bb.get_bit(1) == true); bb.set_bit(63, true); assert!(bb.get_bit(63) == true); bb.set_bit(64, true); assert!(bb.get_bit(64) == true); assert!(bb.cast::<u64>()[0] == 0x8000000000000403); assert!(bb.cast::<u64>()[1] == 0x1); bb.set_bit(63, false); assert!(bb.get_bit(63) == false); bb.set_bit(64, false); assert!(bb.get_bit(64) == false); assert!(bb.get_bit(1) == true); assert!(bb.cast::<u64>()[0] == 0x403); assert!(bb.cast::<u64>()[1] == 0x0); bb.set_bit(127, true); assert!(bb.get_bit(127) == true); } // bitblocks #[test] fn bitvec_basic() { let n = 10_000; let mut blocks = BitBlocks::from_iter::<u32, _>(n, 0u32..n as u32); for i in 0..n { assert_eq!(blocks.cast::<u32>()[i], i as u32); assert_eq!(*blocks.get::<u32>(i), i as u32); blocks.set::<u32>(i, 0); assert_eq!(blocks.cast::<u32>()[i], 0); assert_eq!(*blocks.get::<u32>(i), 0); blocks.cast_mut::<u32>()[i] = (n - i) as u32; } for i in 0..n { assert_eq!(blocks.cast::<u32>()[i], (n - i) as u32); assert_eq!(*blocks.get::<u32>(i), (n - i) as u32); } } #[test] fn bitvec_from_bool_iter() { let n = 10_000; let f = |k| k<n && k%13==1; let iter = (0..n).map(f); let blocks = BitBlocks::from_bool_iter(n, iter.clone()); for (i, block) in blocks.iter().enumerate() { for j in 0..BitBlock::nbits() { let k = i*BitBlock::nbits() + j; let b = f(k); assert_eq!(b, block.get_bit(j)); } } } #[test] fn bitvec_from_iter() { let n = 4367; let f = |i| if i >= n as u32 { 0 } else { 101*i+13 }; let mut blocks = BitBlocks::from_iter(n, (0u32..n as u32).map(f)); for (i, &b_u32) in blocks.cast::<u32>().iter().enumerate() { assert_eq!(b_u32, f(i as u32)); } for i in 0..n { assert_eq!(*blocks.get::<u32>(i), f(i as u32)); } for i in 0..n { unsafe { assert_eq!(*blocks.get_unchecked::<u32>(i), f(i as u32)); } } for 
i in 0..n { blocks.set::<u32>(i, f(i as u32) + 10); } for i in 0..n { assert_eq!(*blocks.get::<u32>(i), f(i as u32) + 10); } } #[test] fn bitvec_cast_len() { let n = 13456; let f = |k| k<n && k%31==1; let iter = (0..n).map(f); let blocks = BitBlocks::from_bool_iter(n, iter); assert_eq!(blocks.len(), n / 256 + 1); assert_eq!(blocks.cast::<u128>().len(), blocks.len() * 2); assert_eq!(blocks.cast::<u64>().len(), blocks.len() * 4); assert_eq!(blocks.cast::<u32>().len(), blocks.len() * 8); assert_eq!(blocks.cast::<u16>().len(), blocks.len() * 16); assert_eq!(blocks.cast::<u8>().len(), blocks.len() * 32); for (i, qword) in blocks.cast::<u64>().iter().enumerate() { for j in 0..64 { let b = f(i*64 + j); assert_eq!(b, qword >> j & 0x1 == 0x1); } } } #[test] fn bitvec_zeros_end() { // allocate some memory let blocks = BitBlocks::from_iter(3, 10u32..13u32); assert_eq!(blocks.cast::<u32>()[1], 11); assert_eq!(blocks.cast::<u32>().iter().cloned().last().unwrap(), 0); for _ in 0..100 { let blocks = BitBlocks::from_iter(3, 10u32..13u32); for (i, &b_u32) in blocks.cast::<u32>().iter().enumerate() { if i < 3 { assert_eq!(b_u32, (10+i) as u32); } else { assert_eq!(b_u32, 0); } } let blocks = BitBlocks::from_bool_iter(32, (0..32).map(|_| true)); for (i, &b_u32) in blocks.cast::<u32>().iter().enumerate() { if i == 0 { assert_eq!(b_u32, 0xFFFFFFFF); } else { assert_eq!(b_u32, 0); } } } } #[test] fn bitvec_one_bits() { let blocks = BitBlocks::one_bits(50); let v = blocks.cast::<u32>(); assert_eq!(v.len(), 8); assert_eq!(v[0], 0xFFFFFFFF); assert_eq!(v[1], 0x3FFFF); assert_eq!(v[2], 0); assert_eq!(v[3], 0); assert_eq!(v[4], 0); assert_eq!(v[5], 0); assert_eq!(v[6], 0); assert_eq!(v[7], 0); } }
28.543002
104
0.500177
723f2cd5edcacec6fe7956e0c22a03357a5e98a2
5,560
use map_gui::tools::{draw_isochrone, ChooseSomething, ColorLegend}; use map_gui::ID; use map_model::AmenityType; use widgetry::{ Cached, Choice, Color, Drawable, EventCtx, GfxCtx, HorizontalAlignment, Line, Panel, SimpleState, State, TextExt, Transition, VerticalAlignment, Widget, }; use crate::isochrone::{BorderIsochrone, Isochrone, Options}; use crate::viewer::{draw_star, HoverKey, HoverOnBuilding}; use crate::App; /// Calculate isochrones around each amenity on a map and merge them together using the min value pub struct FindAmenity; impl FindAmenity { pub fn new_state(ctx: &mut EventCtx, options: Options) -> Box<dyn State<App>> { ChooseSomething::new_state( ctx, "Choose an amenity", AmenityType::all() .into_iter() .map(|at| Choice::new(at.to_string(), at)) .collect(), Box::new(move |at, ctx, app| { let multi_isochrone = create_multi_isochrone(ctx, app, at, options.clone()); let border_isochrone = create_border_isochrone(ctx, app, options); return Transition::Replace(Results::new_state( ctx, app, multi_isochrone, border_isochrone, at, )); }), ) } } /// For every one of the requested amenity on the map, draw an isochrone fn create_multi_isochrone( ctx: &mut EventCtx, app: &App, category: AmenityType, options: Options, ) -> Isochrone { let map = &app.map; // For a category, find all matching stores let mut stores = Vec::new(); for b in map.all_buildings() { if b.has_amenity(category) { stores.push(b.id); } } Isochrone::new(ctx, app, stores, options) } /// Draw an isochrone from every intersection border fn create_border_isochrone(ctx: &mut EventCtx, app: &App, options: Options) -> BorderIsochrone { let mut all_intersections = Vec::new(); for i in app.map.all_intersections() { if i.is_border() { all_intersections.push(i.id); } } BorderIsochrone::new(ctx, app, all_intersections, options) } struct Results { draw: Drawable, isochrone: Isochrone, hovering_on_bldg: Cached<HoverKey, HoverOnBuilding>, } impl Results { fn new_state( ctx: &mut EventCtx, app: &App, 
isochrone: Isochrone, border_isochrone: BorderIsochrone, category: AmenityType, ) -> Box<dyn State<App>> { let panel = Panel::new_builder(Widget::col(vec![ Widget::row(vec![ Line(format!("{} within 15 minutes", category)) .small_heading() .into_widget(ctx), ctx.style().btn_close_widget(ctx), ]), format!("{} matching amenities", isochrone.start.len()).text_widget(ctx), ColorLegend::categories( ctx, vec![ (Color::GREEN, "5 mins"), (Color::ORANGE, "10 mins"), (Color::RED, "15 mins"), ], ), ColorLegend::row( ctx, Color::rgb(0, 0, 0).alpha(0.3), "< 15 mins from border (amenity could exist off map)", ), ])) .aligned(HorizontalAlignment::RightInset, VerticalAlignment::TopInset) .build(ctx); let mut batch = draw_isochrone( &app.map, &border_isochrone.time_to_reach_building, &border_isochrone.thresholds, &border_isochrone.colors, ); batch.append(draw_isochrone( &app.map, &isochrone.time_to_reach_building, &isochrone.thresholds, &isochrone.colors, )); for &start in &isochrone.start { batch.append(draw_star(ctx, app.map.get_b(start))); } <dyn SimpleState<_>>::new_state( panel, Box::new(Results { draw: ctx.upload(batch), isochrone, hovering_on_bldg: Cached::new(), }), ) } } impl SimpleState<App> for Results { fn on_click( &mut self, _: &mut EventCtx, _: &mut App, x: &str, _: &mut Panel, ) -> Transition<App> { match x { "close" => Transition::Pop, _ => unreachable!(), } } fn other_event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition<App> { ctx.canvas_movement(); if ctx.redo_mouseover() { let isochrone = &self.isochrone; self.hovering_on_bldg .update(HoverOnBuilding::key(ctx, app), |key| { HoverOnBuilding::value(ctx, app, key, isochrone) }); // Also update this to conveniently get an outline drawn. Note we don't want to do this // inside the callback above, because it doesn't run when the key becomes None. 
app.current_selection = self.hovering_on_bldg.key().map(|(b, _)| ID::Building(b)); } Transition::Keep } fn draw(&self, g: &mut GfxCtx, _: &App) { self.isochrone.draw.draw(g); if let Some(hover) = self.hovering_on_bldg.value() { g.draw_mouse_tooltip(hover.tooltip.clone()); g.redraw(&hover.drawn_route); } g.redraw(&self.draw); } }
31.771429
99
0.546763
d75afddf30f5543f64527d575fdd6838da8630f2
10,136
use crate::utility::*; use crate::*; use azure_kinect_sys::k4a::*; use std::ptr; pub struct Device<'a> { pub(crate) api: &'a azure_kinect_sys::api::Api, pub(crate) handle: k4a_device_t, } #[derive(Copy, Clone)] pub struct ColorControlCapabilities { pub supports_auto: bool, pub min_value: i32, pub max_value: i32, pub step_value: i32, pub default_value: i32, pub default_mode: ColorControlMode, } impl Device<'_> { pub fn from_handle(api: &azure_kinect_sys::api::Api, handle: k4a_device_t) -> Device { Device { api, handle } } /// Starts the K4A device's cameras pub fn start_cameras(&self, configuration: &DeviceConfiguration) -> Result<Camera, Error> { Camera::new(&self, configuration) } /// Get the K4A device serial number pub fn get_serialnum(&self) -> Result<String, Error> { get_k4a_string(&|serialnum, buffer| unsafe { (self.api.funcs.k4a_device_get_serialnum)(self.handle, serialnum, buffer) }) } /// Get the K4A color sensor control value pub fn get_color_control( &self, command: ColorControlCommand, ) -> Result<(ColorControlMode, i32), Error> { let mut mode: k4a_color_control_mode_t = k4a_color_control_mode_t_K4A_COLOR_CONTROL_MODE_AUTO; let mut value: i32 = 0; Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_get_color_control)( self.handle, command.into(), &mut mode, &mut value, ) }) .to_result((ColorControlMode::from_primitive(mode), value)) } /// Set the K4A color sensor control value pub fn set_color_control( &mut self, command: ColorControlCommand, mode: ColorControlMode, value: i32, ) -> Result<(), Error> { Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_set_color_control)( self.handle, command.into(), mode.into(), value, ) }) .to_result(()) } pub fn get_color_control_capabilities( &self, command: ColorControlCommand, ) -> Result<ColorControlCapabilities, Error> { let mut capabilties = unsafe { std::mem::zeroed::<ColorControlCapabilities>() }; let mut mode: k4a_color_control_mode_t = k4a_color_control_mode_t::default(); 
Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_get_color_control_capabilities)( self.handle, command.into(), &mut capabilties.supports_auto, &mut capabilties.min_value, &mut capabilties.max_value, &mut capabilties.step_value, &mut capabilties.default_value, &mut mode, ) }) .to_result({ capabilties.default_mode = ColorControlMode::from_primitive(mode); capabilties }) } /// Get the raw calibration blob for the entire K4A device. pub fn get_raw_calibration(&self) -> Result<Vec<u8>, Error> { get_k4a_binary_data(&|calibration, buffer| unsafe { (self.api.funcs.k4a_device_get_raw_calibration)(self.handle, calibration, buffer) }) } /// Get the camera calibration for the entire K4A device, which is used for all transformation functions. pub fn get_calibration( &self, depth_mode: DepthMode, color_resolution: ColorResolution, ) -> Result<Calibration, Error> { let mut calibaraion = k4a_calibration_t::default(); Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_get_calibration)( self.handle, depth_mode.into(), color_resolution.into(), &mut calibaraion, ) }) .to_result_fn(|| Calibration::from_handle(self.api, calibaraion)) } /// Get the device jack status for the synchronization connectors pub fn is_sync_connected(&self) -> Result<(bool, bool), Error> { let mut sync_in_jack_connected = false; let mut sync_out_jack_connected = false; Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_get_sync_jack)( self.handle, &mut sync_in_jack_connected, &mut sync_out_jack_connected, ) }) .to_result((sync_in_jack_connected, sync_out_jack_connected)) } /// Get the device jack status for the synchronization in connector pub fn is_sync_in_connected(&self) -> Result<bool, Error> { Ok(self.is_sync_connected()?.0) } /// Get the device jack status for the synchronization out connector pub fn is_sync_out_connected(&self) -> Result<bool, Error> { Ok(self.is_sync_connected()?.1) } /// Get the version numbers of the K4A subsystems' firmware pub fn get_version(&self) -> 
Result<HardwareVersion, Error> { let mut version = k4a_hardware_version_t::default(); Error::from_k4a_result_t(unsafe { (self.api.funcs.k4a_device_get_version)(self.handle, &mut version) }) .to_result(HardwareVersion { value: version }) } } impl NativeHandle for Device<'_> { unsafe fn get_native_handle(&self) -> *mut () { self.handle as *mut () } } impl Drop for Device<'_> { fn drop(&mut self) { unsafe { (self.api.funcs.k4a_device_close)(self.handle); } self.handle = ptr::null_mut(); } } #[derive(Copy, Clone, Default)] pub struct DeviceConfiguration { pub(crate) value: k4a_device_configuration_t, } impl DeviceConfiguration { pub(crate) fn for_k4arecord(&self) -> &azure_kinect_sys::k4arecord::k4a_device_configuration_t { unsafe { std::mem::transmute(&self.value) } } pub fn builder() -> DeviceConfigurationBuilder { DeviceConfigurationBuilder::default() } #[doc = " Image format to capture with the color camera."] pub fn color_format(&self) -> ImageFormat { ImageFormat::from_primitive(self.value.color_format) } #[doc = " Image resolution to capture with the color camera."] pub fn color_resolution(&self) -> ColorResolution { ColorResolution::from_primitive(self.value.color_resolution) } #[doc = " Capture mode for the depth camera."] pub fn depth_mode(&self) -> DepthMode { DepthMode::from_primitive(self.value.depth_mode) } #[doc = " Desired frame rate for the color and depth camera."] pub fn camera_fps(&self) -> Fps { Fps::from_primitive(self.value.camera_fps.into()) } #[doc = " Only produce k4a_capture_t objects if they contain synchronized color and depth images."] pub fn synchronized_images_only(&self) -> bool { self.value.synchronized_images_only } #[doc = " Desired delay between the capture of the color image and the capture of the depth image."] pub fn depth_delay_off_color_usec(&self) -> i32 { self.value.depth_delay_off_color_usec } #[doc = " The external synchronization mode."] pub fn wired_sync_mode(&self) -> WiredSyncMode { 
WiredSyncMode::from_primitive(self.value.wired_sync_mode.into()) } #[doc = " The external synchronization timing."] pub fn subordinate_delay_off_master_usec(&self) -> u32 { self.value.subordinate_delay_off_master_usec } #[doc = " Streaming indicator automatically turns on when the color or depth camera's are in use."] pub fn disable_streaming_indicator(&self) -> bool { self.value.disable_streaming_indicator } } #[derive(Default)] pub struct DeviceConfigurationBuilder { value: k4a_device_configuration_t, } impl DeviceConfigurationBuilder { #[doc = " Image format to capture with the color camera."] pub fn color_format(mut self, value: ImageFormat) -> DeviceConfigurationBuilder { self.value.color_format = value.into(); self } #[doc = " Image resolution to capture with the color camera."] pub fn color_resolution(mut self, value: ColorResolution) -> DeviceConfigurationBuilder { self.value.color_resolution = value.into(); self } #[doc = " Capture mode for the depth camera."] pub fn depth_mode(mut self, value: DepthMode) -> DeviceConfigurationBuilder { self.value.depth_mode = value.into(); self } #[doc = " Desired frame rate for the color and depth camera."] pub fn camera_fps(mut self, value: Fps) -> DeviceConfigurationBuilder { self.value.camera_fps = value.into(); self } #[doc = " Only produce k4a_capture_t objects if they contain synchronized color and depth images."] pub fn synchronized_images_only(mut self, value: bool) -> DeviceConfigurationBuilder { self.value.synchronized_images_only = value; self } #[doc = " Desired delay between the capture of the color image and the capture of the depth image."] pub fn depth_delay_off_color_usec(mut self, value: i32) -> DeviceConfigurationBuilder { self.value.depth_delay_off_color_usec = value; self } #[doc = " The external synchronization mode."] pub fn wired_sync_mode(mut self, value: WiredSyncMode) -> DeviceConfigurationBuilder { self.value.wired_sync_mode = value.into(); self } #[doc = " The external synchronization 
timing."] pub fn subordinate_delay_off_master_usec(mut self, value: u32) -> DeviceConfigurationBuilder { self.value.subordinate_delay_off_master_usec = value; self } #[doc = " Streaming indicator automatically turns on when the color or depth camera's are in use."] pub fn disable_streaming_indicator(mut self, value: bool) -> DeviceConfigurationBuilder { self.value.disable_streaming_indicator = value; self } pub fn build(self) -> DeviceConfiguration { DeviceConfiguration { value: self.value } } }
34.127946
109
0.639108
d541779ac0de226bf1380ded5b0b016cd7196528
1,720
/// The ContentFilterProperty_t field provides all the required information to /// enable content filtering on the Writer side. For example, for the default /// DDSSQL filter class, a valid filter expression for a data type containing /// members a, b and c could be “(a < 5) AND (b == %0) AND (c >= %1)” with /// expression parameters “5” and “3.” In order for the Writer to apply /// the filter, it must have been configured to handle filters of the specified /// filter class. If not, the Writer will simply ignore the filter information /// and not filter any data samples. pub struct ContentFilterProperty_t { /// Name of the Content-filtered Topic associated with the Reader. /// Must have non-zero length. content_filtered_topic_name: String, /// Name of the Topic related to the Content-filtered Topic. /// Must have non-zero length. related_topic_name: String, /// Identifies the filter class this filter belongs to. RTPS can support /// multiple filter classes (SQL, regular expressions, custom filters, /// etc). Must have non-zero length. /// RTPS predefines the following values: /// “DDSSQL” Default filter class name if none specified. /// Matches the SQL filter specified by DDS, which must be available in all /// implementations. filter_class_name: String, /// The actual filter expression. Must be a valid expression for the filter /// class specified using filter_class_name. /// Must have non-zero length. filter_expression: String, /// Defines the value for each parameter in the filter expression. /// Can have zero length if the filter expression contains no parameters. extension_parameters: Vec<String>, }
47.777778
79
0.719186
64c020dc2dc2ab415ba03e545e1a2df3cdece861
2,890
use crate::gles2::gles2_bindings::types::GLenum; use crate::gles2::{gles2_bindings, GlContext, LocationId}; use crate::RafxResult; pub fn is_uniform_buffer_field_type(gl_type: GLenum) -> bool { match gl_type { gles2_bindings::INT | gles2_bindings::BOOL | gles2_bindings::FLOAT | gles2_bindings::INT_VEC2 | gles2_bindings::BOOL_VEC2 | gles2_bindings::FLOAT_VEC2 | gles2_bindings::INT_VEC3 | gles2_bindings::BOOL_VEC3 | gles2_bindings::FLOAT_VEC3 | gles2_bindings::INT_VEC4 | gles2_bindings::BOOL_VEC4 | gles2_bindings::FLOAT_VEC4 | gles2_bindings::FLOAT_MAT2 | gles2_bindings::FLOAT_MAT3 | gles2_bindings::FLOAT_MAT4 => true, _ => false, } } #[allow(dead_code)] pub fn byte_size_of_type(gl_type: GLenum) -> u32 { match gl_type { gles2_bindings::INT | gles2_bindings::BOOL | gles2_bindings::FLOAT => 4, gles2_bindings::INT_VEC2 | gles2_bindings::BOOL_VEC2 | gles2_bindings::FLOAT_VEC2 => 8, gles2_bindings::INT_VEC3 | gles2_bindings::BOOL_VEC3 | gles2_bindings::FLOAT_VEC3 | gles2_bindings::INT_VEC4 | gles2_bindings::BOOL_VEC4 | gles2_bindings::FLOAT_VEC4 => 16, gles2_bindings::FLOAT_MAT2 => 32, gles2_bindings::FLOAT_MAT3 => 48, gles2_bindings::FLOAT_MAT4 => 64, _ => unimplemented!("Unknown GL type in byte_size_of_type"), } } pub fn set_uniform<T: Copy>( gl_context: &GlContext, location: &LocationId, data: &T, gl_type: GLenum, count: u32, ) -> RafxResult<()> { match gl_type { gles2_bindings::INT | gles2_bindings::BOOL => { gl_context.gl_uniform_1iv(location, data, count) } gles2_bindings::FLOAT => gl_context.gl_uniform_1fv(location, data, count), gles2_bindings::INT_VEC2 | gles2_bindings::BOOL_VEC2 => { gl_context.gl_uniform_2iv(location, data, count) } gles2_bindings::FLOAT_VEC2 => gl_context.gl_uniform_2fv(location, data, count), gles2_bindings::INT_VEC3 | gles2_bindings::BOOL_VEC3 => { gl_context.gl_uniform_3iv(location, data, count) } gles2_bindings::FLOAT_VEC3 => gl_context.gl_uniform_3fv(location, data, count), gles2_bindings::INT_VEC4 | gles2_bindings::BOOL_VEC4 => { 
gl_context.gl_uniform_4iv(location, data, count) } gles2_bindings::FLOAT_VEC4 => gl_context.gl_uniform_4fv(location, data, count), gles2_bindings::FLOAT_MAT2 => gl_context.gl_uniform_matrix_2fv(location, data, count), gles2_bindings::FLOAT_MAT3 => gl_context.gl_uniform_matrix_3fv(location, data, count), gles2_bindings::FLOAT_MAT4 => gl_context.gl_uniform_matrix_4fv(location, data, count), _ => unimplemented!("Unknown GL type in set_uniform"), } }
39.054054
95
0.661246
6479df59a386f404a76b775c0746c2537096ce8b
645
// This powerful wrapper provides the ability to store a positive integer value. // Rewrite it using generics so that it supports wrapping ANY type. // Execute `rustlings hint generics2` for hints! struct Wrapper<T> { value: T, } impl<T> Wrapper<T> { pub fn new(value: T) -> Self { Wrapper { value } } } #[derive(PartialEq, Copy, Clone)] struct Foo(u8); #[cfg(test)] mod tests { use super::*; #[test] fn store_u32_in_wrapper() { assert_eq!(Wrapper::new(42).value, 42); } #[test] fn store_str_in_wrapper() { let v = Foo(123); assert!(Wrapper::new(v).value == v); } }
18.970588
80
0.603101
14924c260a17987b56d064f63b1a0457921f9a02
10,824
#![deny(rust_2018_idioms, warnings)] #![allow(clippy::type_complexity)] use std::future::Future; use std::rc::Rc; use std::task::{self, Context, Poll}; mod and_then; mod and_then_apply_fn; mod apply; mod apply_cfg; pub mod boxed; mod fn_service; mod fn_transform; mod map; mod map_config; mod map_err; mod map_init_err; mod pipeline; mod then; mod transform; mod transform_err; pub use self::apply::{apply_fn, apply_fn_factory}; pub use self::fn_service::{ fn_factory, fn_factory_with_config, fn_mut_service, fn_service, }; pub use self::fn_transform::fn_transform; pub use self::map_config::{map_config, map_config_service, unit_config}; pub use self::pipeline::{pipeline, pipeline_factory, Pipeline, PipelineFactory}; pub use self::transform::{apply, Transform}; #[doc(hidden)] pub use self::apply_cfg::{apply_cfg, apply_cfg_factory}; /// An asynchronous function from `Request` to a `Response`. /// /// `Service` represents a service that represanting interation, taking requests and giving back /// replies. You can think about service as a function with one argument and result as a return /// type. In general form it looks like `async fn(Req) -> Result<Res, Err>`. `Service` /// trait just generalizing form of this function. Each parameter described as an assotiated type. /// /// Services provides a symmetric and uniform API, same abstractions represents /// clients and servers. Services describe only `transforamtion` operation /// which encorouge to simplify api surface and phrases `value transformation`. /// That leads to simplier design of each service. That also allows better testability /// and better composition. /// /// Services could be represented in several different forms. In general, /// Service is a type that implements `Service` trait. 
/// /// ```rust,ignore /// struct MyService; /// /// impl Service for MyService { /// type Request = u8; /// type Response = u64; /// type Error = MyError; /// type Future = Pin<Box<Future<Output=Result<Self::Response, Self::Error>>>; /// /// fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { ... } /// /// fn call(&self, req: Self::Request) -> Self::Future { ... } /// } /// ``` /// /// Service can have mutable state that influence computation. /// This service could be rewritten as a simple function: /// /// ```rust,ignore /// async fn my_service(req: u8) -> Result<u64, MyError>; /// ``` pub trait Service { /// Requests handled by the service. type Request; /// Responses given by the service. type Response; /// Errors produced by the service. type Error; /// The future response value. type Future: Future<Output = Result<Self::Response, Self::Error>>; /// Returns `Ready` when the service is able to process requests. /// /// If the service is at capacity, then `Pending` is returned and the task /// is notified when the service becomes ready again. This function is /// expected to be called while on a task. /// /// This is a **best effort** implementation. False positives are permitted. /// It is permitted for the service to return `Ready` from a `poll_ready` /// call and the next invocation of `call` results in an error. /// /// There are several notes to consider: /// /// 1. `.poll_ready()` might be called on different task from actual service call. /// /// 2. In case of chained services, `.poll_ready()` get called for all services at once. fn poll_ready(&self, ctx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>; #[inline] #[allow(unused_variables)] /// Shutdown service. /// /// Returns `Ready` when the service is properly shutdowned. This method might be called /// after it returns `Ready`. 
fn poll_shutdown(&self, ctx: &mut task::Context<'_>, is_error: bool) -> Poll<()> { Poll::Ready(()) } /// Process the request and return the response asynchronously. /// /// This function is expected to be callable off task. As such, /// implementations should take care to not call `poll_ready`. If the /// service is at capacity and the request is unable to be handled, the /// returned `Future` should resolve to an error. /// /// Calling `call` without calling `poll_ready` is permitted. The /// implementation must be resilient to this fact. fn call(&self, req: Self::Request) -> Self::Future; #[inline] /// Map this service's output to a different type, returning a new service /// of the resulting type. /// /// This function is similar to the `Option::map` or `Iterator::map` where /// it will change the type of the underlying service. /// /// Note that this function consumes the receiving service and returns a /// wrapped version of it, similar to the existing `map` methods in the /// standard library. fn map<F, R>(self, f: F) -> crate::dev::Map<Self, F, R> where Self: Sized, F: FnMut(Self::Response) -> R, { crate::dev::Map::new(self, f) } #[inline] /// Map this service's error to a different error, returning a new service. /// /// This function is similar to the `Result::map_err` where it will change /// the error type of the underlying service. This is useful for example to /// ensure that services have the same error type. /// /// Note that this function consumes the receiving service and returns a /// wrapped version of it. fn map_err<F, E>(self, f: F) -> crate::dev::MapErr<Self, F, E> where Self: Sized, F: Fn(Self::Error) -> E, { crate::dev::MapErr::new(self, f) } } /// Creates new `Service` values. /// /// Acts as a service factory. This is useful for cases where new `Service` /// values must be produced. One case is a TCP server listener. 
The listener /// accepts new TCP streams, obtains a new `Service` value using the /// `ServiceFactory` trait, and uses that new `Service` value to process inbound /// requests on that new TCP stream. /// /// `Config` is a service factory configuration type. pub trait ServiceFactory { /// Requests handled by the service. type Request; /// Responses given by the service type Response; /// Errors produced by the service type Error; /// Service factory configuration type Config; /// The `Service` value created by this factory type Service: Service< Request = Self::Request, Response = Self::Response, Error = Self::Error, >; /// Errors produced while building a service. type InitError; /// The future of the `ServiceFactory` instance. type Future: Future<Output = Result<Self::Service, Self::InitError>>; /// Create and return a new service value asynchronously. fn new_service(&self, cfg: Self::Config) -> Self::Future; #[inline] /// Map this service's output to a different type, returning a new service /// of the resulting type. fn map<F, R>(self, f: F) -> crate::map::MapServiceFactory<Self, F, R> where Self: Sized, F: FnMut(Self::Response) -> R + Clone, { crate::map::MapServiceFactory::new(self, f) } #[inline] /// Map this service's error to a different error, returning a new service. fn map_err<F, E>(self, f: F) -> crate::map_err::MapErrServiceFactory<Self, F, E> where Self: Sized, F: Fn(Self::Error) -> E + Clone, { crate::map_err::MapErrServiceFactory::new(self, f) } #[inline] /// Map this factory's init error to a different error, returning a new service. 
fn map_init_err<F, E>(self, f: F) -> crate::map_init_err::MapInitErr<Self, F, E> where Self: Sized, F: Fn(Self::InitError) -> E + Clone, { crate::map_init_err::MapInitErr::new(self, f) } } impl<S> Service for Box<S> where S: Service + ?Sized, { type Request = S::Request; type Response = S::Response; type Error = S::Error; type Future = S::Future; #[inline] fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> { (**self).poll_ready(cx) } #[inline] fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> { (**self).poll_shutdown(cx, is_error) } #[inline] fn call(&self, request: Self::Request) -> S::Future { (**self).call(request) } } impl<S> Service for Rc<S> where S: Service, { type Request = S::Request; type Response = S::Response; type Error = S::Error; type Future = S::Future; fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { (**self).poll_ready(cx) } #[inline] fn poll_shutdown(&self, cx: &mut Context<'_>, is_error: bool) -> Poll<()> { (**self).poll_shutdown(cx, is_error) } fn call(&self, request: Self::Request) -> S::Future { (**self).call(request) } } impl<S> ServiceFactory for Rc<S> where S: ServiceFactory, { type Request = S::Request; type Response = S::Response; type Error = S::Error; type Config = S::Config; type Service = S::Service; type InitError = S::InitError; type Future = S::Future; fn new_service(&self, cfg: S::Config) -> S::Future { self.as_ref().new_service(cfg) } } /// Trait for types that can be converted to a `Service` pub trait IntoService<T> where T: Service, { /// Convert to a `Service` fn into_service(self) -> T; } /// Trait for types that can be converted to a `ServiceFactory` pub trait IntoServiceFactory<T> where T: ServiceFactory, { /// Convert `Self` to a `ServiceFactory` fn into_factory(self) -> T; } impl<T> IntoService<T> for T where T: Service, { fn into_service(self) -> T { self } } impl<T> IntoServiceFactory<T> for T where T: ServiceFactory, { fn into_factory(self) -> 
T { self } } /// Convert object of type `T` to a service `S` pub fn into_service<T, S>(tp: T) -> S where S: Service, T: IntoService<S>, { tp.into_service() } pub mod dev { pub use crate::apply::{Apply, ApplyServiceFactory}; pub use crate::fn_service::{ FnMutService, FnService, FnServiceConfig, FnServiceFactory, FnServiceNoConfig, }; pub use crate::map::{Map, MapServiceFactory}; pub use crate::map_config::{MapConfig, UnitConfig}; pub use crate::map_err::{MapErr, MapErrServiceFactory}; pub use crate::map_init_err::MapInitErr; pub use crate::transform::ApplyTransform; pub use crate::transform_err::TransformMapInitErr; } #[doc(hidden)] pub mod util { pub use ntex_util::future::Either; pub use ntex_util::future::Ready; pub use ntex_util::future::{lazy, Lazy}; }
30.150418
98
0.64357
4a06d98622bbdb78e553315bbda2b77238201c42
1,732
#![allow(clippy::module_inception)] #![allow(clippy::upper_case_acronyms)] #![allow(clippy::large_enum_variant)] #![allow(clippy::wrong_self_convention)] #![allow(clippy::should_implement_trait)] #![allow(clippy::blacklisted_name)] #![allow(clippy::vec_init_then_push)] #![allow(rustdoc::bare_urls)] #![warn(missing_docs)] //! <p>Amazon DocumentDB API documentation</p> // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. pub use error_meta::Error; pub use config::Config; mod aws_endpoint; /// Client and fluent builders for calling the service. #[cfg(feature = "client")] pub mod client; /// Configuration for the service. pub mod config; /// Errors that can occur when calling the service. pub mod error; mod error_meta; /// Input structures for operations. pub mod input; /// Data structures used by operation inputs/outputs. pub mod model; mod no_credentials; /// All operations that this crate can perform. pub mod operation; mod operation_deser; mod operation_ser; /// Output structures for operations. pub mod output; mod query_ser; mod rest_xml_wrapped_errors; mod xml_deser; /// Crate version number. pub static PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); pub use aws_smithy_http::byte_stream::ByteStream; pub use aws_smithy_http::result::SdkError; pub use aws_smithy_types::Blob; pub use aws_smithy_types::DateTime; static API_METADATA: aws_http::user_agent::ApiMetadata = aws_http::user_agent::ApiMetadata::new("docdb", PKG_VERSION); pub use aws_smithy_http::endpoint::Endpoint; pub use aws_smithy_types::retry::RetryConfig; pub use aws_types::app_name::AppName; pub use aws_types::region::Region; pub use aws_types::Credentials; #[cfg(feature = "client")] pub use client::Client;
31.490909
80
0.76963
e88f64bd427edd6643a2e143e3734c24829de847
20,507
#[doc = "Register `PWM_CAPINEN` reader"] pub struct R(crate::R<PWM_CAPINEN_SPEC>); impl core::ops::Deref for R { type Target = crate::R<PWM_CAPINEN_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<PWM_CAPINEN_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<PWM_CAPINEN_SPEC>) -> Self { R(reader) } } #[doc = "Register `PWM_CAPINEN` writer"] pub struct W(crate::W<PWM_CAPINEN_SPEC>); impl core::ops::Deref for W { type Target = crate::W<PWM_CAPINEN_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<PWM_CAPINEN_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<PWM_CAPINEN_SPEC>) -> Self { W(writer) } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN0_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN0_A> for bool { #[inline(always)] fn from(variant: CAPINEN0_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN0` reader - Capture Input Enable Bits"] pub struct CAPINEN0_R(crate::FieldReader<bool, CAPINEN0_A>); impl CAPINEN0_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN0_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN0_A { match self.bits { false => CAPINEN0_A::_0, true => CAPINEN0_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN0_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN0_A::_1 } } impl core::ops::Deref for CAPINEN0_R { type Target = crate::FieldReader<bool, CAPINEN0_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN0` writer - Capture Input Enable Bits"] pub struct CAPINEN0_W<'a> { w: &'a mut W, } impl<'a> CAPINEN0_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN0_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN0_A::_0) } #[doc = "PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN0_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN1_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN1_A> for bool { #[inline(always)] fn from(variant: CAPINEN1_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN1` reader - Capture Input Enable Bits"] pub struct CAPINEN1_R(crate::FieldReader<bool, CAPINEN1_A>); impl CAPINEN1_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN1_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN1_A { match self.bits { false => CAPINEN1_A::_0, true => CAPINEN1_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN1_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN1_A::_1 } } impl core::ops::Deref for CAPINEN1_R { type Target = crate::FieldReader<bool, CAPINEN1_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN1` writer - Capture Input Enable Bits"] pub struct CAPINEN1_W<'a> { w: &'a mut W, } impl<'a> CAPINEN1_W<'a> { 
#[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN1_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN1_A::_0) } #[doc = "PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN1_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN2_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN2_A> for bool { #[inline(always)] fn from(variant: CAPINEN2_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN2` reader - Capture Input Enable Bits"] pub struct CAPINEN2_R(crate::FieldReader<bool, CAPINEN2_A>); impl CAPINEN2_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN2_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN2_A { match self.bits { false => CAPINEN2_A::_0, true => CAPINEN2_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN2_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN2_A::_1 } } impl core::ops::Deref for CAPINEN2_R { type Target = crate::FieldReader<bool, CAPINEN2_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN2` writer - Capture Input Enable Bits"] pub struct CAPINEN2_W<'a> { w: &'a mut W, } impl<'a> CAPINEN2_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN2_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN2_A::_0) } #[doc = "PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN2_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN3_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN3_A> for bool { #[inline(always)] fn from(variant: CAPINEN3_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN3` reader - Capture Input Enable Bits"] pub struct CAPINEN3_R(crate::FieldReader<bool, CAPINEN3_A>); impl CAPINEN3_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN3_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN3_A { match self.bits { false => CAPINEN3_A::_0, true => CAPINEN3_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN3_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN3_A::_1 } } impl core::ops::Deref for CAPINEN3_R { type Target = crate::FieldReader<bool, CAPINEN3_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN3` writer - Capture Input Enable Bits"] pub struct CAPINEN3_W<'a> { w: &'a mut W, } impl<'a> 
CAPINEN3_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN3_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN3_A::_0) } #[doc = "PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN3_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN4_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN4_A> for bool { #[inline(always)] fn from(variant: CAPINEN4_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN4` reader - Capture Input Enable Bits"] pub struct CAPINEN4_R(crate::FieldReader<bool, CAPINEN4_A>); impl CAPINEN4_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN4_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN4_A { match self.bits { false => CAPINEN4_A::_0, true => CAPINEN4_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN4_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN4_A::_1 } } impl core::ops::Deref for CAPINEN4_R { type Target = crate::FieldReader<bool, CAPINEN4_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN4` writer - Capture Input Enable Bits"] pub struct CAPINEN4_W<'a> { w: &'a mut W, } impl<'a> CAPINEN4_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN4_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN4_A::_0) } #[doc = "PWM Channel capture input path Enabled. 
The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN4_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Capture Input Enable Bits\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum CAPINEN5_A { #[doc = "0: PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] _0 = 0, #[doc = "1: PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] _1 = 1, } impl From<CAPINEN5_A> for bool { #[inline(always)] fn from(variant: CAPINEN5_A) -> Self { variant as u8 != 0 } } #[doc = "Field `CAPINEN5` reader - Capture Input Enable Bits"] pub struct CAPINEN5_R(crate::FieldReader<bool, CAPINEN5_A>); impl CAPINEN5_R { pub(crate) fn new(bits: bool) -> Self { CAPINEN5_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> CAPINEN5_A { match self.bits { false => CAPINEN5_A::_0, true => CAPINEN5_A::_1, } } #[doc = "Checks if the value of the field is `_0`"] #[inline(always)] pub fn is_0(&self) -> bool { **self == CAPINEN5_A::_0 } #[doc = "Checks if the value of the field is `_1`"] #[inline(always)] pub fn is_1(&self) -> bool { **self == CAPINEN5_A::_1 } } impl core::ops::Deref for CAPINEN5_R { type Target = crate::FieldReader<bool, CAPINEN5_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `CAPINEN5` writer - Capture Input Enable Bits"] pub struct CAPINEN5_W<'a> { w: &'a mut W, } impl<'a> 
CAPINEN5_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: CAPINEN5_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "PWM Channel capture input path Disabled. The input of PWM channel capture function is always regarded as 0"] #[inline(always)] pub fn _0(self) -> &'a mut W { self.variant(CAPINEN5_A::_0) } #[doc = "PWM Channel capture input path Enabled. The input of PWM channel capture function comes from correlative multifunction pin"] #[inline(always)] pub fn _1(self) -> &'a mut W { self.variant(CAPINEN5_A::_1) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } impl R { #[doc = "Bit 0 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen0(&self) -> CAPINEN0_R { CAPINEN0_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen1(&self) -> CAPINEN1_R { CAPINEN1_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bit 2 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen2(&self) -> CAPINEN2_R { CAPINEN2_R::new(((self.bits >> 2) & 0x01) != 0) } #[doc = "Bit 3 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen3(&self) -> CAPINEN3_R { CAPINEN3_R::new(((self.bits >> 3) & 0x01) != 0) } #[doc = "Bit 4 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen4(&self) -> CAPINEN4_R { CAPINEN4_R::new(((self.bits >> 4) & 0x01) != 0) } #[doc = "Bit 5 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen5(&self) -> CAPINEN5_R { CAPINEN5_R::new(((self.bits >> 5) & 0x01) != 0) } } impl W { #[doc = "Bit 0 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen0(&mut self) -> 
CAPINEN0_W { CAPINEN0_W { w: self } } #[doc = "Bit 1 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen1(&mut self) -> CAPINEN1_W { CAPINEN1_W { w: self } } #[doc = "Bit 2 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen2(&mut self) -> CAPINEN2_W { CAPINEN2_W { w: self } } #[doc = "Bit 3 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen3(&mut self) -> CAPINEN3_W { CAPINEN3_W { w: self } } #[doc = "Bit 4 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen4(&mut self) -> CAPINEN4_W { CAPINEN4_W { w: self } } #[doc = "Bit 5 - Capture Input Enable Bits"] #[inline(always)] pub fn capinen5(&mut self) -> CAPINEN5_W { CAPINEN5_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "PWM Capture Input Enable Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [pwm_capinen](index.html) module"] pub struct PWM_CAPINEN_SPEC; impl crate::RegisterSpec for PWM_CAPINEN_SPEC { type Ux = u32; } #[doc = "`read()` method returns [pwm_capinen::R](R) reader structure"] impl crate::Readable for PWM_CAPINEN_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [pwm_capinen::W](W) writer structure"] impl crate::Writable for PWM_CAPINEN_SPEC { type Writer = W; } #[doc = "`reset()` method sets PWM_CAPINEN to value 0"] impl crate::Resettable for PWM_CAPINEN_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
32.8112
425
0.59487
5d8830abb4721ab1d1173c76d96e778c1add99f6
2,703
pub mod docker; #[cfg(not(windows))] pub mod posix_perm; pub mod serde_string; pub mod sys; #[cfg(windows)] pub mod win_perm; use std::{mem, time::Duration}; /// Provide a way to convert numeric types safely to i64 pub trait ToI64 { fn to_i64(self) -> i64; } impl ToI64 for usize { fn to_i64(self) -> i64 { if mem::size_of::<usize>() >= mem::size_of::<i64>() && self > i64::max_value() as usize { if cfg!(debug_assertions) { panic!("Tried to convert an out-of-range usize ({}) to i64", self); } else { error!("Tried to convert an out-of-range usize ({}) to i64; using \ i64::max_value()", self); i64::max_value() } } else { self as i64 } } } impl ToI64 for u64 { fn to_i64(self) -> i64 { if self > i64::max_value() as u64 { if cfg!(debug_assertions) { panic!("Tried to convert an out-of-range u64 ({}) to i64", self); } else { error!("Tried to convert an out-of-range u64 ({}) to i64; using i64::max_value()", self); i64::max_value() } } else { self as i64 } } } pub fn wait_for(delay: Duration, times: usize) -> impl IntoIterator<Item = Duration> { vec![delay].into_iter().cycle().take(times) } #[cfg(test)] mod tests { use super::*; #[test] fn conversion_of_usize_to_i64() { let just_right: usize = 42; let zero: usize = 0; assert_eq!(just_right.to_i64(), 42); assert_eq!(zero.to_i64(), 0); } #[test] #[should_panic] #[cfg(debug_assertions)] fn conversion_of_too_big_usize_panics_in_debug_mode() { let too_big = usize::max_value(); too_big.to_i64(); } #[test] #[cfg(not(debug_assertions))] fn conversion_of_too_big_usize_caps_in_release_mode() { let too_big = usize::max_value(); assert_eq!(too_big.to_i64(), i64::max_value()); } #[test] fn conversion_of_u64_to_i64() { let just_right: u64 = 42; let zero: u64 = 0; assert_eq!(just_right.to_i64(), 42); assert_eq!(zero.to_i64(), 0); } #[test] #[should_panic] #[cfg(debug_assertions)] fn conversion_of_too_big_u64_panics_in_debug_mode() { let too_big = u64::max_value(); too_big.to_i64(); } #[test] #[cfg(not(debug_assertions))] fn 
conversion_of_too_big_u64_caps_in_release_mode() { let too_big = u64::max_value(); assert_eq!(too_big.to_i64(), i64::max_value()); } }
25.5
98
0.54458
016bac1a78f37d7a39ec118083f17ad0f9c1f6ae
723
use serde_json::Value;

use super::super::errors;
use super::super::scope;

#[allow(missing_copy_implementations)]
pub struct Not {
    pub url: url::Url,
}

impl super::Validator for Not {
    /// Inverts the referenced schema: validation fails when the inner
    /// schema *accepts* `val`, and records the URL as missing when the
    /// schema cannot be resolved in `scope`.
    fn validate(&self, val: &Value, path: &str, scope: &scope::Scope) -> super::ValidationState {
        let mut state = super::ValidationState::new();

        match scope.resolve(&self.url) {
            Some(inner) => {
                // The inner schema passing is exactly the failure case for `not`.
                let inner_result = inner.validate_in(val, path);
                if inner_result.is_valid() {
                    state.errors.push(Box::new(errors::Not {
                        path: path.to_string(),
                    }));
                }
            }
            None => state.missing.push(self.url.clone()),
        }

        state
    }
}
24.931034
97
0.554633
f4f21f1a0648f86346b77b4b25c2fbec395b8f84
1,340
use serde::{Deserialize, Serialize};
use std::borrow::Cow;
use vila::{EmptyResponse, Request, RequestData};

pub mod matchers;

// Request carrying no payload (`Data = ()`) and expecting no response body.
pub struct EmptyHello;

impl Request for EmptyHello {
    type Data = ();
    type Response = EmptyResponse;

    fn endpoint(&self) -> Cow<str> {
        "/hello".into()
    }
}

// Payload delivered as URL query parameters (see `data()` below).
#[derive(Serialize)]
pub struct QueryHello {
    pub name: String,
}

// Same payload shape, delivered as a JSON request body.
#[derive(Serialize)]
pub struct JsonHello {
    pub name: String,
}

// Same payload shape, delivered as a form-encoded request body.
#[derive(Serialize)]
pub struct FormHello {
    pub name: String,
}

// Response shape shared by the three payload-carrying hello requests.
#[derive(Deserialize, Serialize, Debug, PartialEq)]
pub struct NameGreeting {
    pub message: String,
}

impl Request for QueryHello {
    type Data = Self;
    type Response = NameGreeting;

    // All hello variants target the same endpoint; only the encoding differs.
    fn endpoint(&self) -> Cow<str> {
        "/hello".into()
    }

    fn data(&self) -> RequestData<&Self> {
        RequestData::Query(&self)
    }
}

impl Request for JsonHello {
    type Data = Self;
    type Response = NameGreeting;

    fn endpoint(&self) -> Cow<str> {
        "/hello".into()
    }

    fn data(&self) -> RequestData<&Self> {
        RequestData::Json(&self)
    }
}

impl Request for FormHello {
    type Data = Self;
    type Response = NameGreeting;

    fn endpoint(&self) -> Cow<str> {
        "/hello".into()
    }

    fn data(&self) -> RequestData<&Self> {
        RequestData::Form(&self)
    }
}
17.631579
51
0.607463
76891f44e2991fb1c98fe1f96c1a88abcf00c3c7
408
// Convenience re-export of the crate's queue-service traits and option types,
// presumably so downstream code can import them all from one path — confirm
// against the crate root before relying on this as the canonical prelude.
pub use crate::{
    IntoQueueNameClient, IntoQueueServiceClient, MessageBodyRequired, MessageBodySupport,
    MessageTTLRequired, MessageTTLSupport, NumberOfMessagesOption, NumberOfMessagesSupport,
    PopReceiptRequired, PopReceiptSupport, QueueNameService, QueueService,
    VisibilityTimeoutOption, VisibilityTimeoutRequired, VisibilityTimeoutSupport,
    WithQueueNameClient, WithQueueServiceClient,
};
51
99
0.845588
e4697f172644dba9049bc6b510c45ed73fd24213
695
extern crate std;

use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Mutex;

use serde_json::Value;

use chatroom::ChatRoom;
use user::User;

lazy_static! {
    // Process-wide (sender, receiver) pair for SrvMsg events. Each half is
    // wrapped in its own Mutex because mpsc endpoints are not Sync, which a
    // static requires; senders lock `.0`, the consumer locks `.1`.
    pub static ref SRV_MSG: (Mutex<Sender<SrvMsg>>, Mutex<Receiver<SrvMsg>>) = {
        let (tx, rx) = channel();
        (Mutex::new(tx), Mutex::new(rx))
    };
}

// Events delivered through SRV_MSG. Variant semantics are defined by the
// consumer of the receiver elsewhere in the crate — verify there before
// adding or repurposing variants.
#[derive(Debug)]
pub enum SrvMsg {
    ShowMessageBox(String),
    ShowVerifyImage(String),
    AddContact(User),
    AddGroup(ChatRoom),
    MessageReceived(Value),
    // (chat id, message payload) — TODO confirm the i32's meaning with the consumer.
    AppendImageMessage(i32, Value),
    RefreshChatMembers(String),
    YieldEvent,
    QuitEvent,
}

// Sends `m` through the global channel.
// Panics if the sender mutex is poisoned or the receiver has been dropped.
pub fn send_server_message(m: SrvMsg) {
    SRV_MSG.0.lock().unwrap().send(m).unwrap();
}
20.441176
80
0.656115
fc2be53d73d2a14e7e00b01d5027766987cf09e6
5,207
use crate::modules::{RevisionDateNode, RevisionNode};
use crate::parser::*;
use crate::Node;
use nom::branch::alt;
use nom::bytes::complete::{tag, take_while_m_n};
use nom::character::complete::{char, multispace0, multispace1};
use nom::error::{make_error, ErrorKind};
use nom::multi::many0;
use nom::sequence::delimited;
use nom::Err;
use nom::IResult;

// Parses a bare revision date in YYYY-MM-DD format (digit counts enforced,
// calendar validity NOT checked) into a `Node::Revision` whose
// description/reference are filled in later by `revision_parse`.
fn revision_date_parse(s: &str) -> IResult<&str, Node> {
    let (s, year) = take_while_m_n(4, 4, |c: char| c.is_ascii_digit())(s)?;
    let (s, _) = char('-')(s)?;
    let (s, month) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(s)?;
    let (s, _) = char('-')(s)?;
    let (s, day) = take_while_m_n(2, 2, |c: char| c.is_ascii_digit())(s)?;
    let n = RevisionNode::new(std::format!("{}-{}-{}", year, month, day));
    Ok((s, Node::Revision(Box::new(n))))
}

// Quoted "YYYY-MM-DD" format: the same date parser between double quotes.
fn revision_date_quoted_parse(s: &str) -> IResult<&str, Node> {
    let (s, n) = delimited(char('"'), revision_date_parse, char('"'))(s)?;
    Ok((s, n))
}

// Accepts either the bare or the quoted date form. The bare form is tried
// first; on `"...` input it fails at the first digit check and `alt` falls
// through to the quoted parser.
fn revision_date_token_parse(s: &str) -> IResult<&str, Node> {
    alt((revision_date_parse, revision_date_quoted_parse))(s)
}

// Parses the optional `{ description ...; reference ...; }` body of a
// revision statement and returns the sub-statement nodes in source order.
// (`description_parse`/`reference_parse` come from `crate::parser::*`.)
fn revision_sub_parse(s: &str) -> IResult<&str, Vec<Node>> {
    let (s, _) = char('{')(s)?;
    let (s, nodes) = many0(alt((description_parse, reference_parse)))(s)?;
    let (s, _) = multispace0(s)?;
    let (s, _) = char('}')(s)?;
    Ok((s, nodes))
}

// Parses a full `revision <date> { ... }` or `revision <date>;` statement.
// Sub-statement values (description/reference) are folded into the
// RevisionNode produced by the date parser.
pub fn revision_parse(s: &str) -> IResult<&str, Node> {
    let (s, _) = multispace0(s)?;
    let (s, _) = tag("revision")(s)?;
    let (s, _) = multispace1(s)?;
    let (s, v) = revision_date_token_parse(s)?;
    let (s, _) = multispace0(s)?;
    // Either a `{ ... }` body or a bare `;` terminator
    // (`semicolon_end_parse` comes from `crate::parser::*`).
    let (s, subs) = alt((revision_sub_parse, semicolon_end_parse))(s)?;
    if let Node::Revision(mut node) = v {
        for sub in &subs {
            match sub {
                Node::Description(n) => {
                    node.description = Some(n.name.to_owned());
                }
                Node::Reference(n) => {
                    node.reference = Some(n.name.to_owned());
                }
                _ => {}
            }
        }
        return Ok((s, Node::Revision(node)));
    }
    // Defensive fallback; revision_date_token_parse only yields
    // Node::Revision on success, so this looks unreachable — confirm if
    // the token parser ever changes.
    Ok((s, Node::EmptyNode))
}

// For import/include.
// Parses a `revision-date YYYY-MM-DD` statement (used inside import/include)
// into a `Node::RevisionDate`. Note the trailing `;` is NOT consumed here;
// only surrounding whitespace is — the caller handles the terminator.
pub fn revision_date_stmt_parse(s: &str) -> IResult<&str, Node> {
    let (s, _) = multispace0(s)?;
    let (s, _) = tag("revision-date")(s)?;
    let (s, _) = multispace1(s)?;
    let (s, v) = revision_date_token_parse(s)?;
    let (s, _) = multispace0(s)?;
    if let Node::Revision(n) = v {
        // Re-wrap the parsed date as a RevisionDateNode.
        let node = RevisionDateNode::new(n.name);
        Ok((s, Node::RevisionDate(Box::new(node))))
    } else {
        // Defensive: the token parser only yields Node::Revision on success.
        Err(Err::Error(make_error(s, ErrorKind::Fix)))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // Bare YYYY-MM-DD is accepted by both the date and token parsers.
    #[test]
    fn revision_date_parse_test() {
        let revision = "2020-08-10";
        let n = RevisionNode {
            name: String::from("2020-08-10"),
            description: None,
            reference: None,
        };
        let node = Node::Revision(Box::new(n));
        let (_, v) = revision_date_parse(revision).unwrap();
        assert_eq!(v, node);
        let (_, v) = revision_date_token_parse(revision).unwrap();
        assert_eq!(v, node);
    }

    // Quoted "YYYY-MM-DD" is accepted by the quoted and token parsers.
    #[test]
    fn revision_date_quoted_parse_test() {
        let revision = "\"2020-08-11\"";
        let n = RevisionNode {
            name: String::from("2020-08-11"),
            description: None,
            reference: None,
        };
        let node = Node::Revision(Box::new(n));
        let (_, v) = revision_date_quoted_parse(revision).unwrap();
        assert_eq!(v, node);
        let (_, v) = revision_date_token_parse(revision).unwrap();
        assert_eq!(v, node);
    }

    // Full revision statement with description and reference sub-statements.
    #[test]
    fn revision_statement_test() {
        let revision = r#" revision 2018-02-20 { description "Updated to support NMDA."; reference "RFC 8343: A YANG Data Model for Interface Management"; } "#;
        let n = RevisionNode {
            name: String::from("2018-02-20"),
            description: Some(String::from("Updated to support NMDA.")),
            reference: Some(String::from(
                "RFC 8343: A YANG Data Model for Interface Management",
            )),
        };
        let node = Node::Revision(Box::new(n));
        let (_, v) = revision_parse(revision).unwrap();
        assert_eq!(v, node);
    }

    // Bare `revision <date>;` form with no sub-statements.
    #[test]
    fn revision_single_statement_test() {
        let revision = r#" revision 2018-02-20; "#;
        let n = RevisionNode {
            name: String::from("2018-02-20"),
            description: None,
            reference: None,
        };
        let node = Node::Revision(Box::new(n));
        let (_, v) = revision_parse(revision).unwrap();
        assert_eq!(v, node);
    }

    // `revision-date` statement form used by import/include.
    #[test]
    fn revision_date_statement_test() {
        let revision = r#" revision-date 2018-02-20; "#;
        let n = RevisionDateNode {
            name: String::from("2018-02-20"),
        };
        let node = Node::RevisionDate(Box::new(n));
        let (_, v) = revision_date_stmt_parse(revision).unwrap();
        assert_eq!(v, node);
    }
}
29.754286
76
0.541195
08379240ba6ca1d64419fe35d76845f4fa0d6da6
27,044
use crate::context::CommandRegistry; use crate::errors::ShellError; use crate::evaluate::{evaluate_baseline_expr, Scope}; use crate::object::TaggedDictBuilder; use crate::parser::{hir, Operator}; use crate::prelude::*; use crate::Text; use chrono::{DateTime, Utc}; use chrono_humanize::Humanize; use derive_new::new; use ordered_float::OrderedFloat; use serde::{Deserialize, Serialize}; use std::fmt; use std::path::PathBuf; use std::time::SystemTime; #[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq, new, Serialize, Deserialize)] pub struct OF64 { pub(crate) inner: OrderedFloat<f64>, } impl OF64 { pub(crate) fn into_inner(&self) -> f64 { self.inner.into_inner() } } impl From<f64> for OF64 { fn from(float: f64) -> Self { OF64::new(OrderedFloat(float)) } } #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Deserialize, Serialize)] pub enum Primitive { Nothing, Int(i64), #[allow(unused)] Float(OF64), Bytes(u64), String(String), Boolean(bool), Date(DateTime<Utc>), Path(PathBuf), // Stream markers (used as bookend markers rather than actual values) BeginningOfStream, EndOfStream, } impl Primitive { pub(crate) fn type_name(&self) -> String { use Primitive::*; match self { Nothing => "nothing", BeginningOfStream => "beginning-of-stream", EndOfStream => "end-of-stream", Path(_) => "path", Int(_) => "int", Float(_) => "float", Bytes(_) => "bytes", String(_) => "string", Boolean(_) => "boolean", Date(_) => "date", } .to_string() } pub(crate) fn debug(&self, f: &mut fmt::Formatter) -> fmt::Result { use Primitive::*; match self { Nothing => write!(f, "Nothing"), BeginningOfStream => write!(f, "BeginningOfStream"), EndOfStream => write!(f, "EndOfStream"), Int(int) => write!(f, "{}", int), Path(path) => write!(f, "{}", path.display()), Float(float) => write!(f, "{:?}", float), Bytes(bytes) => write!(f, "{}", bytes), String(string) => write!(f, "{:?}", string), Boolean(boolean) => write!(f, "{}", boolean), Date(date) => write!(f, "{}", date), } } pub fn format(&self, 
field_name: Option<&String>) -> String { match self { Primitive::Nothing => String::new(), Primitive::BeginningOfStream => String::new(), Primitive::EndOfStream => String::new(), Primitive::Path(p) => format!("{}", p.display()), Primitive::Bytes(b) => { let byte = byte_unit::Byte::from_bytes(*b as u128); if byte.get_bytes() == 0u128 { return "—".to_string(); } let byte = byte.get_appropriate_unit(false); match byte.get_unit() { byte_unit::ByteUnit::B => format!("{} B ", byte.get_value()), _ => format!("{}", byte.format(1)), } } Primitive::Int(i) => format!("{}", i), Primitive::Float(OF64 { inner: f }) => format!("{:.*}", 2, f.into_inner()), Primitive::String(s) => format!("{}", s), Primitive::Boolean(b) => match (b, field_name) { (true, None) => format!("Yes"), (false, None) => format!("No"), (true, Some(s)) if !s.is_empty() => format!("{}", s), (false, Some(s)) if !s.is_empty() => format!(""), (true, Some(_)) => format!("Yes"), (false, Some(_)) => format!("No"), }, Primitive::Date(d) => format!("{}", d.humanize()), } } pub fn style(&self) -> &'static str { match self { Primitive::Bytes(0) => "c", // centre 'missing' indicator Primitive::Int(_) | Primitive::Bytes(_) | Primitive::Float(_) => "r", _ => "", } } } #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, new, Serialize)] pub struct Operation { pub(crate) left: Value, pub(crate) operator: Operator, pub(crate) right: Value, } #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, Clone, Serialize, Deserialize, new)] pub struct Block { pub(crate) expressions: Vec<hir::Expression>, pub(crate) source: Text, pub(crate) span: Span, } impl Block { pub fn invoke(&self, value: &Tagged<Value>) -> Result<Tagged<Value>, ShellError> { let scope = Scope::new(value.clone()); if self.expressions.len() == 0 { return Ok(Value::nothing().simple_spanned(self.span)); } let mut last = None; for expr in self.expressions.iter() { last = Some(evaluate_baseline_expr( &expr, &CommandRegistry::empty(), &scope, &self.source, )?) 
} Ok(last.unwrap()) } } #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Serialize, Deserialize)] pub enum Value { Primitive(Primitive), Object(crate::object::Dictionary), #[serde(with = "serde_bytes")] Binary(Vec<u8>), List(Vec<Tagged<Value>>), #[allow(unused)] Block(Block), } pub fn debug_list(values: &Vec<Tagged<Value>>) -> ValuesDebug<'_> { ValuesDebug { values } } pub struct ValuesDebug<'a> { values: &'a Vec<Tagged<Value>>, } impl fmt::Debug for ValuesDebug<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list() .entries(self.values.iter().map(|i| i.debug())) .finish() } } pub struct ValueDebug<'a> { value: &'a Tagged<Value>, } impl fmt::Debug for ValueDebug<'_> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.value.item() { Value::Primitive(p) => p.debug(f), Value::Object(o) => o.debug(f), Value::List(l) => debug_list(l).fmt(f), Value::Block(_) => write!(f, "[[block]]"), Value::Binary(_) => write!(f, "[[binary]]"), } } } impl Tagged<Value> { pub(crate) fn tagged_type_name(&self) -> Tagged<String> { let name = self.type_name(); Tagged::from_item(name, self.tag()) } } impl std::convert::TryFrom<&Tagged<Value>> for Block { type Error = ShellError; fn try_from(value: &Tagged<Value>) -> Result<Block, ShellError> { match value.item() { Value::Block(block) => Ok(block.clone()), v => Err(ShellError::type_error( "Block", value.copy_span(v.type_name()), )), } } } impl std::convert::TryFrom<&Tagged<Value>> for i64 { type Error = ShellError; fn try_from(value: &Tagged<Value>) -> Result<i64, ShellError> { match value.item() { Value::Primitive(Primitive::Int(int)) => Ok(*int), v => Err(ShellError::type_error( "Integer", value.copy_span(v.type_name()), )), } } } impl std::convert::TryFrom<&Tagged<Value>> for String { type Error = ShellError; fn try_from(value: &Tagged<Value>) -> Result<String, ShellError> { match value.item() { Value::Primitive(Primitive::String(s)) => Ok(s.clone()), v => Err(ShellError::type_error( "String", 
value.copy_span(v.type_name()), )), } } } impl std::convert::TryFrom<&Tagged<Value>> for Vec<u8> { type Error = ShellError; fn try_from(value: &Tagged<Value>) -> Result<Vec<u8>, ShellError> { match value.item() { Value::Binary(b) => Ok(b.clone()), v => Err(ShellError::type_error( "Binary", value.copy_span(v.type_name()), )), } } } impl<'a> std::convert::TryFrom<&'a Tagged<Value>> for &'a crate::object::Dictionary { type Error = ShellError; fn try_from(value: &'a Tagged<Value>) -> Result<&'a crate::object::Dictionary, ShellError> { match value.item() { Value::Object(d) => Ok(d), v => Err(ShellError::type_error( "Dictionary", value.copy_span(v.type_name()), )), } } } #[derive(Serialize, Deserialize)] pub enum Switch { Present, Absent, } impl Switch { #[allow(unused)] pub fn is_present(&self) -> bool { match self { Switch::Present => true, Switch::Absent => false, } } } impl std::convert::TryFrom<Option<&Tagged<Value>>> for Switch { type Error = ShellError; fn try_from(value: Option<&Tagged<Value>>) -> Result<Switch, ShellError> { match value { None => Ok(Switch::Absent), Some(value) => match value.item() { Value::Primitive(Primitive::Boolean(true)) => Ok(Switch::Present), v => Err(ShellError::type_error( "Boolean", value.copy_span(v.type_name()), )), }, } } } impl Tagged<Value> { pub(crate) fn debug(&self) -> ValueDebug<'_> { ValueDebug { value: self } } } impl Value { pub(crate) fn type_name(&self) -> String { match self { Value::Primitive(p) => p.type_name(), Value::Object(_) => format!("object"), Value::List(_) => format!("list"), Value::Block(_) => format!("block"), Value::Binary(_) => format!("binary"), } } // TODO: This is basically a legacy construct, I think pub fn data_descriptors(&self) -> Vec<String> { match self { Value::Primitive(_) => vec![], Value::Object(o) => o .entries .keys() .into_iter() .map(|x| x.to_string()) .collect(), Value::Block(_) => vec![], Value::List(_) => vec![], Value::Binary(_) => vec![], } } pub(crate) fn get_data_by_key(&self, 
name: &str) -> Option<&Tagged<Value>> { match self { Value::Object(o) => o.get_data_by_key(name), Value::List(l) => { for item in l { match item { Tagged { item: Value::Object(o), .. } => match o.get_data_by_key(name) { Some(v) => return Some(v), None => {} }, _ => {} } } None } _ => None, } } #[allow(unused)] pub(crate) fn get_data_by_index(&self, idx: usize) -> Option<&Tagged<Value>> { match self { Value::List(l) => l.iter().nth(idx), _ => None, } } pub fn get_data_by_path(&self, tag: Tag, path: &str) -> Option<Tagged<&Value>> { let mut current = self; for p in path.split(".") { match current.get_data_by_key(p) { Some(v) => current = v, None => return None, } } Some(Tagged::from_item(current, tag)) } pub fn insert_data_at_path( &self, tag: Tag, path: &str, new_value: Value, ) -> Option<Tagged<Value>> { let mut new_obj = self.clone(); let split_path: Vec<_> = path.split(".").collect(); if let Value::Object(ref mut o) = new_obj { let mut current = o; if split_path.len() == 1 { // Special case for inserting at the top level current .entries .insert(path.to_string(), Tagged::from_item(new_value, tag)); return Some(Tagged::from_item(new_obj, tag)); } for idx in 0..split_path.len() { match current.entries.get_mut(split_path[idx]) { Some(next) => { if idx == (split_path.len() - 2) { match &mut next.item { Value::Object(o) => { o.entries.insert( split_path[idx + 1].to_string(), Tagged::from_item(new_value, tag), ); } _ => {} } return Some(Tagged::from_item(new_obj, tag)); } else { match next.item { Value::Object(ref mut o) => { current = o; } _ => return None, } } } _ => return None, } } } None } pub fn replace_data_at_path( &self, tag: Tag, path: &str, replaced_value: Value, ) -> Option<Tagged<Value>> { let mut new_obj = self.clone(); let split_path: Vec<_> = path.split(".").collect(); if let Value::Object(ref mut o) = new_obj { let mut current = o; for idx in 0..split_path.len() { match current.entries.get_mut(split_path[idx]) { Some(next) => { if idx == 
(split_path.len() - 1) { *next = Tagged::from_item(replaced_value, tag); return Some(Tagged::from_item(new_obj, tag)); } else { match next.item { Value::Object(ref mut o) => { current = o; } _ => return None, } } } _ => return None, } } } None } pub fn get_data(&self, desc: &String) -> MaybeOwned<'_, Value> { match self { p @ Value::Primitive(_) => MaybeOwned::Borrowed(p), Value::Object(o) => o.get_data(desc), Value::Block(_) => MaybeOwned::Owned(Value::nothing()), Value::List(_) => MaybeOwned::Owned(Value::nothing()), Value::Binary(_) => MaybeOwned::Owned(Value::nothing()), } } pub(crate) fn format_leaf(&self, desc: Option<&String>) -> String { match self { Value::Primitive(p) => p.format(desc), Value::Block(b) => itertools::join( b.expressions .iter() .map(|e| e.source(&b.source).to_string()), "; ", ), Value::Object(_) => format!("[{}]", self.type_name()), Value::List(l) => format!( "[{} {}]", l.len(), if l.len() == 1 { "item" } else { "items" } ), Value::Binary(_) => format!("<binary>"), } } pub(crate) fn style_leaf(&self) -> &'static str { match self { Value::Primitive(p) => p.style(), _ => "", } } #[allow(unused)] pub(crate) fn compare(&self, operator: &Operator, other: &Value) -> Result<bool, (String, String)> { match operator { _ => { let coerced = coerce_compare(self, other)?; let ordering = coerced.compare(); use std::cmp::Ordering; let result = match (operator, ordering) { (Operator::Equal, Ordering::Equal) => true, (Operator::NotEqual, Ordering::Less) | (Operator::NotEqual, Ordering::Greater) => true, (Operator::LessThan, Ordering::Less) => true, (Operator::GreaterThan, Ordering::Greater) => true, (Operator::GreaterThanOrEqual, Ordering::Greater) | (Operator::GreaterThanOrEqual, Ordering::Equal) => true, (Operator::LessThanOrEqual, Ordering::Less) | (Operator::LessThanOrEqual, Ordering::Equal) => true, _ => false, }; Ok(result) } } } #[allow(unused)] pub(crate) fn is_string(&self, expected: &str) -> bool { match self { 
Value::Primitive(Primitive::String(s)) if s == expected => true, other => false, } } // pub(crate) fn as_pair(&self) -> Result<(Tagged<Value>, Tagged<Value>), ShellError> { // match self { // Value::List(list) if list.len() == 2 => Ok((list[0].clone(), list[1].clone())), // other => Err(ShellError::string(format!( // "Expected pair, got {:?}", // other // ))), // } // } pub(crate) fn as_string(&self) -> Result<String, ShellError> { match self { Value::Primitive(Primitive::String(s)) => Ok(s.clone()), Value::Primitive(Primitive::Boolean(x)) => Ok(format!("{}", x)), Value::Primitive(Primitive::Float(x)) => Ok(format!("{}", x.into_inner())), Value::Primitive(Primitive::Int(x)) => Ok(format!("{}", x)), Value::Primitive(Primitive::Bytes(x)) => Ok(format!("{}", x)), // TODO: this should definitely be more general with better errors other => Err(ShellError::string(format!( "Expected string, got {:?}", other ))), } } pub(crate) fn as_i64(&self) -> Result<i64, ShellError> { match self { Value::Primitive(Primitive::Int(i)) => Ok(*i), Value::Primitive(Primitive::Bytes(b)) => Ok(*b as i64), // TODO: this should definitely be more general with better errors other => Err(ShellError::string(format!( "Expected integer, got {:?}", other ))), } } pub(crate) fn is_true(&self) -> bool { match self { Value::Primitive(Primitive::Boolean(true)) => true, _ => false, } } pub fn string(s: impl Into<String>) -> Value { Value::Primitive(Primitive::String(s.into())) } pub fn path(s: impl Into<PathBuf>) -> Value { Value::Primitive(Primitive::Path(s.into())) } pub fn bytes(s: impl Into<u64>) -> Value { Value::Primitive(Primitive::Bytes(s.into())) } pub fn int(s: impl Into<i64>) -> Value { Value::Primitive(Primitive::Int(s.into())) } pub fn float(s: impl Into<OF64>) -> Value { Value::Primitive(Primitive::Float(s.into())) } pub fn boolean(s: impl Into<bool>) -> Value { Value::Primitive(Primitive::Boolean(s.into())) } pub fn system_date(s: SystemTime) -> Value { 
Value::Primitive(Primitive::Date(s.into())) } #[allow(unused)] pub fn date_from_str(s: &str) -> Result<Value, ShellError> { let date = DateTime::parse_from_rfc3339(s) .map_err(|err| ShellError::string(&format!("Date parse error: {}", err)))?; let date = date.with_timezone(&chrono::offset::Utc); Ok(Value::Primitive(Primitive::Date(date))) } pub fn nothing() -> Value { Value::Primitive(Primitive::Nothing) } } impl Tagged<Value> { pub(crate) fn as_path(&self) -> Result<PathBuf, ShellError> { match self.item() { Value::Primitive(Primitive::Path(path)) => Ok(path.clone()), other => Err(ShellError::type_error( "Path", other.type_name().tagged(self.span()), )), } } } pub(crate) fn select_fields(obj: &Value, fields: &[String], tag: impl Into<Tag>) -> Tagged<Value> { let mut out = TaggedDictBuilder::new(tag); let descs = obj.data_descriptors(); for field in fields { match descs.iter().find(|d| *d == field) { None => out.insert(field, Value::nothing()), Some(desc) => out.insert(desc.clone(), obj.get_data(desc).borrow().clone()), } } out.into_tagged_value() } pub(crate) fn reject_fields(obj: &Value, fields: &[String], tag: impl Into<Tag>) -> Tagged<Value> { let mut out = TaggedDictBuilder::new(tag); let descs = obj.data_descriptors(); for desc in descs { if fields.iter().any(|field| *field == desc) { continue; } else { out.insert(desc.clone(), obj.get_data(&desc).borrow().clone()) } } out.into_tagged_value() } #[allow(unused)] pub(crate) fn find(obj: &Value, field: &str, op: &Operator, rhs: &Value) -> bool { let descs = obj.data_descriptors(); match descs.iter().find(|d| *d == field) { None => false, Some(desc) => { let v = obj.get_data(desc).borrow().clone(); match v { Value::Primitive(Primitive::Boolean(b)) => match (op, rhs) { (Operator::Equal, Value::Primitive(Primitive::Boolean(b2))) => b == *b2, (Operator::NotEqual, Value::Primitive(Primitive::Boolean(b2))) => b != *b2, _ => false, }, Value::Primitive(Primitive::Bytes(i)) => match (op, rhs) { (Operator::LessThan, 
Value::Primitive(Primitive::Int(i2))) => i < (*i2 as u64), (Operator::GreaterThan, Value::Primitive(Primitive::Int(i2))) => { i > (*i2 as u64) } (Operator::LessThanOrEqual, Value::Primitive(Primitive::Int(i2))) => { i <= (*i2 as u64) } (Operator::GreaterThanOrEqual, Value::Primitive(Primitive::Int(i2))) => { i >= (*i2 as u64) } (Operator::Equal, Value::Primitive(Primitive::Int(i2))) => i == (*i2 as u64), (Operator::NotEqual, Value::Primitive(Primitive::Int(i2))) => i != (*i2 as u64), _ => false, }, Value::Primitive(Primitive::Int(i)) => match (op, rhs) { (Operator::LessThan, Value::Primitive(Primitive::Int(i2))) => i < *i2, (Operator::GreaterThan, Value::Primitive(Primitive::Int(i2))) => i > *i2, (Operator::LessThanOrEqual, Value::Primitive(Primitive::Int(i2))) => i <= *i2, (Operator::GreaterThanOrEqual, Value::Primitive(Primitive::Int(i2))) => { i >= *i2 } (Operator::Equal, Value::Primitive(Primitive::Int(i2))) => i == *i2, (Operator::NotEqual, Value::Primitive(Primitive::Int(i2))) => i != *i2, _ => false, }, Value::Primitive(Primitive::Float(i)) => match (op, rhs) { (Operator::LessThan, Value::Primitive(Primitive::Float(i2))) => i < *i2, (Operator::GreaterThan, Value::Primitive(Primitive::Float(i2))) => i > *i2, (Operator::LessThanOrEqual, Value::Primitive(Primitive::Float(i2))) => i <= *i2, (Operator::GreaterThanOrEqual, Value::Primitive(Primitive::Float(i2))) => { i >= *i2 } (Operator::Equal, Value::Primitive(Primitive::Float(i2))) => i == *i2, (Operator::NotEqual, Value::Primitive(Primitive::Float(i2))) => i != *i2, (Operator::LessThan, Value::Primitive(Primitive::Int(i2))) => { (i.into_inner()) < *i2 as f64 } (Operator::GreaterThan, Value::Primitive(Primitive::Int(i2))) => { i.into_inner() > *i2 as f64 } (Operator::LessThanOrEqual, Value::Primitive(Primitive::Int(i2))) => { i.into_inner() <= *i2 as f64 } (Operator::GreaterThanOrEqual, Value::Primitive(Primitive::Int(i2))) => { i.into_inner() >= *i2 as f64 } (Operator::Equal, 
Value::Primitive(Primitive::Int(i2))) => { i.into_inner() == *i2 as f64 } (Operator::NotEqual, Value::Primitive(Primitive::Int(i2))) => { i.into_inner() != *i2 as f64 } _ => false, }, Value::Primitive(Primitive::String(s)) => match (op, rhs) { (Operator::Equal, Value::Primitive(Primitive::String(s2))) => s == *s2, (Operator::NotEqual, Value::Primitive(Primitive::String(s2))) => s != *s2, _ => false, }, _ => false, } } } } enum CompareValues { Ints(i64, i64), Floats(OF64, OF64), Bytes(i128, i128), String(String, String), } impl CompareValues { fn compare(&self) -> std::cmp::Ordering { match self { CompareValues::Ints(left, right) => left.cmp(right), CompareValues::Floats(left, right) => left.cmp(right), CompareValues::Bytes(left, right) => left.cmp(right), CompareValues::String(left, right) => left.cmp(right), } } } fn coerce_compare(left: &Value, right: &Value) -> Result<CompareValues, (String, String)> { match (left, right) { (Value::Primitive(left), Value::Primitive(right)) => coerce_compare_primitive(left, right), _ => Err((left.type_name(), right.type_name())), } } fn coerce_compare_primitive( left: &Primitive, right: &Primitive, ) -> Result<CompareValues, (String, String)> { use Primitive::*; Ok(match (left, right) { (Int(left), Int(right)) => CompareValues::Ints(*left, *right), (Float(left), Int(right)) => CompareValues::Floats(*left, (*right as f64).into()), (Int(left), Float(right)) => CompareValues::Floats((*left as f64).into(), *right), (Int(left), Bytes(right)) => CompareValues::Bytes(*left as i128, *right as i128), (Bytes(left), Int(right)) => CompareValues::Bytes(*left as i128, *right as i128), (String(left), String(right)) => CompareValues::String(left.clone(), right.clone()), _ => return Err((left.type_name(), right.type_name())), }) }
33.470297
104
0.4797
e2fb7e05495e31450734a7962590991e29b60965
3,823
// Copyright (c) 2022, Mysten Labs, Inc. // SPDX-License-Identifier: Apache-2.0 use std::path::Path; use crate::{ api::{RpcGatewayServer, SignedTransaction, TransactionBytes}, rpc_gateway::responses::{GetObjectInfoResponse, ObjectResponse, SuiTypeTag}, }; use anyhow::anyhow; use async_trait::async_trait; use jsonrpsee::core::RpcResult; use move_core_types::identifier::Identifier; use sui_core::gateway_state::{ gateway_responses::{TransactionEffectsResponse, TransactionResponse}, GatewayTxSeqNumber, }; use sui_core::sui_json::SuiJsonValue; use sui_types::{ base_types::{ObjectID, SuiAddress, TransactionDigest}, json_schema::Base64, object::ObjectRead, }; pub struct SuiNode {} impl SuiNode { pub fn new(_config_path: &Path) -> anyhow::Result<Self> { Ok(Self {}) } } #[async_trait] impl RpcGatewayServer for SuiNode { async fn transfer_coin( &self, _signer: SuiAddress, _object_id: ObjectID, _gas: Option<ObjectID>, _gas_budget: u64, _recipient: SuiAddress, ) -> RpcResult<TransactionBytes> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn publish( &self, _sender: SuiAddress, _compiled_modules: Vec<Base64>, _gas: Option<ObjectID>, _gas_budget: u64, ) -> RpcResult<TransactionBytes> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn split_coin( &self, _signer: SuiAddress, _coin_object_id: ObjectID, _split_amounts: Vec<u64>, _gas: Option<ObjectID>, _gas_budget: u64, ) -> RpcResult<TransactionBytes> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn merge_coin( &self, _signer: SuiAddress, _primary_coin: ObjectID, _coin_to_merge: ObjectID, _gas: Option<ObjectID>, _gas_budget: u64, ) -> RpcResult<TransactionBytes> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn get_owned_objects(&self, _owner: SuiAddress) -> RpcResult<ObjectResponse> { todo!() } async fn get_object_info(&self, _object_id: ObjectID) -> RpcResult<ObjectRead> { todo!() } async fn get_object_typed_info( &self, 
_object_id: ObjectID, ) -> RpcResult<GetObjectInfoResponse> { todo!() } async fn execute_transaction( &self, _signed_tx: SignedTransaction, ) -> RpcResult<TransactionResponse> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn move_call( &self, _signer: SuiAddress, _package_object_id: ObjectID, _module: Identifier, _function: Identifier, _type_arguments: Vec<SuiTypeTag>, _rpc_arguments: Vec<SuiJsonValue>, _gas: Option<ObjectID>, _gas_budget: u64, ) -> RpcResult<TransactionBytes> { Err(anyhow!("Sui Node only supports read-only methods").into()) } async fn sync_account_state(&self, _address: SuiAddress) -> RpcResult<()> { todo!() } async fn get_total_transaction_number(&self) -> RpcResult<u64> { todo!() } async fn get_transactions_in_range( &self, _start: GatewayTxSeqNumber, _end: GatewayTxSeqNumber, ) -> RpcResult<Vec<(GatewayTxSeqNumber, TransactionDigest)>> { todo!() } async fn get_recent_transactions( &self, _count: u64, ) -> RpcResult<Vec<(GatewayTxSeqNumber, TransactionDigest)>> { todo!() } async fn get_transaction( &self, _digest: TransactionDigest, ) -> RpcResult<TransactionEffectsResponse> { todo!() } }
26.365517
88
0.627779
72a39c15ee6c894528431e42d6e6451d0ae657f5
2,541
use serde::{Deserialize, Serialize}; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Player { pub name: String, pub connection_id: String, } pub type LobbyId = String; #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Lobby { pub id: LobbyId, pub players: Vec<Player>, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct LobbyActionCreate { pub name: String, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct LobbyActionCreateResponse { pub lobby: Lobby, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct LobbyActionJoin { pub name: String, pub lobby_code: String, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct LobbyActionJoinResponse { pub lobby: Lobby, } #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct LobbyMessage { pub lobby_code: String, pub body: String, } #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] pub struct LobbyMessageResponse { pub name: String, pub body: String, } /// the structure of the client payload (action aside) #[derive(Serialize, Deserialize, Debug, PartialEq)] #[serde(tag = "type", rename_all = "snake_case")] pub enum Message { LobbyActionCreate(LobbyActionCreate), LobbyActionCreateResponse(LobbyActionCreateResponse), LobbyActionJoin(LobbyActionJoin), LobbyActionJoinResponse(LobbyActionJoinResponse), LobbyMessage(LobbyMessage), LobbyMessageResponse(LobbyMessageResponse), } // #[cfg(test)] // mod tests { // use super::*; // #[test] // fn deserialize_create_lobby_event() { // let event = serde_json::from_str::<Event>(include_str!("../tests/data/create_lobby.json")) // .expect("failed to deserialize send event"); // assert_eq!( // event.message().and_then(|m| Some(m)), // Some(Message::LobbyActionCreate(Message::LobbyActionCreate { // name: "Host".to_string() // })) // ) // } // #[test] // fn deserialize_join_lobby_event() { // let event = serde_json::from_str::<Event>(include_str!("../tests/data/join_lobby.json")) // 
.expect("failed to deserialize send event"); // assert_eq!( // event.message().and_then(|m| Some(m)), // Some(Message::LobbyActionJoin(LobbyActionJoin { // name: "Host".to_string(), // lobby_code: "hljk".to_string(), // })) // ) // } // }
27.923077
101
0.644235
149cc5c73cd02d4f2468258e95f14a18bfc80e24
1,280
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use executable_helpers::helpers::{setup_executable, ARG_CONFIG_PATH, ARG_DISABLE_LOGGING}; use signal_hook; use std::sync::{ atomic::{AtomicBool, Ordering}, Arc, }; #[global_allocator] static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc; fn register_signals(term: Arc<AtomicBool>) { for signal in &[ signal_hook::SIGTERM, signal_hook::SIGINT, signal_hook::SIGHUP, ] { let term_clone = Arc::clone(&term); let thread = std::thread::current(); unsafe { signal_hook::register(*signal, move || { term_clone.store(true, Ordering::Release); thread.unpark(); }) .expect("failed to register signal handler"); } } } fn main() { let (mut config, _logger, _args) = setup_executable( "Libra single node".to_string(), vec![ARG_CONFIG_PATH, ARG_DISABLE_LOGGING], ); let (_ac_handle, _node_handle) = libra_node::main_node::setup_environment(&mut config); let term = Arc::new(AtomicBool::new(false)); register_signals(Arc::clone(&term)); while !term.load(Ordering::Acquire) { std::thread::park(); } }
27.826087
91
0.625
ccac3bf7343b1c04ef2deb9c9c7e8b6f1f018644
1,131
// One tricky thing about the macros is that the `assert_approx_eq` crate doesn't support messages // when something fails. This means that it can be hard to tell which of the many sub assertions is // the one that failed. #[macro_export] macro_rules! assert_tuple_approx_eq { ($a:expr, $b:expr) => {{ use assert_approx_eq::assert_approx_eq; assert_approx_eq!($a.x, $b.x, 1e-5f64); assert_approx_eq!($a.y, $b.y, 1e-5f64); assert_approx_eq!($a.z, $b.z, 1e-5f64); assert_approx_eq!($a.w, $b.w, 1e-5f64); }}; } #[macro_export] macro_rules! assert_color_approx_eq { ($a:expr, $b:expr) => {{ use assert_approx_eq::assert_approx_eq; assert_approx_eq!($a.r, $b.r, 1e-5f64); assert_approx_eq!($a.g, $b.g, 1e-5f64); assert_approx_eq!($a.b, $b.b, 1e-5f64); }}; } #[macro_export] macro_rules! assert_matrix_approx_eq { ( $a:expr, $b:expr) => {{ use assert_approx_eq::assert_approx_eq; for x in 0..4 { for y in 0..4 { assert_approx_eq!($a[(x, y)], $b[(x, y)], 1e-5f64); } } }}; }
30.567568
99
0.591512
4be9c8d3f0ba256d4ccd4d9d22e84783045c59cf
2,617
use std::env; use std::fmt; fn main() { let args: Vec<String> = env::args().collect(); if args.len() != 3 { println!("Usage: fastpebsym <depth> <total-amt>"); std::process::exit(1); } let depth = args[1].parse::<u32>(); let height = args[2].parse::<u32>(); if depth.is_err() { println!( r#"Depth must be a positive integer (received "{}")"#, args[1] ); } if height.is_err() { println!( r#"Height must be a positive integer (received "{}")"#, args[2] ); } let pebbleable = Tree::candidates(depth.unwrap(), height.unwrap()) .filter(|tree| tree.is_pebbleable()) .collect::<Vec<Tree>>(); if pebbleable.len() == 0 { println!("No pebbleable solutions"); return; } println!(" r"); for tree in pebbleable { println!("{}", tree); } } #[derive(Debug)] struct Tree(Vec<u32>); impl Tree { fn candidates(depth: u32, total: u32) -> Box<dyn Iterator<Item = Tree>> { Tree::gen_candidates(depth, total, depth as usize) } fn gen_candidates(depth: u32, total: u32, tree_depth: usize) -> Box<dyn Iterator<Item = Tree>> { if depth == 0 { let items = vec![0; tree_depth]; Box::new(std::iter::once(Tree(items))) } else { Box::new(root_amts(total).flat_map(move |root_amt| { let residue = (total - root_amt) / 2; Tree::gen_candidates(depth - 1, residue, tree_depth).map(move |mut subtree| { subtree.0[tree_depth - depth as usize] = root_amt; subtree }) })) } } fn is_pebbleable(&self) -> bool { let mut from_above = 0; for (i, amt) in self.0.iter().enumerate() { let rest = &self.0[i + 1..]; let from_below = muster(rest); let total_at_node = amt + from_above / 2 + from_below / 2; if total_at_node == 0 { return false; } from_above = total_at_node; } true } } impl fmt::Display for Tree { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}", self.0) } } fn muster(items: &[u32]) -> u32 { let mut acc = 0; for amt in items.iter().rev() { acc = amt + 2 * (acc / 2); } acc } fn root_amts(total: u32) -> impl Iterator<Item = u32> { if total % 2 == 0 { (0..total + 1).rev().step_by(2) } else { (1..total + 
1).rev().step_by(2) } }
24.457944
100
0.495224
3a5e7e8cb78f998e618b303877e83c9ead9db460
6,136
pub use crate::{Arena, Handle};

impl crate::Module {
    /// Apply the usual default interpolation for vertex shader outputs and fragment shader inputs.
    ///
    /// For every [`Binding`] that is a vertex shader output or a fragment shader
    /// input, and that has an `interpolation` or `sampling` of `None`, assign a
    /// default interpolation and sampling as follows:
    ///
    /// - If the `Binding`'s type contains only 32-bit floating-point values or
    ///   vectors of such, default its interpolation to `Perspective` and its
    ///   sampling to `Center`.
    ///
    /// - Otherwise, mark its interpolation as `Flat`.
    ///
    /// When structs appear in input or output types, apply these rules to their
    /// leaves, since those are the things that actually get assigned locations.
    ///
    /// This function is a utility front ends may use to satisfy the Naga IR's
    /// requirement that all I/O `Binding`s from the vertex shader to the
    /// fragment shader must have non-`None` `interpolation` values. This
    /// requirement is meant to ensure that input languages' policies have been
    /// applied appropriately.
    ///
    /// All the shader languages Naga supports have similar rules:
    /// perspective-correct, center-sampled interpolation is the default for any
    /// binding that can vary, and everything else either defaults to flat, or
    /// requires an explicit flat qualifier/attribute/what-have-you.
    ///
    /// [`Binding`]: super::Binding
    pub fn apply_common_default_interpolation(&mut self) {
        use crate::{Binding, ScalarKind, Type, TypeInner};

        /// Choose a default interpolation for a function argument or result.
        ///
        /// `binding` refers to the `Binding` whose type is `ty`. If `ty` is a struct, then it's the
        /// bindings of the struct's members that we care about, and the binding of the struct
        /// itself is meaningless, so `binding` should be `None`.
        fn default_binding_or_struct(
            binding: &mut Option<Binding>,
            ty: Handle<Type>,
            types: &mut Arena<Type>,
        ) {
            match types.get_mut(ty).inner {
                // A struct. It's the individual members we care about, so recurse.
                TypeInner::Struct { members: ref mut m, .. } => {
                    // To choose the right interpolations for `members`, we must consult other
                    // elements of `types`. But both `members` and the types it refers to are stored
                    // in `types`, and Rust won't let us mutate one element of the `Arena`'s `Vec`
                    // while reading others.
                    //
                    // So, temporarily swap the member list out of its type, assign appropriate
                    // interpolations to its members, and then swap the list back in.
                    use std::mem::replace;
                    // Leave an empty Vec in the arena while we recurse; `types` is then free
                    // to be borrowed mutably again below.
                    let mut members = replace(m, vec![]);

                    for member in &mut members {
                        default_binding_or_struct(&mut member.binding, member.ty, types);
                    }

                    // Swap the member list back in. It's essential that we call `types.get_mut`
                    // afresh here, rather than just using `m`: it's only because `m` was dead that
                    // we were able to pass `types` to the recursive call.
                    match types.get_mut(ty).inner {
                        TypeInner::Struct { members: ref mut m, .. } => replace(m, members),
                        _ => unreachable!("ty must be a struct"),
                    };
                }

                // Some interpolatable type.
                //
                // GLSL has 64-bit floats, but it won't interpolate them. WGSL and MSL only have
                // 32-bit floats. SPIR-V has 16- and 64-bit float capabilities, but Vulkan is vague
                // about what can and cannot be interpolated.
                // (`width: 4` is a byte width, i.e. exactly `f32`.)
                TypeInner::Scalar { kind: ScalarKind::Float, width: 4 }
                | TypeInner::Vector { kind: ScalarKind::Float, width: 4, .. } => {
                    // unwrap: all `EntryPoint` arguments or return values must either be structures
                    // or have a `Binding`.
                    let binding = binding.as_mut().unwrap();
                    if let Binding::Location { ref mut interpolation, ref mut sampling, .. } =
                        *binding
                    {
                        if interpolation.is_none() {
                            *interpolation = Some(crate::Interpolation::Perspective);
                        }
                        // Only fill in a sampling default when it makes sense: flat-interpolated
                        // bindings take no sampling.
                        if sampling.is_none() && *interpolation != Some(crate::Interpolation::Flat)
                        {
                            *sampling = Some(crate::Sampling::Center);
                        }
                    }
                }

                // Some type that can't be interpolated.
                _ => {
                    // unwrap: all `EntryPoint` arguments or return values must either be structures
                    // or have a `Binding`.
                    let binding = binding.as_mut().unwrap();
                    if let Binding::Location { ref mut interpolation, ref mut sampling, .. } =
                        *binding
                    {
                        *interpolation = Some(crate::Interpolation::Flat);
                        *sampling = None;
                    }
                }
            }
        }

        // Only the varyings that cross the vertex -> fragment boundary need defaults:
        // fragment-shader inputs (arguments) and vertex-shader outputs (the result).
        for ep in &mut self.entry_points {
            let function = &mut ep.function;
            match ep.stage {
                crate::ShaderStage::Fragment => {
                    for arg in &mut function.arguments {
                        default_binding_or_struct(&mut arg.binding, arg.ty, &mut self.types);
                    }
                }
                crate::ShaderStage::Vertex => {
                    if let Some(result) = function.result.as_mut() {
                        default_binding_or_struct(&mut result.binding, result.ty, &mut self.types);
                    }
                }
                _ => (),
            }
        }
    }
}
51.133333
105
0.535365
2224bf5f66e90103c4f2542583f0e849b4779797
75,994
use std::any::{type_name, Any};
use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
use std::env;
use std::ffi::OsStr;
use std::fmt::{Debug, Write};
use std::fs;
use std::hash::Hash;
use std::ops::Deref;
use std::path::{Component, Path, PathBuf};
use std::process::Command;
use std::time::{Duration, Instant};

use crate::cache::{Cache, Interned, INTERNER};
use crate::compile;
use crate::config::{SplitDebuginfo, TargetSelection};
use crate::dist;
use crate::doc;
use crate::flags::{Color, Subcommand};
use crate::install;
use crate::native;
use crate::run;
use crate::test;
use crate::tool::{self, SourceType};
use crate::util::{self, add_dylib_path, add_link_lib_path, exe, libdir, output, t};
use crate::EXTRA_CHECK_CFGS;
use crate::{check, Config};
use crate::{Build, CLang, DocTests, GitRepo, Mode};

pub use crate::Compiler;

// FIXME: replace with std::lazy after it gets stabilized and reaches beta
use once_cell::sync::Lazy;

/// Per-invocation driver for bootstrap: pairs the global [`Build`] state with
/// the subcommand (`kind`) being run, the requested stage, and a step cache.
pub struct Builder<'a> {
    /// The global build state shared by all steps.
    pub build: &'a Build,
    /// The stage requested on the command line (`--stage`).
    pub top_stage: u32,
    /// Which subcommand (build/check/test/...) this builder is executing.
    pub kind: Kind,
    // Memoizes step outputs so `ensure` runs each step at most once.
    cache: Cache,
    // Steps currently executing, used to detect dependency cycles.
    stack: RefCell<Vec<Box<dyn Any>>>,
    time_spent_on_dependencies: Cell<Duration>,
    /// The paths passed on the command line, selecting which steps run.
    pub paths: Vec<PathBuf>,
}

// `Builder` derefs to `Build` so steps can reach global config directly.
impl<'a> Deref for Builder<'a> {
    type Target = Build;

    fn deref(&self) -> &Self::Target {
        self.build
    }
}

pub trait Step: 'static + Clone + Debug + PartialEq + Eq + Hash {
    /// `PathBuf` when directories are created or to return a `Compiler` once
    /// it's been assembled.
    type Output: Clone;

    /// Whether this step is run by default as part of its respective phase.
    /// `true` here can still be overwritten by `should_run` calling `default_condition`.
    const DEFAULT: bool = false;

    /// If true, then this rule should be skipped if --target was specified, but --host was not
    const ONLY_HOSTS: bool = false;

    /// Primary function to execute this rule. Can call `builder.ensure()`
    /// with other steps to run those.
    fn run(self, builder: &Builder<'_>) -> Self::Output;

    /// When bootstrap is passed a set of paths, this controls whether this rule
    /// will execute. However, it does not get called in a "default" context
    /// when we are not passed any paths; in that case, `make_run` is called
    /// directly.
    fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_>;

    /// Builds up a "root" rule, either as a default rule or from a path passed
    /// to us.
    ///
    /// When path is `None`, we are executing in a context where no paths were
    /// passed. When `./x.py build` is run, for example, this rule could get
    /// called if it is in the correct list below with a path of `None`.
    fn make_run(_run: RunConfig<'_>) {
        // It is reasonable to not have an implementation of make_run for rules
        // who do not want to get called from the root context. This means that
        // they are likely dependencies (e.g., sysroot creation) or similar, and
        // as such calling them from ./x.py isn't logical.
        unimplemented!()
    }
}

/// Arguments handed to a step's `make_run` when it is selected as a root rule.
pub struct RunConfig<'a> {
    pub builder: &'a Builder<'a>,
    /// The target this invocation of the rule applies to.
    pub target: TargetSelection,
    /// The command-line path (or pathset representative) that matched the rule.
    pub path: PathBuf,
}

impl RunConfig<'_> {
    /// Convenience accessor for the host triple bootstrap itself was built for.
    pub fn build_triple(&self) -> TargetSelection {
        self.builder.build.build
    }
}

// Type-erased description of a `Step` impl: its constants plus function
// pointers to its static methods, so all steps can live in one table.
struct StepDescription {
    default: bool,
    only_hosts: bool,
    should_run: fn(ShouldRun<'_>) -> ShouldRun<'_>,
    make_run: fn(RunConfig<'_>),
    name: &'static str,
    kind: Kind,
}

/// A path as used to select a task, optionally qualified with the subcommand
/// it belongs to (e.g. `test::src/test/ui`).
#[derive(Clone, PartialOrd, Ord, PartialEq, Eq)]
pub struct TaskPath {
    pub path: PathBuf,
    pub kind: Option<Kind>,
}

impl TaskPath {
    /// Parses a possibly kind-qualified path: a leading `kind::` component is
    /// split off into `kind`, the remainder becomes `path`.
    ///
    /// Panics on an empty kind (`::foo`) and asserts the kind is recognized.
    pub fn parse(path: impl Into<PathBuf>) -> TaskPath {
        let mut kind = None;
        let mut path = path.into();

        let mut components = path.components();
        if let Some(Component::Normal(os_str)) = components.next() {
            if let Some(str) = os_str.to_str() {
                if let Some((found_kind, found_prefix)) = str.split_once("::") {
                    if found_kind.is_empty() {
                        panic!("empty kind in task path {}", path.display());
                    }
                    kind = Kind::parse(found_kind);
                    assert!(kind.is_some());
                    path = Path::new(found_prefix).join(components.as_path());
                }
            }
        }

        TaskPath
{ path, kind }
    }
}

// Render as `kind::path` when a kind is present, matching the parse format.
impl Debug for TaskPath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if let Some(kind) = &self.kind {
            write!(f, "{}::", kind.as_str())?;
        }
        write!(f, "{}", self.path.display())
    }
}

/// Collection of paths used to match a task rule.
#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)]
pub enum PathSet {
    /// A collection of individual paths.
    ///
    /// These are generally matched as a path suffix. For example, a
    /// command-line value of `libstd` will match if `src/libstd` is in the
    /// set.
    Set(BTreeSet<TaskPath>),
    /// A "suite" of paths.
    ///
    /// These can match as a path suffix (like `Set`), or as a prefix. For
    /// example, a command-line value of `src/test/ui/abi/variadic-ffi.rs`
    /// will match `src/test/ui`. A command-line value of `ui` would also
    /// match `src/test/ui`.
    Suite(TaskPath),
}

impl PathSet {
    /// An empty `Set`, used by rules that should never match a path.
    fn empty() -> PathSet {
        PathSet::Set(BTreeSet::new())
    }

    /// A `Set` containing a single kind-qualified path.
    fn one<P: Into<PathBuf>>(path: P, kind: Kind) -> PathSet {
        let mut set = BTreeSet::new();
        set.insert(TaskPath { path: path.into(), kind: Some(kind) });
        PathSet::Set(set)
    }

    /// Whether `needle` matches this set (suffix match), additionally
    /// requiring the kinds to agree when both sides carry one.
    fn has(&self, needle: &Path, module: Option<Kind>) -> bool {
        let check = |p: &TaskPath| {
            if let (Some(p_kind), Some(kind)) = (&p.kind, module) {
                p.path.ends_with(needle) && *p_kind == kind
            } else {
                p.path.ends_with(needle)
            }
        };

        match self {
            PathSet::Set(set) => set.iter().any(check),
            PathSet::Suite(suite) => check(suite),
        }
    }

    /// A representative path for this set: the first entry of a `Set`
    /// (falling back to the source root for an empty set), or the suite path.
    fn path(&self, builder: &Builder<'_>) -> PathBuf {
        match self {
            PathSet::Set(set) => {
                set.iter().next().map(|p| &p.path).unwrap_or(&builder.build.src).clone()
            }
            PathSet::Suite(path) => path.path.clone(),
        }
    }
}

impl StepDescription {
    /// Builds the type-erased description for a concrete `Step` impl `S`.
    fn from<S: Step>(kind: Kind) -> StepDescription {
        StepDescription {
            default: S::DEFAULT,
            only_hosts: S::ONLY_HOSTS,
            should_run: S::should_run,
            make_run: S::make_run,
            name: std::any::type_name::<S>(),
            kind,
        }
    }

    /// Invokes this rule's `make_run` for `pathset` on every relevant target,
    /// unless the pathset is excluded via `--exclude`.
    fn maybe_run(&self, builder: &Builder<'_>, pathset: &PathSet) {
        if self.is_excluded(builder, pathset) {
            return;
        }

        // Determine the targets participating in this rule.
        let targets = if self.only_hosts { &builder.hosts } else { &builder.targets };

        for target in targets {
            let run = RunConfig { builder, path: pathset.path(builder), target: *target };
            (self.make_run)(run);
        }
    }

    /// Whether `pathset` matches any `--exclude` entry; logs either way.
    fn is_excluded(&self, builder: &Builder<'_>, pathset: &PathSet) -> bool {
        if builder.config.exclude.iter().any(|e| pathset.has(&e.path, e.kind)) {
            eprintln!("Skipping {:?} because it is excluded", pathset);
            return true;
        }

        if !builder.config.exclude.is_empty() {
            builder.verbose(&format!(
                "{:?} not skipped for {:?} -- not in {:?}",
                pathset, self.name, builder.config.exclude
            ));
        }
        false
    }

    /// Top-level dispatch: runs default rules when no paths were given,
    /// otherwise runs every rule matching each command-line path. Panics if a
    /// path matches no rule at all.
    fn run(v: &[StepDescription], builder: &Builder<'_>, paths: &[PathBuf]) {
        let should_runs = v
            .iter()
            .map(|desc| (desc.should_run)(ShouldRun::new(builder, desc.kind)))
            .collect::<Vec<_>>();

        // sanity checks on rules
        for (desc, should_run) in v.iter().zip(&should_runs) {
            assert!(
                !should_run.paths.is_empty(),
                "{:?} should have at least one pathset",
                desc.name
            );
        }

        if paths.is_empty() || builder.config.include_default_paths {
            for (desc, should_run) in v.iter().zip(&should_runs) {
                if desc.default && should_run.is_really_default() {
                    for pathset in &should_run.paths {
                        desc.maybe_run(builder, pathset);
                    }
                }
            }
        }

        for path in paths {
            // strip CurDir prefix if present
            let path = match path.strip_prefix(".") {
                Ok(p) => p,
                Err(_) => path,
            };

            let mut attempted_run = false;
            for (desc, should_run) in v.iter().zip(&should_runs) {
                if let Some(suite) = should_run.is_suite_path(path) {
                    attempted_run = true;
                    desc.maybe_run(builder, suite);
                } else if let Some(pathset) = should_run.pathset_for_path(path, desc.kind) {
                    attempted_run = true;
                    desc.maybe_run(builder, pathset);
                }
            }

            if !attempted_run {
                panic!("error: no rules matched {}", path.display());
            }
        }
    }
}

// A default-ness condition that may be a plain bool or computed lazily
// (at most once) from a closure.
enum ReallyDefault<'a> {
    Bool(bool),
    Lazy(Lazy<bool, Box<dyn Fn() -> bool + 'a>>),
}

/// Accumulator a step's `should_run` fills in: the pathsets that select the
/// step, plus whether it really runs by default.
pub struct ShouldRun<'a> {
    pub builder: &'a Builder<'a>,
    kind: Kind,

    // use a BTreeSet to maintain sort order
    paths:
BTreeSet<PathSet>,

    // If this is a default rule, this is an additional constraint placed on
    // its run. Generally something like compiler docs being enabled.
    is_really_default: ReallyDefault<'a>,
}

impl<'a> ShouldRun<'a> {
    fn new(builder: &'a Builder<'_>, kind: Kind) -> ShouldRun<'a> {
        ShouldRun {
            builder,
            kind,
            paths: BTreeSet::new(),
            is_really_default: ReallyDefault::Bool(true), // by default no additional conditions
        }
    }

    /// Gate default execution on an eagerly-computed condition.
    pub fn default_condition(mut self, cond: bool) -> Self {
        self.is_really_default = ReallyDefault::Bool(cond);
        self
    }

    /// Gate default execution on a condition computed lazily, at most once.
    pub fn lazy_default_condition(mut self, lazy_cond: Box<dyn Fn() -> bool + 'a>) -> Self {
        self.is_really_default = ReallyDefault::Lazy(Lazy::new(lazy_cond));
        self
    }

    pub fn is_really_default(&self) -> bool {
        match &self.is_really_default {
            ReallyDefault::Bool(val) => *val,
            ReallyDefault::Lazy(lazy) => *lazy.deref(),
        }
    }

    /// Indicates it should run if the command-line selects the given crate or
    /// any of its (local) dependencies.
    ///
    /// Compared to `krate`, this treats the dependencies as aliases for the
    /// same job. Generally it is preferred to use `krate`, and treat each
    /// individual path separately. For example `./x.py test src/liballoc`
    /// (which uses `krate`) will test just `liballoc`. However, `./x.py check
    /// src/liballoc` (which uses `all_krates`) will check all of `libtest`.
    /// `all_krates` should probably be removed at some point.
    pub fn all_krates(mut self, name: &str) -> Self {
        let mut set = BTreeSet::new();
        for krate in self.builder.in_tree_crates(name, None) {
            let path = krate.local_path(self.builder);
            set.insert(TaskPath { path, kind: Some(self.kind) });
        }
        // One Set containing all the crates: any of them selects this rule.
        self.paths.insert(PathSet::Set(set));
        self
    }

    /// Indicates it should run if the command-line selects the given crate or
    /// any of its (local) dependencies.
    ///
    /// `make_run` will be called separately for each matching command-line path.
    pub fn krate(mut self, name: &str) -> Self {
        for krate in self.builder.in_tree_crates(name, None) {
            let path = krate.local_path(self.builder);
            // One singleton PathSet per crate, unlike `all_krates`.
            self.paths.insert(PathSet::one(path, self.kind));
        }
        self
    }

    // single alias, which does not correspond to any on-disk path
    pub fn alias(mut self, alias: &str) -> Self {
        assert!(
            !self.builder.src.join(alias).exists(),
            "use `builder.path()` for real paths: {}",
            alias
        );
        self.paths.insert(PathSet::Set(
            std::iter::once(TaskPath { path: alias.into(), kind: Some(self.kind) }).collect(),
        ));
        self
    }

    // single, non-aliased path
    pub fn path(self, path: &str) -> Self {
        self.paths(&[path])
    }

    // multiple aliases for the same job
    pub fn paths(mut self, paths: &[&str]) -> Self {
        self.paths.insert(PathSet::Set(
            paths
                .iter()
                .map(|p| {
                    // FIXME(#96188): make sure this is actually a path.
                    // This currently breaks for paths within submodules.
                    //assert!(
                    //    self.builder.src.join(p).exists(),
                    //    "`should_run.paths` should correspond to real on-disk paths - use `alias` if there is no relevant path: {}",
                    //    p
                    //);
                    TaskPath { path: p.into(), kind: Some(self.kind) }
                })
                .collect(),
        ));
        self
    }

    /// Returns the `Suite` pathset that `path` falls under, if any
    /// (prefix match, unlike `Set`'s suffix match).
    pub fn is_suite_path(&self, path: &Path) -> Option<&PathSet> {
        self.paths.iter().find(|pathset| match pathset {
            PathSet::Suite(p) => path.starts_with(&p.path),
            PathSet::Set(_) => false,
        })
    }

    pub fn suite_path(mut self, suite: &str) -> Self {
        self.paths.insert(PathSet::Suite(TaskPath { path: suite.into(), kind: Some(self.kind) }));
        self
    }

    // allows being more explicit about why should_run in Step returns the value passed to it
    pub fn never(mut self) -> ShouldRun<'a> {
        self.paths.insert(PathSet::empty());
        self
    }

    /// Returns the first pathset matching `path` for the given kind, if any.
    fn pathset_for_path(&self, path: &Path, kind: Kind) -> Option<&PathSet> {
        self.paths.iter().find(|pathset| pathset.has(path, Some(kind)))
    }
}

/// The subcommands bootstrap understands; one table of step rules exists per kind.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum Kind {
    Build,
    Check,
    Clippy,
    Fix,
    Format,
    Test,
    Bench,
    Doc,
    Clean,
    Dist,
    Install,
    Run,
    Setup,
}

impl Kind {
    pub fn parse(string: &str) ->
Option<Kind> { // these strings, including the one-letter aliases, must match the x.py help text Some(match string { "build" | "b" => Kind::Build, "check" | "c" => Kind::Check, "clippy" => Kind::Clippy, "fix" => Kind::Fix, "fmt" => Kind::Format, "test" | "t" => Kind::Test, "bench" => Kind::Bench, "doc" | "d" => Kind::Doc, "clean" => Kind::Clean, "dist" => Kind::Dist, "install" => Kind::Install, "run" | "r" => Kind::Run, "setup" => Kind::Setup, _ => return None, }) } pub fn as_str(&self) -> &'static str { match self { Kind::Build => "build", Kind::Check => "check", Kind::Clippy => "clippy", Kind::Fix => "fix", Kind::Format => "fmt", Kind::Test => "test", Kind::Bench => "bench", Kind::Doc => "doc", Kind::Clean => "clean", Kind::Dist => "dist", Kind::Install => "install", Kind::Run => "run", Kind::Setup => "setup", } } } impl<'a> Builder<'a> { fn get_step_descriptions(kind: Kind) -> Vec<StepDescription> { macro_rules! describe { ($($rule:ty),+ $(,)?) => {{ vec![$(StepDescription::from::<$rule>(kind)),+] }}; } match kind { Kind::Build => describe!( compile::Std, compile::Assemble, compile::CodegenBackend, compile::StartupObjects, tool::BuildManifest, tool::Rustbook, tool::ErrorIndex, tool::UnstableBookGen, tool::Tidy, tool::Linkchecker, tool::CargoTest, tool::Compiletest, tool::RemoteTestServer, tool::RemoteTestClient, tool::RustInstaller, tool::Cargo, tool::Rls, tool::RustAnalyzer, tool::RustDemangler, tool::Rustdoc, tool::Clippy, tool::CargoClippy, native::Llvm, native::Sanitizers, tool::Rustfmt, tool::Miri, tool::CargoMiri, native::Lld, native::CrtBeginEnd ), Kind::Check => describe!( check::Std, check::Rustc, check::Rustdoc, check::CodegenBackend, check::Clippy, check::Miri, check::Rls, check::Rustfmt, check::Bootstrap ), Kind::Test => describe!( crate::toolstate::ToolStateCheck, test::ExpandYamlAnchors, test::Tidy, test::Ui, test::RunPassValgrind, test::MirOpt, test::Codegen, test::CodegenUnits, test::Assembly, test::Incremental, test::Debuginfo, test::UiFullDeps, 
test::Rustdoc, test::Pretty, test::Crate, test::CrateLibrustc, test::CrateRustdoc, test::CrateRustdocJsonTypes, test::Linkcheck, test::TierCheck, test::Cargotest, test::Cargo, test::Rls, test::ErrorIndex, test::Distcheck, test::RunMakeFullDeps, test::Nomicon, test::Reference, test::RustdocBook, test::RustByExample, test::TheBook, test::UnstableBook, test::RustcBook, test::LintDocs, test::RustcGuide, test::EmbeddedBook, test::EditionGuide, test::Rustfmt, test::Miri, test::Clippy, test::RustDemangler, test::CompiletestTest, test::RustdocJSStd, test::RustdocJSNotStd, test::RustdocGUI, test::RustdocTheme, test::RustdocUi, test::RustdocJson, test::HtmlCheck, // Run bootstrap close to the end as it's unlikely to fail test::Bootstrap, // Run run-make last, since these won't pass without make on Windows test::RunMake, ), Kind::Bench => describe!(test::Crate, test::CrateLibrustc), Kind::Doc => describe!( doc::UnstableBook, doc::UnstableBookGen, doc::TheBook, doc::Standalone, doc::Std, doc::Rustc, doc::Rustdoc, doc::Rustfmt, doc::ErrorIndex, doc::Nomicon, doc::Reference, doc::RustdocBook, doc::RustByExample, doc::RustcBook, doc::CargoBook, doc::Clippy, doc::EmbeddedBook, doc::EditionGuide, ), Kind::Dist => describe!( dist::Docs, dist::RustcDocs, dist::Mingw, dist::Rustc, dist::Std, dist::RustcDev, dist::Analysis, dist::Src, dist::Cargo, dist::Rls, dist::RustAnalyzer, dist::Rustfmt, dist::RustDemangler, dist::Clippy, dist::Miri, dist::LlvmTools, dist::RustDev, dist::Extended, // It seems that PlainSourceTarball somehow changes how some of the tools // perceive their dependencies (see #93033) which would invalidate fingerprints // and force us to rebuild tools after vendoring dependencies. // To work around this, create the Tarball after building all the tools. 
dist::PlainSourceTarball, dist::BuildManifest, dist::ReproducibleArtifacts, ), Kind::Install => describe!( install::Docs, install::Std, install::Cargo, install::Rls, install::RustAnalyzer, install::Rustfmt, install::RustDemangler, install::Clippy, install::Miri, install::Analysis, install::Src, install::Rustc ), Kind::Run => describe!(run::ExpandYamlAnchors, run::BuildManifest, run::BumpStage0), // These commands either don't use paths, or they're special-cased in Build::build() Kind::Clean | Kind::Clippy | Kind::Fix | Kind::Format | Kind::Setup => vec![], } } pub fn get_help(build: &Build, kind: Kind) -> Option<String> { let step_descriptions = Builder::get_step_descriptions(kind); if step_descriptions.is_empty() { return None; } let builder = Self::new_internal(build, kind, vec![]); let builder = &builder; // The "build" kind here is just a placeholder, it will be replaced with something else in // the following statement. let mut should_run = ShouldRun::new(builder, Kind::Build); for desc in step_descriptions { should_run.kind = desc.kind; should_run = (desc.should_run)(should_run); } let mut help = String::from("Available paths:\n"); let mut add_path = |path: &Path| { t!(write!(help, " ./x.py {} {}\n", kind.as_str(), path.display())); }; for pathset in should_run.paths { match pathset { PathSet::Set(set) => { for path in set { add_path(&path.path); } } PathSet::Suite(path) => { add_path(&path.path.join("...")); } } } Some(help) } fn new_internal(build: &Build, kind: Kind, paths: Vec<PathBuf>) -> Builder<'_> { Builder { build, top_stage: build.config.stage, kind, cache: Cache::new(), stack: RefCell::new(Vec::new()), time_spent_on_dependencies: Cell::new(Duration::new(0, 0)), paths, } } pub fn new(build: &Build) -> Builder<'_> { let (kind, paths) = match build.config.cmd { Subcommand::Build { ref paths } => (Kind::Build, &paths[..]), Subcommand::Check { ref paths } => (Kind::Check, &paths[..]), Subcommand::Clippy { ref paths, .. 
} => (Kind::Clippy, &paths[..]), Subcommand::Fix { ref paths } => (Kind::Fix, &paths[..]), Subcommand::Doc { ref paths, .. } => (Kind::Doc, &paths[..]), Subcommand::Test { ref paths, .. } => (Kind::Test, &paths[..]), Subcommand::Bench { ref paths, .. } => (Kind::Bench, &paths[..]), Subcommand::Dist { ref paths } => (Kind::Dist, &paths[..]), Subcommand::Install { ref paths } => (Kind::Install, &paths[..]), Subcommand::Run { ref paths } => (Kind::Run, &paths[..]), Subcommand::Format { .. } | Subcommand::Clean { .. } | Subcommand::Setup { .. } => { panic!() } }; Self::new_internal(build, kind, paths.to_owned()) } pub fn execute_cli(&self) { self.run_step_descriptions(&Builder::get_step_descriptions(self.kind), &self.paths); } pub fn default_doc(&self, paths: &[PathBuf]) { self.run_step_descriptions(&Builder::get_step_descriptions(Kind::Doc), paths); } /// NOTE: keep this in sync with `rustdoc::clean::utils::doc_rust_lang_org_channel`, or tests will fail on beta/stable. pub fn doc_rust_lang_org_channel(&self) -> String { let channel = match &*self.config.channel { "stable" => &self.version, "beta" => "beta", "nightly" | "dev" => "nightly", // custom build of rustdoc maybe? link to the latest stable docs just in case _ => "stable", }; "https://doc.rust-lang.org/".to_owned() + channel } fn run_step_descriptions(&self, v: &[StepDescription], paths: &[PathBuf]) { StepDescription::run(v, self, paths); } /// Obtain a compiler at a given stage and for a given host. Explicitly does /// not take `Compiler` since all `Compiler` instances are meant to be /// obtained through this function, since it ensures that they are valid /// (i.e., built and assembled). pub fn compiler(&self, stage: u32, host: TargetSelection) -> Compiler { self.ensure(compile::Assemble { target_compiler: Compiler { stage, host } }) } /// Similar to `compiler`, except handles the full-bootstrap option to /// silently use the stage1 compiler instead of a stage2 compiler if one is /// requested. 
/// /// Note that this does *not* have the side effect of creating /// `compiler(stage, host)`, unlike `compiler` above which does have such /// a side effect. The returned compiler here can only be used to compile /// new artifacts, it can't be used to rely on the presence of a particular /// sysroot. /// /// See `force_use_stage1` for documentation on what each argument is. pub fn compiler_for( &self, stage: u32, host: TargetSelection, target: TargetSelection, ) -> Compiler { if self.build.force_use_stage1(Compiler { stage, host }, target) { self.compiler(1, self.config.build) } else { self.compiler(stage, host) } } pub fn sysroot(&self, compiler: Compiler) -> Interned<PathBuf> { self.ensure(compile::Sysroot { compiler }) } /// Returns the libdir where the standard library and other artifacts are /// found for a compiler's sysroot. pub fn sysroot_libdir(&self, compiler: Compiler, target: TargetSelection) -> Interned<PathBuf> { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] struct Libdir { compiler: Compiler, target: TargetSelection, } impl Step for Libdir { type Output = Interned<PathBuf>; fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } fn run(self, builder: &Builder<'_>) -> Interned<PathBuf> { let lib = builder.sysroot_libdir_relative(self.compiler); let sysroot = builder .sysroot(self.compiler) .join(lib) .join("rustlib") .join(self.target.triple) .join("lib"); // Avoid deleting the rustlib/ directory we just copied // (in `impl Step for Sysroot`). if !builder.config.download_rustc { let _ = fs::remove_dir_all(&sysroot); t!(fs::create_dir_all(&sysroot)); } INTERNER.intern_path(sysroot) } } self.ensure(Libdir { compiler, target }) } pub fn sysroot_codegen_backends(&self, compiler: Compiler) -> PathBuf { self.sysroot_libdir(compiler, compiler.host).with_file_name("codegen-backends") } /// Returns the compiler's libdir where it stores the dynamic libraries that /// it itself links against. 
/// /// For example this returns `<sysroot>/lib` on Unix and `<sysroot>/bin` on /// Windows. pub fn rustc_libdir(&self, compiler: Compiler) -> PathBuf { if compiler.is_snapshot(self) { self.rustc_snapshot_libdir() } else { match self.config.libdir_relative() { Some(relative_libdir) if compiler.stage >= 1 => { self.sysroot(compiler).join(relative_libdir) } _ => self.sysroot(compiler).join(libdir(compiler.host)), } } } /// Returns the compiler's relative libdir where it stores the dynamic libraries that /// it itself links against. /// /// For example this returns `lib` on Unix and `bin` on /// Windows. pub fn libdir_relative(&self, compiler: Compiler) -> &Path { if compiler.is_snapshot(self) { libdir(self.config.build).as_ref() } else { match self.config.libdir_relative() { Some(relative_libdir) if compiler.stage >= 1 => relative_libdir, _ => libdir(compiler.host).as_ref(), } } } /// Returns the compiler's relative libdir where the standard library and other artifacts are /// found for a compiler's sysroot. /// /// For example this returns `lib` on Unix and Windows. pub fn sysroot_libdir_relative(&self, compiler: Compiler) -> &Path { match self.config.libdir_relative() { Some(relative_libdir) if compiler.stage >= 1 => relative_libdir, _ if compiler.stage == 0 => &self.build.initial_libdir, _ => Path::new("lib"), } } pub fn rustc_lib_paths(&self, compiler: Compiler) -> Vec<PathBuf> { let mut dylib_dirs = vec![self.rustc_libdir(compiler)]; // Ensure that the downloaded LLVM libraries can be found. if self.config.llvm_from_ci { let ci_llvm_lib = self.out.join(&*compiler.host.triple).join("ci-llvm").join("lib"); dylib_dirs.push(ci_llvm_lib); } dylib_dirs } /// Adds the compiler's directory of dynamic libraries to `cmd`'s dynamic /// library lookup path. 
pub fn add_rustc_lib_path(&self, compiler: Compiler, cmd: &mut Command) { // Windows doesn't need dylib path munging because the dlls for the // compiler live next to the compiler and the system will find them // automatically. if cfg!(windows) { return; } add_dylib_path(self.rustc_lib_paths(compiler), cmd); } /// Gets a path to the compiler specified. pub fn rustc(&self, compiler: Compiler) -> PathBuf { if compiler.is_snapshot(self) { self.initial_rustc.clone() } else { self.sysroot(compiler).join("bin").join(exe("rustc", compiler.host)) } } /// Gets the paths to all of the compiler's codegen backends. fn codegen_backends(&self, compiler: Compiler) -> impl Iterator<Item = PathBuf> { fs::read_dir(self.sysroot_codegen_backends(compiler)) .into_iter() .flatten() .filter_map(Result::ok) .map(|entry| entry.path()) } pub fn rustdoc(&self, compiler: Compiler) -> PathBuf { self.ensure(tool::Rustdoc { compiler }) } pub fn rustdoc_cmd(&self, compiler: Compiler) -> Command { let mut cmd = Command::new(&self.bootstrap_out.join("rustdoc")); cmd.env("RUSTC_STAGE", compiler.stage.to_string()) .env("RUSTC_SYSROOT", self.sysroot(compiler)) // Note that this is *not* the sysroot_libdir because rustdoc must be linked // equivalently to rustc. .env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler)) .env("CFG_RELEASE_CHANNEL", &self.config.channel) .env("RUSTDOC_REAL", self.rustdoc(compiler)) .env("RUSTC_BOOTSTRAP", "1"); cmd.arg("-Wrustdoc::invalid_codeblock_attributes"); if self.config.deny_warnings { cmd.arg("-Dwarnings"); } cmd.arg("-Znormalize-docs"); // Remove make-related flags that can cause jobserver problems. cmd.env_remove("MAKEFLAGS"); cmd.env_remove("MFLAGS"); if let Some(linker) = self.linker(compiler.host) { cmd.env("RUSTDOC_LINKER", linker); } if self.is_fuse_ld_lld(compiler.host) { cmd.env("RUSTDOC_FUSE_LD_LLD", "1"); } cmd } /// Return the path to `llvm-config` for the target, if it exists. 
/// /// Note that this returns `None` if LLVM is disabled, or if we're in a /// check build or dry-run, where there's no need to build all of LLVM. fn llvm_config(&self, target: TargetSelection) -> Option<PathBuf> { if self.config.llvm_enabled() && self.kind != Kind::Check && !self.config.dry_run { let llvm_config = self.ensure(native::Llvm { target }); if llvm_config.is_file() { return Some(llvm_config); } } None } /// Convenience wrapper to allow `builder.llvm_link_shared()` instead of `builder.config.llvm_link_shared(&builder)`. pub(crate) fn llvm_link_shared(&self) -> bool { Config::llvm_link_shared(self) } /// Prepares an invocation of `cargo` to be run. /// /// This will create a `Command` that represents a pending execution of /// Cargo. This cargo will be configured to use `compiler` as the actual /// rustc compiler, its output will be scoped by `mode`'s output directory, /// it will pass the `--target` flag for the specified `target`, and will be /// executing the Cargo command `cmd`. pub fn cargo( &self, compiler: Compiler, mode: Mode, source_type: SourceType, target: TargetSelection, cmd: &str, ) -> Cargo { let mut cargo = Command::new(&self.initial_cargo); let out_dir = self.stage_out(compiler, mode); // Codegen backends are not yet tracked by -Zbinary-dep-depinfo, // so we need to explicitly clear out if they've been updated. for backend in self.codegen_backends(compiler) { self.clear_if_dirty(&out_dir, &backend); } if cmd == "doc" || cmd == "rustdoc" { let my_out = match mode { // This is the intended out directory for compiler documentation. 
Mode::Rustc | Mode::ToolRustc => self.compiler_doc_out(target), Mode::Std => out_dir.join(target.triple).join("doc"), _ => panic!("doc mode {:?} not expected", mode), }; let rustdoc = self.rustdoc(compiler); self.clear_if_dirty(&my_out, &rustdoc); } cargo.env("CARGO_TARGET_DIR", &out_dir).arg(cmd); let profile_var = |name: &str| { let profile = if self.config.rust_optimize { "RELEASE" } else { "DEV" }; format!("CARGO_PROFILE_{}_{}", profile, name) }; // See comment in rustc_llvm/build.rs for why this is necessary, largely llvm-config // needs to not accidentally link to libLLVM in stage0/lib. cargo.env("REAL_LIBRARY_PATH_VAR", &util::dylib_path_var()); if let Some(e) = env::var_os(util::dylib_path_var()) { cargo.env("REAL_LIBRARY_PATH", e); } // Found with `rg "init_env_logger\("`. If anyone uses `init_env_logger` // from out of tree it shouldn't matter, since x.py is only used for // building in-tree. let color_logs = ["RUSTDOC_LOG_COLOR", "RUSTC_LOG_COLOR", "RUST_LOG_COLOR"]; match self.build.config.color { Color::Always => { cargo.arg("--color=always"); for log in &color_logs { cargo.env(log, "always"); } } Color::Never => { cargo.arg("--color=never"); for log in &color_logs { cargo.env(log, "never"); } } Color::Auto => {} // nothing to do } if cmd != "install" { cargo.arg("--target").arg(target.rustc_target_arg()); } else { assert_eq!(target, compiler.host); } // Set a flag for `check`/`clippy`/`fix`, so that certain build // scripts can do less work (i.e. not building/requiring LLVM). if cmd == "check" || cmd == "clippy" || cmd == "fix" { // If we've not yet built LLVM, or it's stale, then bust // the rustc_llvm cache. That will always work, even though it // may mean that on the next non-check build we'll need to rebuild // rustc_llvm. But if LLVM is stale, that'll be a tiny amount // of work comparatively, and we'd likely need to rebuild it anyway, // so that's okay. 
if crate::native::prebuilt_llvm_config(self, target).is_err() { cargo.env("RUST_CHECK", "1"); } } let stage = if compiler.stage == 0 && self.local_rebuild { // Assume the local-rebuild rustc already has stage1 features. 1 } else { compiler.stage }; let mut rustflags = Rustflags::new(target); if stage != 0 { if let Ok(s) = env::var("CARGOFLAGS_NOT_BOOTSTRAP") { cargo.args(s.split_whitespace()); } rustflags.env("RUSTFLAGS_NOT_BOOTSTRAP"); } else { if let Ok(s) = env::var("CARGOFLAGS_BOOTSTRAP") { cargo.args(s.split_whitespace()); } rustflags.env("RUSTFLAGS_BOOTSTRAP"); if cmd == "clippy" { // clippy overwrites sysroot if we pass it to cargo. // Pass it directly to clippy instead. // NOTE: this can't be fixed in clippy because we explicitly don't set `RUSTC`, // so it has no way of knowing the sysroot. rustflags.arg("--sysroot"); rustflags.arg( self.sysroot(compiler) .as_os_str() .to_str() .expect("sysroot must be valid UTF-8"), ); // Only run clippy on a very limited subset of crates (in particular, not build scripts). cargo.arg("-Zunstable-options"); // Explicitly does *not* set `--cfg=bootstrap`, since we're using a nightly clippy. 
let host_version = Command::new("rustc").arg("--version").output().map_err(|_| ()); let output = host_version.and_then(|output| { if output.status.success() { Ok(output) } else { Err(()) } }).unwrap_or_else(|_| { eprintln!( "error: `x.py clippy` requires a host `rustc` toolchain with the `clippy` component" ); eprintln!("help: try `rustup component add clippy`"); std::process::exit(1); }); if !t!(std::str::from_utf8(&output.stdout)).contains("nightly") { rustflags.arg("--cfg=bootstrap"); } } else { rustflags.arg("--cfg=bootstrap"); } } let use_new_symbol_mangling = match self.config.rust_new_symbol_mangling { Some(setting) => { // If an explicit setting is given, use that setting } None => { if mode == Mode::Std { // The standard library defaults to the legacy scheme false } else { // The compiler and tools default to the new scheme true } } }; if use_new_symbol_mangling { rustflags.arg("-Csymbol-mangling-version=v0"); } else { rustflags.arg("-Csymbol-mangling-version=legacy"); rustflags.arg("-Zunstable-options"); } // #[cfg(not(bootstrap)] if stage != 0 { // Enable cfg checking of cargo features // FIXME: De-comment this when cargo beta get support for it // cargo.arg("-Zcheck-cfg-features"); // Enable cfg checking of rustc well-known names rustflags .arg("-Zunstable-options") // Enable checking of well known names .arg("--check-cfg=names()") // Enable checking of well known values .arg("--check-cfg=values()"); // Add extra cfg not defined in rustc for (restricted_mode, name, values) in EXTRA_CHECK_CFGS { if *restricted_mode == None || *restricted_mode == Some(mode) { // Creating a string of the values by concatenating each value: // ',"tvos","watchos"' or '' (nothing) when there are no values let values = match values { Some(values) => values .iter() .map(|val| [",", "\"", val, "\""]) .flatten() .collect::<String>(), None => String::new(), }; rustflags.arg(&format!("--check-cfg=values({name}{values})")); } } } // FIXME: It might be better to use the same value 
for both `RUSTFLAGS` and `RUSTDOCFLAGS`, // but this breaks CI. At the very least, stage0 `rustdoc` needs `--cfg bootstrap`. See // #71458. let mut rustdocflags = rustflags.clone(); rustdocflags.propagate_cargo_env("RUSTDOCFLAGS"); if stage == 0 { rustdocflags.env("RUSTDOCFLAGS_BOOTSTRAP"); } else { rustdocflags.env("RUSTDOCFLAGS_NOT_BOOTSTRAP"); } if let Ok(s) = env::var("CARGOFLAGS") { cargo.args(s.split_whitespace()); } match mode { Mode::Std | Mode::ToolBootstrap | Mode::ToolStd => {} Mode::Rustc | Mode::Codegen | Mode::ToolRustc => { // Build proc macros both for the host and the target if target != compiler.host && cmd != "check" { cargo.arg("-Zdual-proc-macros"); rustflags.arg("-Zdual-proc-macros"); } } } // This tells Cargo (and in turn, rustc) to output more complete // dependency information. Most importantly for rustbuild, this // includes sysroot artifacts, like libstd, which means that we don't // need to track those in rustbuild (an error prone process!). This // feature is currently unstable as there may be some bugs and such, but // it represents a big improvement in rustbuild's reliability on // rebuilds, so we're using it here. // // For some additional context, see #63470 (the PR originally adding // this), as well as #63012 which is the tracking issue for this // feature on the rustc side. cargo.arg("-Zbinary-dep-depinfo"); cargo.arg("-j").arg(self.jobs().to_string()); // Remove make-related flags to ensure Cargo can correctly set things up cargo.env_remove("MAKEFLAGS"); cargo.env_remove("MFLAGS"); // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 // Force cargo to output binaries with disambiguating hashes in the name let mut metadata = if compiler.stage == 0 { // Treat stage0 like a special channel, whether it's a normal prior- // release rustc or a local rebuild with the same version, so we // never mix these libraries by accident. 
"bootstrap".to_string() } else { self.config.channel.to_string() }; // We want to make sure that none of the dependencies between // std/test/rustc unify with one another. This is done for weird linkage // reasons but the gist of the problem is that if librustc, libtest, and // libstd all depend on libc from crates.io (which they actually do) we // want to make sure they all get distinct versions. Things get really // weird if we try to unify all these dependencies right now, namely // around how many times the library is linked in dynamic libraries and // such. If rustc were a static executable or if we didn't ship dylibs // this wouldn't be a problem, but we do, so it is. This is in general // just here to make sure things build right. If you can remove this and // things still build right, please do! match mode { Mode::Std => metadata.push_str("std"), // When we're building rustc tools, they're built with a search path // that contains things built during the rustc build. For example, // bitflags is built during the rustc build, and is a dependency of // rustdoc as well. We're building rustdoc in a different target // directory, though, which means that Cargo will rebuild the // dependency. When we go on to build rustdoc, we'll look for // bitflags, and find two different copies: one built during the // rustc step and one that we just built. This isn't always a // problem, somehow -- not really clear why -- but we know that this // fixes things. Mode::ToolRustc => metadata.push_str("tool-rustc"), // Same for codegen backends. Mode::Codegen => metadata.push_str("codegen"), _ => {} } cargo.env("__CARGO_DEFAULT_LIB_METADATA", &metadata); if cmd == "clippy" { rustflags.arg("-Zforce-unstable-if-unmarked"); } rustflags.arg("-Zmacro-backtrace"); let want_rustdoc = self.doc_tests != DocTests::No; // We synthetically interpret a stage0 compiler used to build tools as a // "raw" compiler in that it's the exact snapshot we download. 
Normally // the stage0 build means it uses libraries build by the stage0 // compiler, but for tools we just use the precompiled libraries that // we've downloaded let use_snapshot = mode == Mode::ToolBootstrap; assert!(!use_snapshot || stage == 0 || self.local_rebuild); let maybe_sysroot = self.sysroot(compiler); let sysroot = if use_snapshot { self.rustc_snapshot_sysroot() } else { &maybe_sysroot }; let libdir = self.rustc_libdir(compiler); // Clear the output directory if the real rustc we're using has changed; // Cargo cannot detect this as it thinks rustc is bootstrap/debug/rustc. // // Avoid doing this during dry run as that usually means the relevant // compiler is not yet linked/copied properly. // // Only clear out the directory if we're compiling std; otherwise, we // should let Cargo take care of things for us (via depdep info) if !self.config.dry_run && mode == Mode::Std && cmd == "build" { self.clear_if_dirty(&out_dir, &self.rustc(compiler)); } // Customize the compiler we're running. Specify the compiler to cargo // as our shim and then pass it some various options used to configure // how the actual compiler itself is called. // // These variables are primarily all read by // src/bootstrap/bin/{rustc.rs,rustdoc.rs} cargo .env("RUSTBUILD_NATIVE_DIR", self.native_dir(target)) .env("RUSTC_REAL", self.rustc(compiler)) .env("RUSTC_STAGE", stage.to_string()) .env("RUSTC_SYSROOT", &sysroot) .env("RUSTC_LIBDIR", &libdir) .env("RUSTDOC", self.bootstrap_out.join("rustdoc")) .env( "RUSTDOC_REAL", if cmd == "doc" || cmd == "rustdoc" || (cmd == "test" && want_rustdoc) { self.rustdoc(compiler) } else { PathBuf::from("/path/to/nowhere/rustdoc/not/required") }, ) .env("RUSTC_ERROR_METADATA_DST", self.extended_error_dir()) .env("RUSTC_BREAK_ON_ICE", "1"); // Clippy support is a hack and uses the default `cargo-clippy` in path. // Don't override RUSTC so that the `cargo-clippy` in path will be run. 
if cmd != "clippy" { cargo.env("RUSTC", self.bootstrap_out.join("rustc")); } // Dealing with rpath here is a little special, so let's go into some // detail. First off, `-rpath` is a linker option on Unix platforms // which adds to the runtime dynamic loader path when looking for // dynamic libraries. We use this by default on Unix platforms to ensure // that our nightlies behave the same on Windows, that is they work out // of the box. This can be disabled, of course, but basically that's why // we're gated on RUSTC_RPATH here. // // Ok, so the astute might be wondering "why isn't `-C rpath` used // here?" and that is indeed a good question to ask. This codegen // option is the compiler's current interface to generating an rpath. // Unfortunately it doesn't quite suffice for us. The flag currently // takes no value as an argument, so the compiler calculates what it // should pass to the linker as `-rpath`. This unfortunately is based on // the **compile time** directory structure which when building with // Cargo will be very different than the runtime directory structure. // // All that's a really long winded way of saying that if we use // `-Crpath` then the executables generated have the wrong rpath of // something like `$ORIGIN/deps` when in fact the way we distribute // rustc requires the rpath to be `$ORIGIN/../lib`. // // So, all in all, to set up the correct rpath we pass the linker // argument manually via `-C link-args=-Wl,-rpath,...`. Plus isn't it // fun to pass a flag to a tool to pass a flag to pass a flag to a tool // to change a flag in a binary? if self.config.rust_rpath && util::use_host_linker(target) { let rpath = if target.contains("apple") { // Note that we need to take one extra step on macOS to also pass // `-Wl,-instal_name,@rpath/...` to get things to work right. To // do that we pass a weird flag to the compiler to get it to do // so. 
Note that this is definitely a hack, and we should likely // flesh out rpath support more fully in the future. rustflags.arg("-Zosx-rpath-install-name"); Some("-Wl,-rpath,@loader_path/../lib") } else if !target.contains("windows") { rustflags.arg("-Clink-args=-Wl,-z,origin"); Some("-Wl,-rpath,$ORIGIN/../lib") } else { None }; if let Some(rpath) = rpath { rustflags.arg(&format!("-Clink-args={}", rpath)); } } if let Some(host_linker) = self.linker(compiler.host) { cargo.env("RUSTC_HOST_LINKER", host_linker); } if self.is_fuse_ld_lld(compiler.host) { cargo.env("RUSTC_HOST_FUSE_LD_LLD", "1"); cargo.env("RUSTDOC_FUSE_LD_LLD", "1"); } if let Some(target_linker) = self.linker(target) { let target = crate::envify(&target.triple); cargo.env(&format!("CARGO_TARGET_{}_LINKER", target), target_linker); } if self.is_fuse_ld_lld(target) { rustflags.arg("-Clink-args=-fuse-ld=lld"); } self.lld_flags(target).for_each(|flag| { rustdocflags.arg(&flag); }); if !(["build", "check", "clippy", "fix", "rustc"].contains(&cmd)) && want_rustdoc { cargo.env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler)); } let debuginfo_level = match mode { Mode::Rustc | Mode::Codegen => self.config.rust_debuginfo_level_rustc, Mode::Std => self.config.rust_debuginfo_level_std, Mode::ToolBootstrap | Mode::ToolStd | Mode::ToolRustc => { self.config.rust_debuginfo_level_tools } }; cargo.env(profile_var("DEBUG"), debuginfo_level.to_string()); cargo.env( profile_var("DEBUG_ASSERTIONS"), if mode == Mode::Std { self.config.rust_debug_assertions_std.to_string() } else { self.config.rust_debug_assertions.to_string() }, ); cargo.env( profile_var("OVERFLOW_CHECKS"), if mode == Mode::Std { self.config.rust_overflow_checks_std.to_string() } else { self.config.rust_overflow_checks.to_string() }, ); // FIXME(davidtwco): #[cfg(not(bootstrap))] - #95612 needs to be in the bootstrap compiler // for this conditional to be removed. 
if !target.contains("windows") || compiler.stage >= 1 { if target.contains("linux") || target.contains("windows") || target.contains("openbsd") { rustflags.arg("-Zunstable-options"); } match self.config.rust_split_debuginfo { SplitDebuginfo::Packed => rustflags.arg("-Csplit-debuginfo=packed"), SplitDebuginfo::Unpacked => rustflags.arg("-Csplit-debuginfo=unpacked"), SplitDebuginfo::Off => rustflags.arg("-Csplit-debuginfo=off"), }; } if self.config.cmd.bless() { // Bless `expect!` tests. cargo.env("UPDATE_EXPECT", "1"); } if !mode.is_tool() { cargo.env("RUSTC_FORCE_UNSTABLE", "1"); } if let Some(x) = self.crt_static(target) { if x { rustflags.arg("-Ctarget-feature=+crt-static"); } else { rustflags.arg("-Ctarget-feature=-crt-static"); } } if let Some(x) = self.crt_static(compiler.host) { cargo.env("RUSTC_HOST_CRT_STATIC", x.to_string()); } if let Some(map_to) = self.build.debuginfo_map_to(GitRepo::Rustc) { let map = format!("{}={}", self.build.src.display(), map_to); cargo.env("RUSTC_DEBUGINFO_MAP", map); // `rustc` needs to know the virtual `/rustc/$hash` we're mapping to, // in order to opportunistically reverse it later. cargo.env("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR", map_to); } // Enable usage of unstable features cargo.env("RUSTC_BOOTSTRAP", "1"); self.add_rust_test_threads(&mut cargo); // Almost all of the crates that we compile as part of the bootstrap may // have a build script, including the standard library. To compile a // build script, however, it itself needs a standard library! This // introduces a bit of a pickle when we're compiling the standard // library itself. // // To work around this we actually end up using the snapshot compiler // (stage0) for compiling build scripts of the standard library itself. // The stage0 compiler is guaranteed to have a libstd available for use. 
// // For other crates, however, we know that we've already got a standard // library up and running, so we can use the normal compiler to compile // build scripts in that situation. if mode == Mode::Std { cargo .env("RUSTC_SNAPSHOT", &self.initial_rustc) .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()); } else { cargo .env("RUSTC_SNAPSHOT", self.rustc(compiler)) .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_libdir(compiler)); } // Tools that use compiler libraries may inherit the `-lLLVM` link // requirement, but the `-L` library path is not propagated across // separate Cargo projects. We can add LLVM's library path to the // platform-specific environment variable as a workaround. if mode == Mode::ToolRustc || mode == Mode::Codegen { if let Some(llvm_config) = self.llvm_config(target) { let llvm_libdir = output(Command::new(&llvm_config).arg("--libdir")); add_link_lib_path(vec![llvm_libdir.trim().into()], &mut cargo); } } // Compile everything except libraries and proc macros with the more // efficient initial-exec TLS model. This doesn't work with `dlopen`, // so we can't use it by default in general, but we can use it for tools // and our own internal libraries. if !mode.must_support_dlopen() && !target.triple.starts_with("powerpc-") { rustflags.arg("-Ztls-model=initial-exec"); } if self.config.incremental { cargo.env("CARGO_INCREMENTAL", "1"); } else { // Don't rely on any default setting for incr. comp. 
in Cargo cargo.env("CARGO_INCREMENTAL", "0"); } if let Some(ref on_fail) = self.config.on_fail { cargo.env("RUSTC_ON_FAIL", on_fail); } if self.config.print_step_timings { cargo.env("RUSTC_PRINT_STEP_TIMINGS", "1"); } if self.config.print_step_rusage { cargo.env("RUSTC_PRINT_STEP_RUSAGE", "1"); } if self.config.backtrace_on_ice { cargo.env("RUSTC_BACKTRACE_ON_ICE", "1"); } cargo.env("RUSTC_VERBOSE", self.verbosity.to_string()); if source_type == SourceType::InTree { let mut lint_flags = Vec::new(); // When extending this list, add the new lints to the RUSTFLAGS of the // build_bootstrap function of src/bootstrap/bootstrap.py as well as // some code doesn't go through this `rustc` wrapper. lint_flags.push("-Wrust_2018_idioms"); lint_flags.push("-Wunused_lifetimes"); lint_flags.push("-Wsemicolon_in_expressions_from_macros"); if self.config.deny_warnings { lint_flags.push("-Dwarnings"); rustdocflags.arg("-Dwarnings"); } // This does not use RUSTFLAGS due to caching issues with Cargo. // Clippy is treated as an "in tree" tool, but shares the same // cache as other "submodule" tools. With these options set in // RUSTFLAGS, that causes *every* shared dependency to be rebuilt. // By injecting this into the rustc wrapper, this circumvents // Cargo's fingerprint detection. This is fine because lint flags // are always ignored in dependencies. Eventually this should be // fixed via better support from Cargo. cargo.env("RUSTC_LINT_FLAGS", lint_flags.join(" ")); rustdocflags.arg("-Wrustdoc::invalid_codeblock_attributes"); } if mode == Mode::Rustc { rustflags.arg("-Zunstable-options"); rustflags.arg("-Wrustc::internal"); } // Throughout the build Cargo can execute a number of build scripts // compiling C/C++ code and we need to pass compilers, archivers, flags, etc // obtained previously to those build scripts. // Build scripts use either the `cc` crate or `configure/make` so we pass // the options through environment variables that are fetched and understood by both. 
// // FIXME: the guard against msvc shouldn't need to be here if target.contains("msvc") { if let Some(ref cl) = self.config.llvm_clang_cl { cargo.env("CC", cl).env("CXX", cl); } } else { let ccache = self.config.ccache.as_ref(); let ccacheify = |s: &Path| { let ccache = match ccache { Some(ref s) => s, None => return s.display().to_string(), }; // FIXME: the cc-rs crate only recognizes the literal strings // `ccache` and `sccache` when doing caching compilations, so we // mirror that here. It should probably be fixed upstream to // accept a new env var or otherwise work with custom ccache // vars. match &ccache[..] { "ccache" | "sccache" => format!("{} {}", ccache, s.display()), _ => s.display().to_string(), } }; let cc = ccacheify(&self.cc(target)); cargo.env(format!("CC_{}", target.triple), &cc); let cflags = self.cflags(target, GitRepo::Rustc, CLang::C).join(" "); cargo.env(format!("CFLAGS_{}", target.triple), &cflags); if let Some(ar) = self.ar(target) { let ranlib = format!("{} s", ar.display()); cargo .env(format!("AR_{}", target.triple), ar) .env(format!("RANLIB_{}", target.triple), ranlib); } if let Ok(cxx) = self.cxx(target) { let cxx = ccacheify(&cxx); let cxxflags = self.cflags(target, GitRepo::Rustc, CLang::Cxx).join(" "); cargo .env(format!("CXX_{}", target.triple), &cxx) .env(format!("CXXFLAGS_{}", target.triple), cxxflags); } } if mode == Mode::Std && self.config.extended && compiler.is_final_stage(self) { rustflags.arg("-Zsave-analysis"); cargo.env( "RUST_SAVE_ANALYSIS_CONFIG", "{\"output_file\": null,\"full_docs\": false,\ \"pub_only\": true,\"reachable_only\": false,\ \"distro_crate\": true,\"signatures\": false,\"borrow_data\": false}", ); } // If Control Flow Guard is enabled, pass the `control-flow-guard` flag to rustc // when compiling the standard library, since this might be linked into the final outputs // produced by rustc. 
Since this mitigation is only available on Windows, only enable it // for the standard library in case the compiler is run on a non-Windows platform. // This is not needed for stage 0 artifacts because these will only be used for building // the stage 1 compiler. if cfg!(windows) && mode == Mode::Std && self.config.control_flow_guard && compiler.stage >= 1 { rustflags.arg("-Ccontrol-flow-guard"); } // For `cargo doc` invocations, make rustdoc print the Rust version into the docs // This replaces spaces with newlines because RUSTDOCFLAGS does not // support arguments with regular spaces. Hopefully someday Cargo will // have space support. let rust_version = self.rust_version().replace(' ', "\n"); rustdocflags.arg("--crate-version").arg(&rust_version); // Environment variables *required* throughout the build // // FIXME: should update code to not require this env var cargo.env("CFG_COMPILER_HOST_TRIPLE", target.triple); // Set this for all builds to make sure doc builds also get it. cargo.env("CFG_RELEASE_CHANNEL", &self.config.channel); // This one's a bit tricky. As of the time of this writing the compiler // links to the `winapi` crate on crates.io. This crate provides raw // bindings to Windows system functions, sort of like libc does for // Unix. This crate also, however, provides "import libraries" for the // MinGW targets. There's an import library per dll in the windows // distribution which is what's linked to. These custom import libraries // are used because the winapi crate can reference Windows functions not // present in the MinGW import libraries. // // For example MinGW may ship libdbghelp.a, but it may not have // references to all the functions in the dbghelp dll. Instead the // custom import library for dbghelp in the winapi crates has all this // information. // // Unfortunately for us though the import libraries are linked by // default via `-ldylib=winapi_foo`. 
That is, they're linked with the // `dylib` type with a `winapi_` prefix (so the winapi ones don't // conflict with the system MinGW ones). This consequently means that // the binaries we ship of things like rustc_codegen_llvm (aka the rustc_codegen_llvm // DLL) when linked against *again*, for example with procedural macros // or plugins, will trigger the propagation logic of `-ldylib`, passing // `-lwinapi_foo` to the linker again. This isn't actually available in // our distribution, however, so the link fails. // // To solve this problem we tell winapi to not use its bundled import // libraries. This means that it will link to the system MinGW import // libraries by default, and the `-ldylib=foo` directives will still get // passed to the final linker, but they'll look like `-lfoo` which can // be resolved because MinGW has the import library. The downside is we // don't get newer functions from Windows, but we don't use any of them // anyway. if !mode.is_tool() { cargo.env("WINAPI_NO_BUNDLED_LIBRARIES", "1"); } for _ in 0..self.verbosity { cargo.arg("-v"); } match (mode, self.config.rust_codegen_units_std, self.config.rust_codegen_units) { (Mode::Std, Some(n), _) | (_, _, Some(n)) => { cargo.env(profile_var("CODEGEN_UNITS"), n.to_string()); } _ => { // Don't set anything } } if self.config.rust_optimize { // FIXME: cargo bench/install do not accept `--release` if cmd != "bench" && cmd != "install" { cargo.arg("--release"); } } if self.config.locked_deps { cargo.arg("--locked"); } if self.config.vendor || self.is_sudo { cargo.arg("--frozen"); } // Try to use a sysroot-relative bindir, in case it was configured absolutely. cargo.env("RUSTC_INSTALL_BINDIR", self.config.bindir_relative()); self.ci_env.force_coloring_in_ci(&mut cargo); // When we build Rust dylibs they're all intended for intermediate // usage, so make sure we pass the -Cprefer-dynamic flag instead of // linking all deps statically into the dylib. 
if matches!(mode, Mode::Std | Mode::Rustc) { rustflags.arg("-Cprefer-dynamic"); } // When building incrementally we default to a lower ThinLTO import limit // (unless explicitly specified otherwise). This will produce a somewhat // slower code but give way better compile times. { let limit = match self.config.rust_thin_lto_import_instr_limit { Some(limit) => Some(limit), None if self.config.incremental => Some(10), _ => None, }; if let Some(limit) = limit { rustflags.arg(&format!("-Cllvm-args=-import-instr-limit={}", limit)); } } Cargo { command: cargo, rustflags, rustdocflags } } /// Ensure that a given step is built, returning its output. This will /// cache the step, so it is safe (and good!) to call this as often as /// needed to ensure that all dependencies are built. pub fn ensure<S: Step>(&'a self, step: S) -> S::Output { { let mut stack = self.stack.borrow_mut(); for stack_step in stack.iter() { // should skip if stack_step.downcast_ref::<S>().map_or(true, |stack_step| *stack_step != step) { continue; } let mut out = String::new(); out += &format!("\n\nCycle in build detected when adding {:?}\n", step); for el in stack.iter().rev() { out += &format!("\t{:?}\n", el); } panic!("{}", out); } if let Some(out) = self.cache.get(&step) { self.verbose_than(1, &format!("{}c {:?}", " ".repeat(stack.len()), step)); return out; } self.verbose_than(1, &format!("{}> {:?}", " ".repeat(stack.len()), step)); stack.push(Box::new(step.clone())); } let (out, dur) = { let start = Instant::now(); let zero = Duration::new(0, 0); let parent = self.time_spent_on_dependencies.replace(zero); let out = step.clone().run(self); let dur = start.elapsed(); let deps = self.time_spent_on_dependencies.replace(parent + dur); (out, dur - deps) }; if self.config.print_step_timings && !self.config.dry_run { let step_string = format!("{:?}", step); let brace_index = step_string.find("{").unwrap_or(0); let type_string = type_name::<S>(); println!( "[TIMING] {} {} -- {}.{:03}", 
&type_string.strip_prefix("bootstrap::").unwrap_or(type_string), &step_string[brace_index..], dur.as_secs(), dur.subsec_millis() ); } { let mut stack = self.stack.borrow_mut(); let cur_step = stack.pop().expect("step stack empty"); assert_eq!(cur_step.downcast_ref(), Some(&step)); } self.verbose_than(1, &format!("{}< {:?}", " ".repeat(self.stack.borrow().len()), step)); self.cache.put(step, out.clone()); out } /// Ensure that a given step is built *only if it's supposed to be built by default*, returning /// its output. This will cache the step, so it's safe (and good!) to call this as often as /// needed to ensure that all dependencies are build. pub(crate) fn ensure_if_default<T, S: Step<Output = Option<T>>>( &'a self, step: S, kind: Kind, ) -> S::Output { let desc = StepDescription::from::<S>(kind); let should_run = (desc.should_run)(ShouldRun::new(self, desc.kind)); // Avoid running steps contained in --exclude for pathset in &should_run.paths { if desc.is_excluded(self, pathset) { return None; } } // Only execute if it's supposed to run as default if desc.default && should_run.is_really_default() { self.ensure(step) } else { None } } /// Checks if any of the "should_run" paths is in the `Builder` paths. pub(crate) fn was_invoked_explicitly<S: Step>(&'a self, kind: Kind) -> bool { let desc = StepDescription::from::<S>(kind); let should_run = (desc.should_run)(ShouldRun::new(self, desc.kind)); for path in &self.paths { if should_run.paths.iter().any(|s| s.has(path, Some(desc.kind))) && !desc.is_excluded( self, &PathSet::Suite(TaskPath { path: path.clone(), kind: Some(desc.kind) }), ) { return true; } } false } } #[cfg(test)] mod tests; #[derive(Debug, Clone)] struct Rustflags(String, TargetSelection); impl Rustflags { fn new(target: TargetSelection) -> Rustflags { let mut ret = Rustflags(String::new(), target); ret.propagate_cargo_env("RUSTFLAGS"); ret } /// By default, cargo will pick up on various variables in the environment. 
However, bootstrap /// reuses those variables to pass additional flags to rustdoc, so by default they get overridden. /// Explicitly add back any previous value in the environment. /// /// `prefix` is usually `RUSTFLAGS` or `RUSTDOCFLAGS`. fn propagate_cargo_env(&mut self, prefix: &str) { // Inherit `RUSTFLAGS` by default ... self.env(prefix); // ... and also handle target-specific env RUSTFLAGS if they're configured. let target_specific = format!("CARGO_TARGET_{}_{}", crate::envify(&self.1.triple), prefix); self.env(&target_specific); } fn env(&mut self, env: &str) { if let Ok(s) = env::var(env) { for part in s.split(' ') { self.arg(part); } } } fn arg(&mut self, arg: &str) -> &mut Self { assert_eq!(arg.split(' ').count(), 1); if !self.0.is_empty() { self.0.push(' '); } self.0.push_str(arg); self } } #[derive(Debug)] pub struct Cargo { command: Command, rustflags: Rustflags, rustdocflags: Rustflags, } impl Cargo { pub fn rustdocflag(&mut self, arg: &str) -> &mut Cargo { self.rustdocflags.arg(arg); self } pub fn rustflag(&mut self, arg: &str) -> &mut Cargo { self.rustflags.arg(arg); self } pub fn arg(&mut self, arg: impl AsRef<OsStr>) -> &mut Cargo { self.command.arg(arg.as_ref()); self } pub fn args<I, S>(&mut self, args: I) -> &mut Cargo where I: IntoIterator<Item = S>, S: AsRef<OsStr>, { for arg in args { self.arg(arg.as_ref()); } self } pub fn env(&mut self, key: impl AsRef<OsStr>, value: impl AsRef<OsStr>) -> &mut Cargo { // These are managed through rustflag/rustdocflag interfaces. 
        assert_ne!(key.as_ref(), "RUSTFLAGS");
        assert_ne!(key.as_ref(), "RUSTDOCFLAGS");
        self.command.env(key.as_ref(), value.as_ref());
        self
    }

    /// Adds the rustc library path for `compiler` to this command's
    /// dynamic-loader search path, so tools linked against compiler
    /// libraries can run.
    pub fn add_rustc_lib_path(&mut self, builder: &Builder<'_>, compiler: Compiler) {
        builder.add_rustc_lib_path(compiler, &mut self.command);
    }

    /// Sets the working directory the wrapped cargo invocation will run in.
    pub fn current_dir(&mut self, dir: &Path) -> &mut Cargo {
        self.command.current_dir(dir);
        self
    }
}

impl From<Cargo> for Command {
    /// Finalizes the builder: flushes the accumulated rustflags/rustdocflags
    /// strings into the `RUSTFLAGS`/`RUSTDOCFLAGS` environment variables of
    /// the underlying `Command`. This is why `Cargo::env` forbids setting
    /// those two variables directly — they would be clobbered here.
    fn from(mut cargo: Cargo) -> Command {
        let rustflags = &cargo.rustflags.0;
        if !rustflags.is_empty() {
            cargo.command.env("RUSTFLAGS", rustflags);
        }

        let rustdocflags = &cargo.rustdocflags.0;
        if !rustdocflags.is_empty() {
            cargo.command.env("RUSTDOCFLAGS", rustdocflags);
        }

        cargo.command
    }
}
39.051387
134
0.548754
33f3ad79838e9f79f9dc6fb34a43584fc0058f3e
737
//! A plugin to demonstrate getting and setting blocks in the world. use quill::{entities::Player, BlockState, Game, Plugin, Position}; quill::plugin!(BlockAccess); pub struct BlockAccess; impl Plugin for BlockAccess { fn enable(_game: &mut quill::Game, setup: &mut quill::Setup<Self>) -> Self { setup.add_system(system); Self } fn disable(self, _game: &mut quill::Game) {} } fn system(_plugin: &mut BlockAccess, game: &mut Game) { // Set the blocks each player is standing on // to bedrock. for (_entity, (_, pos)) in game.query::<(&Player, &Position)>() { let block_pos = pos.block(); game.set_block(block_pos, BlockState::from_id(33).unwrap()) .ok(); } }
26.321429
80
0.632293
5d73eb53da2f926e11b2650d2068a1bd221d07b7
1,316
// structs2.rs
// Address all the TODOs to make the tests pass!

#[derive(Debug)]
struct Order {
    name: String,
    year: u32,
    made_by_phone: bool,
    made_by_mobile: bool,
    made_by_email: bool,
    item_number: u32,
    count: u32,
}

/// Returns the template order that every concrete order is derived from.
fn create_order_template() -> Order {
    Order {
        name: String::from("Bob"),
        year: 2019,
        made_by_phone: false,
        made_by_mobile: false,
        made_by_email: true,
        item_number: 123,
        count: 0,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn your_order() {
        let order_template = create_order_template();
        // Use struct-update syntax, as the exercise asks: override `name`
        // and `count`, take everything else from the template. All the
        // non-overridden fields are `Copy`, so `order_template` remains
        // fully usable for the assertions below.
        let your_order = Order {
            name: String::from("Hacker in Rust"),
            count: 1,
            ..order_template
        };
        assert_eq!(your_order.name, "Hacker in Rust");
        assert_eq!(your_order.year, order_template.year);
        assert_eq!(your_order.made_by_phone, order_template.made_by_phone);
        assert_eq!(your_order.made_by_mobile, order_template.made_by_mobile);
        assert_eq!(your_order.made_by_email, order_template.made_by_email);
        assert_eq!(your_order.item_number, order_template.item_number);
        assert_eq!(your_order.count, 1);
    }
}
26.857143
82
0.645137
386f8106044b80e821a92be73b3c8bfc2be0751e
1,299
mod link;

use serenity::{
    client::Context,
    model::{
        id::GuildId,
        interactions::{ApplicationCommandOptionType, Interaction},
    },
};

/// Registers the `/minecraft` guild slash command with its `link`
/// subcommand (which takes a required `username` string option).
///
/// Panics if the Discord API rejects the registration; this runs once at
/// startup, so failing loudly is intentional.
pub async fn register(ctx: &Context, guild_id: GuildId, application_id: u64) {
    Interaction::create_guild_application_command(&ctx.http, guild_id, application_id, |a| {
        a.name("minecraft")
            .description("Subcommands for Minecraft linking")
            .create_interaction_option(|o| {
                o.name("link")
                    .description("Generates a new server address you join to link your account")
                    .kind(ApplicationCommandOptionType::SubCommand)
                    .create_sub_option(|o| {
                        o.name("username")
                            .description("Current Minecraft username")
                            .kind(ApplicationCommandOptionType::String)
                            .required(true)
                    })
            })
    })
    .await
    .unwrap();
}

/// Dispatches an incoming `/minecraft` interaction to the matching
/// subcommand handler.
///
/// Interactions that carry no data, no options, or an unknown subcommand
/// name are silently ignored. (The previous implementation called
/// `interaction.data.as_ref().unwrap()`, which would panic on any
/// interaction arriving without a data payload.)
pub async fn execute(ctx: Context, interaction: Interaction) {
    // Clone the subcommand name out so the borrow of `interaction` ends
    // before we move it into the handler.
    let subcommand = interaction
        .data
        .as_ref()
        .and_then(|data| data.options.get(0))
        .map(|option| option.name.clone());

    match subcommand.as_deref() {
        Some("link") => link::execute(ctx, interaction).await,
        _ => {}
    }
}
32.475
96
0.539646
f7d700ea8dab164607471c339c33d500bffb52e4
4,489
use std::borrow::Cow; #[cfg(not(feature = "http"))] use std::fs::File; use std::path::{Path, PathBuf}; #[cfg(feature = "http")] use reqwest::Client; #[cfg(feature = "http")] use tokio::{fs::File, io::AsyncReadExt}; use url::Url; #[cfg(feature = "http")] use crate::error::{Error, Result}; /// Enum that allows a user to pass a [`Path`] or a [`File`] type to [`send_files`] /// /// [`send_files`]: crate::model::id::ChannelId::send_files #[derive(Clone, Debug)] #[non_exhaustive] pub enum AttachmentType<'a> { /// Indicates that the [`AttachmentType`] is a byte slice with a filename. Bytes { data: Cow<'a, [u8]>, filename: String }, /// Indicates that the [`AttachmentType`] is a [`File`] File { file: &'a File, filename: String }, /// Indicates that the [`AttachmentType`] is a [`Path`] Path(&'a Path), /// Indicates that the [`AttachmentType`] is an image URL. Image(Url), } #[cfg(feature = "http")] impl<'a> AttachmentType<'a> { pub(crate) async fn data(&self, client: &Client) -> Result<Vec<u8>> { let data = match self { AttachmentType::Bytes { data, .. } => data.clone().into_owned(), AttachmentType::File { file, .. } => { let mut buf = Vec::new(); file.try_clone().await?.read_to_end(&mut buf).await?; buf }, AttachmentType::Path(path) => { let mut file = File::open(path).await?; let mut buf = Vec::new(); file.read_to_end(&mut buf).await?; buf }, AttachmentType::Image(url) => { let response = client.get(url.clone()).send().await?; response.bytes().await?.to_vec() }, }; Ok(data) } pub(crate) fn filename(&self) -> Result<Option<String>> { match self { AttachmentType::Bytes { filename, .. } | AttachmentType::File { filename, .. 
} => Ok(Some(filename.to_string())), AttachmentType::Path(path) => { Ok(path.file_name().map(|filename| filename.to_string_lossy().to_string())) }, AttachmentType::Image(url) => { match url.path_segments().and_then(|segments| segments.last()) { Some(filename) => Ok(Some(filename.to_string())), None => Err(Error::Url(url.to_string())), } }, } } } impl<'a> From<(&'a [u8], &str)> for AttachmentType<'a> { fn from(params: (&'a [u8], &str)) -> AttachmentType<'a> { AttachmentType::Bytes { data: Cow::Borrowed(params.0), filename: params.1.to_string(), } } } impl<'a> From<&'a str> for AttachmentType<'a> { /// Constructs an [`AttachmentType`] from a string. /// This string may refer to the path of a file on disk, or the http url to an image on the internet. fn from(s: &'a str) -> AttachmentType<'_> { match Url::parse(s) { Ok(url) => AttachmentType::Image(url), Err(_) => AttachmentType::Path(Path::new(s)), } } } impl<'a> From<&'a Path> for AttachmentType<'a> { fn from(path: &'a Path) -> AttachmentType<'_> { AttachmentType::Path(path) } } impl<'a> From<&'a PathBuf> for AttachmentType<'a> { fn from(pathbuf: &'a PathBuf) -> AttachmentType<'_> { AttachmentType::Path(pathbuf.as_path()) } } impl<'a> From<(&'a File, &str)> for AttachmentType<'a> { fn from(f: (&'a File, &str)) -> AttachmentType<'a> { AttachmentType::File { file: f.0, filename: f.1.to_string(), } } } #[cfg(test)] mod test { use std::path::Path; use super::AttachmentType; #[test] fn test_attachment_type() { assert!(matches!( AttachmentType::from(Path::new("./dogs/corgis/kona.png")), AttachmentType::Path(_) )); assert!(matches!( AttachmentType::from(Path::new("./cats/copycat.png")), AttachmentType::Path(_) )); assert!(matches!( AttachmentType::from("./mascots/crabs/ferris.png"), AttachmentType::Path(_) )); assert!(matches!( AttachmentType::from("https://test.url/test.jpg"), AttachmentType::Image(_) )) } }
30.537415
105
0.521051
039f4e97ea6c1611a658f6a22adaaac9f43be88a
41,398
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

//! An interactive dataflow server.

use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::net::TcpStream;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Mutex;
use std::time::{Instant, UNIX_EPOCH};

use differential_dataflow::operators::arrange::arrangement::Arrange;
use differential_dataflow::trace::cursor::Cursor;
use differential_dataflow::trace::TraceReader;
use differential_dataflow::Collection;
use futures::channel::mpsc::UnboundedReceiver;
use futures::executor::block_on;
use futures::future::TryFutureExt;
use futures::sink::{Sink, SinkExt};
use serde::{Deserialize, Serialize};
use timely::communication::allocator::generic::GenericBuilder;
use timely::communication::allocator::zero_copy::initialize::initialize_networking_from_sockets;
use timely::communication::initialize::WorkerGuards;
use timely::communication::Allocate;
use timely::dataflow::operators::unordered_input::UnorderedHandle;
use timely::dataflow::operators::ActivateCapability;
use timely::logging::Logger;
use timely::order::PartialOrder;
use timely::progress::frontier::Antichain;
use timely::progress::ChangeBatch;
use timely::worker::Worker as TimelyWorker;

use dataflow_types::logging::LoggingConfig;
use dataflow_types::{
    DataflowDesc, DataflowError, MzOffset, PeekResponse, Timestamp, TimestampSourceUpdate, Update,
};
use expr::{Diff, GlobalId, PartitionId, RowSetFinishing, SourceInstanceId};
use ore::future::channel::mpsc::ReceiverExt;
use repr::{Datum, Row, RowArena};

use crate::arrangement::manager::{TraceBundle, TraceManager};
use crate::logging;
use crate::logging::materialized::MaterializedEvent;
use crate::operator::CollectionExt;
use crate::render::{self, RenderState};
use crate::server::metrics::Metrics;
use crate::source::persistence::WorkerPersistenceData;

/// Prometheus-style metrics reported by each worker (see `Metrics` usage below).
mod metrics;

/// A [`comm::broadcast::Token`] that permits broadcasting commands to the
/// Timely workers.
pub struct BroadcastToken;

impl comm::broadcast::Token for BroadcastToken {
    type Item = SequencedCommand;

    /// Returns true, to enable loopback.
    ///
    /// Since the coordinator lives on the same process as one set of
    /// workers, we need to enable loopback so that broadcasts are
    /// transmitted intraprocess and visible to those workers.
    fn loopback(&self) -> bool {
        true
    }
}

/// Explicit instructions for timely dataflow workers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SequencedCommand {
    /// Create a sequence of dataflows.
    CreateDataflows(Vec<DataflowDesc>),
    /// Drop the sources bound to these names.
    DropSources(Vec<GlobalId>),
    /// Drop the sinks bound to these names.
    DropSinks(Vec<GlobalId>),
    /// Drop the indexes bound to these names.
    DropIndexes(Vec<GlobalId>),
    /// Peek at an arrangement.
    ///
    /// This request elicits data from the worker, by naming an
    /// arrangement and some actions to apply to the results before
    /// returning them.
    Peek {
        /// The identifier of the arrangement.
        id: GlobalId,
        /// The identifier of this peek request.
        ///
        /// Used in responses and cancelation requests.
        conn_id: u32,
        /// A communication link for sending a response.
        tx: comm::mpsc::Sender<PeekResponse>,
        /// The logical timestamp at which the arrangement is queried.
        timestamp: Timestamp,
        /// Actions to apply to the result set before returning them.
        finishing: RowSetFinishing,
        /// A projection that should be applied to results.
        project: Option<Vec<usize>>,
        /// A list of predicates that should restrict the set of results.
        filter: Vec<expr::ScalarExpr>,
    },
    /// Cancel the peek associated with the given `conn_id`.
    CancelPeek {
        /// The identifier of the peek request to cancel.
        conn_id: u32,
    },
    /// Insert `updates` into the local input named `id`.
    Insert {
        /// Identifier of the local input.
        id: GlobalId,
        /// A list of updates to be introduced to the input.
        updates: Vec<Update>,
    },
    /// Enable compaction in views.
    ///
    /// Each entry in the vector names a view and provides a frontier after which
    /// accumulations must be correct. The workers gain the liberty of compacting
    /// the corresponding maintained traces up through that frontier.
    AllowCompaction(Vec<(GlobalId, Antichain<Timestamp>)>),
    /// Advance worker timestamp
    AdvanceSourceTimestamp {
        /// The ID of the timestamped source
        id: SourceInstanceId,
        /// The associated update (RT or BYO)
        update: TimestampSourceUpdate,
    },
    /// Advance all local inputs to the given timestamp.
    AdvanceAllLocalInputs {
        /// The timestamp to advance to.
        advance_to: Timestamp,
    },
    /// Request that feedback is streamed to the provided channel.
    EnableFeedback(comm::mpsc::Sender<WorkerFeedbackWithMeta>),
    /// Request that persistence data is streamed to the provided channel.
    EnablePersistence(comm::mpsc::Sender<PersistenceMessage>),
    /// Request that the logging sources in the contained configuration are
    /// installed.
    EnableLogging(LoggingConfig),
    /// Disconnect inputs, drain dataflows, and shut down timely workers.
    Shutdown,
}

/// Information from timely dataflow workers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WorkerFeedbackWithMeta {
    /// Identifies the worker by its identifier.
    pub worker_id: usize,
    /// The feedback itself.
    pub message: WorkerFeedback,
}

/// All data and metadata messages that can be sent by dataflow workers or coordinator
/// to the persister thread.
#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub enum PersistenceMessage {
    /// Data to be persisted (sent from dataflow workers)
    Data(WorkerPersistenceData),
    /// Add source to persist
    AddSource(GlobalId),
    /// Drop source to persist
    DropSource(GlobalId),
    /// Shut down persistence thread
    Shutdown,
}

/// Responses the worker can provide back to the coordinator.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum WorkerFeedback {
    /// A list of identifiers of traces, with prior and new upper frontiers.
    FrontierUppers(Vec<(GlobalId, ChangeBatch<Timestamp>)>),
    /// The id of a source whose source connector has been dropped
    DroppedSource(SourceInstanceId),
    /// The id of a source whose source connector has been created
    CreateSource(SourceInstanceId),
}

/// Initiates a timely dataflow computation, processing materialized commands.
///
/// TODO(benesch): pass a config struct here, or find some other way to cut
/// down on the number of arguments.
pub fn serve<C>(
    sockets: Vec<Option<TcpStream>>,
    threads: usize,
    process: usize,
    switchboard: comm::Switchboard<C>,
    executor: tokio::runtime::Handle,
) -> Result<WorkerGuards<()>, String>
where
    C: comm::Connection,
{
    assert!(threads > 0);

    // Construct endpoints for each thread that will receive the coordinator's
    // sequenced command stream.
    //
    // TODO(benesch): package up this idiom of handing out ownership of N items
    // to the N timely threads that will be spawned. The Mutex<Vec<Option<T>>>
    // is hard to read through.
    let command_rxs = {
        let mut rx = switchboard.broadcast_rx(BroadcastToken).fanout();
        // One receiver per worker thread; each thread later `take()`s its own
        // slot out of the Mutex-guarded Vec.
        let command_rxs = Mutex::new((0..threads).map(|_| Some(rx.attach())).collect::<Vec<_>>());
        // Drive the fan-out itself on the tokio executor; any shuttling error
        // is unrecoverable, hence the panic.
        executor.spawn(
            rx.shuttle()
                .map_err(|err| panic!("failure shuttling dataflow receiver commands: {}", err)),
        );
        command_rxs
    };

    // Set up timely's zero-copy networking over the pre-connected sockets.
    let log_fn = Box::new(|_| None);
    let (builders, guard) = initialize_networking_from_sockets(sockets, process, threads, log_fn)
        .map_err(|err| format!("failed to initialize networking: {}", err))?;
    let builders = builders.into_iter().map(GenericBuilder::ZeroCopy).collect();

    // Spawn one Worker per timely thread; each claims its command receiver and
    // loops in `Worker::run` until a Shutdown command arrives.
    timely::execute::execute_from(builders, Box::new(guard), move |timely_worker| {
        executor.enter(|| {
            let command_rx = command_rxs.lock().unwrap()[timely_worker.index() % threads]
                .take()
                .unwrap()
                .request_unparks(&executor);
            let worker_idx = timely_worker.index();
            Worker {
                timely_worker,
                render_state: RenderState {
                    traces: TraceManager::new(worker_idx),
                    local_inputs: HashMap::new(),
                    ts_source_mapping: HashMap::new(),
                    ts_histories: Default::default(),
                    ts_source_updates: Default::default(),
                    dataflow_tokens: HashMap::new(),
                    persistence_tx: None,
                },
                materialized_logger: None,
                command_rx,
                pending_peeks: Vec::new(),
                feedback_tx: None,
                reported_frontiers: HashMap::new(),
                metrics: Metrics::for_worker_id(worker_idx),
            }
            .run()
        })
    })
}

/// A type wrapper for the number of partitions associated with a source.
pub type PartitionCount = i32;

/// A type wrapper for a timestamp update.
/// For real-time sources, it consists of a PartitionCount.
/// For BYO sources, it consists of a mapping from PartitionId to a vector of
/// (PartitionCount, Timestamp, MzOffset) tuples.
pub enum TimestampDataUpdate {
    /// RT sources see a current estimate of the number of partitions for the source
    RealTime(PartitionCount),
    /// BYO sources see a list of (PartitionCount, Timestamp, MzOffset) timestamp updates
    BringYourOwn(HashMap<PartitionId, VecDeque<(PartitionCount, Timestamp, MzOffset)>>),
}

/// Map of source ID to timestamp data updates (RT or BYO).
pub type TimestampDataUpdates = Rc<RefCell<HashMap<SourceInstanceId, TimestampDataUpdate>>>;

/// List of sources that need to start being timestamped or have been dropped and no longer require
/// timestamping.
/// A source inserts a StartTimestamping to this vector on source creation, and adds a
/// StopTimestamping request once the operator for the source is dropped.
pub type TimestampMetadataUpdates = Rc<RefCell<Vec<TimestampMetadataUpdate>>>;

/// Possible timestamping metadata information messages that get sent from workers to coordinator
pub enum TimestampMetadataUpdate {
    /// Requests to start timestamping a source with given id
    StartTimestamping(SourceInstanceId),
    /// Request to stop timestamping a source with given id
    StopTimestamping(SourceInstanceId),
}

/// State maintained for each worker thread.
///
/// Much of this state can be viewed as local variables for the worker thread,
/// holding state that persists across function calls.
struct Worker<'w, A>
where
    A: Allocate,
{
    /// The underlying Timely worker.
    timely_worker: &'w mut TimelyWorker<A>,
    /// The state associated with rendering dataflows.
    render_state: RenderState,
    /// The logger, from Timely's logging framework, if logs are enabled.
    materialized_logger: Option<logging::materialized::Logger>,
    /// The channel from which commands are drawn.
    command_rx: UnboundedReceiver<SequencedCommand>,
    /// Peek commands that are awaiting fulfillment.
    pending_peeks: Vec<PendingPeek>,
    /// The channel over which frontier information is reported.
    feedback_tx: Option<Pin<Box<dyn Sink<WorkerFeedbackWithMeta, Error = ()>>>>,
    /// Tracks the frontier information that has been sent over `feedback_tx`.
    reported_frontiers: HashMap<GlobalId, Antichain<Timestamp>>,
    /// Metrics bundle.
    metrics: Metrics,
}

impl<'w, A> Worker<'w, A>
where
    A: Allocate + 'w,
{
    /// Initializes timely dataflow logging and publishes as a view.
    fn initialize_logging(&mut self, logging: &LoggingConfig) {
        if self.materialized_logger.is_some() {
            panic!("dataflow server has already initialized logging");
        }

        use crate::logging::BatchLogger;
        use timely::dataflow::operators::capture::event::link::EventLink;

        // Convert the configured granularity from nanoseconds to milliseconds,
        // clamped to at least 1ms.
        let granularity_ms = std::cmp::max(1, logging.granularity_ns / 1_000_000) as Timestamp;

        // Track time relative to the Unix epoch, rather than when the server
        // started, so that the logging sources can be joined with tables and
        // other real time sources for semi-sensible results.
        let now = Instant::now();
        let unix = UNIX_EPOCH.elapsed().expect("time went backwards");

        // Establish loggers first, so we can either log the logging or not, as we like.
        let t_linked = std::rc::Rc::new(EventLink::new());
        let mut t_logger = BatchLogger::new(t_linked.clone(), granularity_ms);
        let d_linked = std::rc::Rc::new(EventLink::new());
        let mut d_logger = BatchLogger::new(d_linked.clone(), granularity_ms);
        let m_linked = std::rc::Rc::new(EventLink::new());
        let mut m_logger = BatchLogger::new(m_linked.clone(), granularity_ms);

        // Construct logging dataflows and endpoints before registering any.
        let t_traces = logging::timely::construct(&mut self.timely_worker, logging, t_linked);
        let d_traces = logging::differential::construct(&mut self.timely_worker, logging, d_linked);
        let m_traces = logging::materialized::construct(&mut self.timely_worker, logging, m_linked);

        // Register each logger endpoint.
        self.timely_worker.log_register().insert_logger(
            "timely",
            Logger::new(now, unix, self.timely_worker.index(), move |time, data| {
                t_logger.publish_batch(time, data)
            }),
        );

        self.timely_worker.log_register().insert_logger(
            "differential/arrange",
            Logger::new(now, unix, self.timely_worker.index(), move |time, data| {
                d_logger.publish_batch(time, data)
            }),
        );

        self.timely_worker.log_register().insert_logger(
            "materialized",
            Logger::new(now, unix, self.timely_worker.index(), move |time, data| {
                m_logger.publish_batch(time, data)
            }),
        );

        // Logging sources have no error stream; share one empty error trace.
        let errs = self.timely_worker.dataflow::<Timestamp, _, _>(|scope| {
            Collection::<_, DataflowError, isize>::empty(scope)
                .arrange()
                .trace
        });

        let logger = self
            .timely_worker
            .log_register()
            .get("materialized")
            .unwrap();

        // Install traces as maintained indexes
        for (log, (_, trace)) in t_traces {
            let id = logging.active_logs[&log];
            self.render_state
                .traces
                .set(id, TraceBundle::new(trace, errs.clone()));
            self.reported_frontiers.insert(id, Antichain::from_elem(0));
            logger.log(MaterializedEvent::Frontier(id, 0, 1));
        }
        for (log, (_, trace)) in d_traces {
            let id = logging.active_logs[&log];
            self.render_state
                .traces
                .set(id, TraceBundle::new(trace, errs.clone()));
            self.reported_frontiers.insert(id, Antichain::from_elem(0));
            logger.log(MaterializedEvent::Frontier(id, 0, 1));
        }
        for (log, (_, trace)) in m_traces {
            let id = logging.active_logs[&log];
            self.render_state
                .traces
                .set(id, TraceBundle::new(trace, errs.clone()));
            self.reported_frontiers.insert(id, Antichain::from_elem(0));
            logger.log(MaterializedEvent::Frontier(id, 0, 1));
        }

        self.materialized_logger = Some(logger);
    }

    /// Disables timely dataflow logging.
    ///
    /// This does not unpublish views and is only useful to terminate logging streams to ensure that
    /// materialized can terminate cleanly.
    fn shutdown_logging(&mut self) {
        self.timely_worker.log_register().remove("timely");
        self.timely_worker
            .log_register()
            .remove("differential/arrange");
        self.timely_worker.log_register().remove("materialized");
    }

    /// Draws from `dataflow_command_receiver` until shutdown.
    fn run(&mut self) {
        let mut shutdown = false;
        while !shutdown {
            // Enable trace compaction.
            self.render_state.traces.maintenance();

            // Ask Timely to execute a unit of work. If Timely decides there's
            // nothing to do, it will park the thread. We rely on another thread
            // unparking us when there's new work to be done, e.g., when sending
            // a command or when new Kafka messages have arrived.
            self.timely_worker.step_or_park(None);

            // Report frontier information back the coordinator.
            self.report_frontiers();
            self.report_source_modifications();

            // Handle any received commands.
            let mut cmds = vec![];
            while let Ok(Some(cmd)) = self.command_rx.try_next() {
                cmds.push(cmd);
            }
            self.metrics.observe_command_queue(&cmds);
            for cmd in cmds {
                // Note the shutdown, but still process the command (and any
                // commands queued after it) so teardown runs in handle_command.
                if let SequencedCommand::Shutdown = cmd {
                    shutdown = true;
                }
                self.metrics.observe_command(&cmd);
                self.handle_command(cmd);
            }

            self.metrics.observe_pending_peeks(&self.pending_peeks);
            self.metrics.observe_command_finish();
            self.process_peeks();
        }
    }

    /// Report source drops or creations to the coordinator
    fn report_source_modifications(&mut self) {
        let mut updates = self.render_state.ts_source_updates.borrow_mut();
        for source_update in updates.iter() {
            match source_update {
                TimestampMetadataUpdate::StopTimestamping(id) => {
                    // A source was deleted
                    self.render_state.ts_histories.borrow_mut().remove(id);
                    self.render_state.ts_source_mapping.remove(id);
                    let connector = self.feedback_tx.as_mut().unwrap();
                    block_on(connector.send(WorkerFeedbackWithMeta {
                        worker_id: self.timely_worker.index(),
                        message: WorkerFeedback::DroppedSource(*id),
                    }))
                    .unwrap();
                }
                TimestampMetadataUpdate::StartTimestamping(id) => {
                    // A source was created
                    let connector = self.feedback_tx.as_mut().unwrap();
                    block_on(connector.send(WorkerFeedbackWithMeta {
                        worker_id: self.timely_worker.index(),
                        message: WorkerFeedback::CreateSource(*id),
                    }))
                    .unwrap();
                }
            }
        }
        updates.clear();
    }

    /// Send progress information to the coordinator.
    fn report_frontiers(&mut self) {
        if let Some(feedback_tx) = &mut self.feedback_tx {
            let mut upper = Antichain::new();
            let mut progress = Vec::new();
            let ids = self
                .render_state
                .traces
                .traces
                .keys()
                .cloned()
                .collect::<Vec<_>>();
            for id in ids {
                if let Some(traces) = self.render_state.traces.get_mut(&id) {
                    // Read the upper frontier and compare to what we've reported.
                    traces.oks_mut().read_upper(&mut upper);
                    let lower = self
                        .reported_frontiers
                        .get_mut(&id)
                        .expect("Frontier missing!");
                    if lower != &upper {
                        // Encode the frontier change as a ChangeBatch:
                        // retract the old frontier, assert the new one.
                        let mut changes = ChangeBatch::new();
                        for time in lower.elements().iter() {
                            changes.update(time.clone(), -1);
                        }
                        for time in upper.elements().iter() {
                            changes.update(time.clone(), 1);
                        }
                        let lower = self.reported_frontiers.get_mut(&id).unwrap();
                        changes.compact();
                        if !changes.is_empty() {
                            progress.push((id, changes));
                        }
                        lower.clone_from(&upper);
                    }
                }
            }
            if let Some(logger) = self.materialized_logger.as_mut() {
                for (id, changes) in &mut progress {
                    for (time, diff) in changes.iter() {
                        logger.log(MaterializedEvent::Frontier(*id, *time, *diff));
                    }
                }
            }
            if !progress.is_empty() {
                block_on(feedback_tx.send(WorkerFeedbackWithMeta {
                    worker_id: self.timely_worker.index(),
                    message: WorkerFeedback::FrontierUppers(progress),
                }))
                .unwrap();
            }
        }
    }

    /// Applies a single sequenced command from the coordinator to this
    /// worker's state (dataflow construction/teardown, peeks, inserts,
    /// compaction, feedback/logging/persistence channels, and shutdown).
    fn handle_command(&mut self, cmd: SequencedCommand) {
        match cmd {
            SequencedCommand::CreateDataflows(dataflows) => {
                for dataflow in dataflows.into_iter() {
                    for (idx_id, idx, _) in dataflow.index_exports.iter() {
                        self.reported_frontiers
                            .insert(*idx_id, Antichain::from_elem(0));
                        if let Some(logger) = self.materialized_logger.as_mut() {
                            logger.log(MaterializedEvent::Dataflow(*idx_id, true));
                            logger.log(MaterializedEvent::Frontier(*idx_id, 0, 1));
                            for import_id in dataflow.get_imports(&idx.on_id) {
                                logger.log(MaterializedEvent::DataflowDependency {
                                    dataflow: *idx_id,
                                    source: import_id,
                                })
                            }
                        }
                    }
                    render::build_dataflow(self.timely_worker, &mut self.render_state, dataflow);
                }
            }

            SequencedCommand::DropSources(names) => {
                for name in names {
                    self.render_state.local_inputs.remove(&name);
                }
            }
            SequencedCommand::DropSinks(ids) => {
                for id in ids {
                    self.render_state.dataflow_tokens.remove(&id);
                }
            }
            SequencedCommand::DropIndexes(ids) => {
                for id in ids {
                    self.render_state.traces.del_trace(&id);
                    let frontier = self
                        .reported_frontiers
                        .remove(&id)
                        .expect("Dropped index with no frontier");
                    if let Some(logger) = self.materialized_logger.as_mut() {
                        logger.log(MaterializedEvent::Dataflow(id, false));
                        for time in frontier.elements().iter() {
                            logger.log(MaterializedEvent::Frontier(id, *time, -1));
                        }
                    }
                }
            }

            SequencedCommand::Peek {
                id,
                timestamp,
                conn_id,
                tx,
                finishing,
                project,
                filter,
            } => {
                // Acquire a copy of the trace suitable for fulfilling the peek.
                let mut trace_bundle = self.render_state.traces.get(&id).unwrap().clone();
                let timestamp_frontier = Antichain::from_elem(timestamp);
                let empty_frontier = Antichain::new();
                trace_bundle
                    .oks_mut()
                    .advance_by(timestamp_frontier.borrow());
                trace_bundle
                    .errs_mut()
                    .advance_by(timestamp_frontier.borrow());
                trace_bundle
                    .oks_mut()
                    .distinguish_since(empty_frontier.borrow());
                trace_bundle
                    .errs_mut()
                    .distinguish_since(empty_frontier.borrow());
                // Prepare a description of the peek work to do.
                let mut peek = PendingPeek {
                    id,
                    conn_id,
                    tx,
                    timestamp,
                    finishing,
                    trace_bundle,
                    project,
                    filter,
                };
                // Log the receipt of the peek.
                if let Some(logger) = self.materialized_logger.as_mut() {
                    logger.log(MaterializedEvent::Peek(peek.as_log_event(), true));
                }
                // Attempt to fulfill the peek.
                let fulfilled = peek.seek_fulfillment(&mut Antichain::new());
                if !fulfilled {
                    self.pending_peeks.push(peek);
                } else {
                    // Log the fulfillment of the peek.
                    if let Some(logger) = self.materialized_logger.as_mut() {
                        logger.log(MaterializedEvent::Peek(peek.as_log_event(), false));
                    }
                }
                self.metrics.observe_pending_peeks(&self.pending_peeks);
            }

            SequencedCommand::CancelPeek { conn_id } => {
                let logger = &mut self.materialized_logger;
                self.pending_peeks.retain(|peek| {
                    if peek.conn_id == conn_id {
                        let mut tx = block_on(peek.tx.connect()).unwrap();
                        block_on(tx.send(PeekResponse::Canceled)).unwrap();
                        if let Some(logger) = logger {
                            logger.log(MaterializedEvent::Peek(peek.as_log_event(), false));
                        }
                        false // don't retain
                    } else {
                        true // retain
                    }
                })
            }

            SequencedCommand::AdvanceAllLocalInputs { advance_to } => {
                for (_, local_input) in self.render_state.local_inputs.iter_mut() {
                    local_input.capability.downgrade(&advance_to);
                }
            }

            SequencedCommand::Insert { id, updates } => {
                // Only worker 0 accepts inserts; presumably the data is then
                // distributed via the dataflow exchange — TODO confirm.
                if self.timely_worker.index() == 0 {
                    let input = match self.render_state.local_inputs.get_mut(&id) {
                        Some(input) => input,
                        None => panic!("local input {} missing for insert", id),
                    };
                    let mut session = input.handle.session(input.capability.clone());
                    for update in updates {
                        assert!(update.timestamp >= *input.capability.time());
                        session.give((update.row, update.timestamp, update.diff));
                    }
                }
            }

            SequencedCommand::AllowCompaction(list) => {
                for (id, frontier) in list {
                    self.render_state
                        .traces
                        .allow_compaction(id, frontier.borrow());
                }
            }

            SequencedCommand::EnableFeedback(tx) => {
                self.feedback_tx = Some(Box::pin(block_on(tx.connect()).unwrap().sink_map_err(
                    |err| panic!("error sending worker feedback: {}", err),
                )));
            }
            SequencedCommand::EnableLogging(config) => {
                self.initialize_logging(&config);
            }
            SequencedCommand::EnablePersistence(tx) => {
                self.render_state.persistence_tx = Some(tx);
            }
            SequencedCommand::Shutdown => {
                // this should lead timely to wind down eventually
                self.render_state.traces.del_all_traces();
                self.shutdown_logging();
            }
            SequencedCommand::AdvanceSourceTimestamp { id, update } => {
                let mut timestamps = self.render_state.ts_histories.borrow_mut();
                if let Some(ts_entries) = timestamps.get_mut(&id) {
                    match ts_entries {
                        TimestampDataUpdate::BringYourOwn(entries) => {
                            if let TimestampSourceUpdate::BringYourOwn(
                                partition_count,
                                pid,
                                timestamp,
                                offset,
                            ) = update
                            {
                                let partition_entries =
                                    entries.entry(pid).or_insert_with(VecDeque::new);
                                // Updates must be monotone per partition:
                                // offsets non-decreasing, timestamps strictly
                                // increasing.
                                let (_, last_ts, last_offset) = partition_entries
                                    .back()
                                    .unwrap_or(&(0, 0, MzOffset { offset: 0 }));
                                assert!(
                                    offset >= *last_offset,
                                    "offset should not go backwards, but {} < {}",
                                    offset,
                                    last_offset
                                );
                                assert!(
                                    timestamp > *last_ts,
                                    "timestamp should move forwards, but {} <= {}",
                                    timestamp,
                                    last_ts
                                );
                                partition_entries.push_back((partition_count, timestamp, offset));
                            } else {
                                panic!("Unexpected message type. Expected BYO update.")
                            }
                        }
                        TimestampDataUpdate::RealTime(current_partition_count) => {
                            if let TimestampSourceUpdate::RealTime(partition_count) = update {
                                assert!(
                                    *current_partition_count <= partition_count,
                                    "The number of partititions\
                                     for source {} decreased from {} to {}",
                                    id,
                                    partition_count,
                                    current_partition_count
                                );
                                *current_partition_count = partition_count;
                            } else {
                                panic!("Expected message type. Expected RT update.");
                            }
                        }
                    }
                    // Wake the source operator (if still alive) so it observes
                    // the new timestamp binding.
                    let source = self
                        .render_state
                        .ts_source_mapping
                        .get(&id)
                        .expect("Id should be present");
                    if let Some(source) = source.upgrade() {
                        if let Some(token) = &*source {
                            token.activate();
                        }
                    }
                }
            }
        }
    }

    /// Scan pending peeks and attempt to retire each.
    fn process_peeks(&mut self) {
        let mut upper = Antichain::new();
        let pending_peeks_len = self.pending_peeks.len();
        // Swap out the pending list; unfulfilled peeks are pushed back on.
        let mut pending_peeks = std::mem::replace(
            &mut self.pending_peeks,
            Vec::with_capacity(pending_peeks_len),
        );
        for mut peek in pending_peeks.drain(..) {
            let success = peek.seek_fulfillment(&mut upper);
            if !success {
                self.pending_peeks.push(peek);
            } else {
                // Log the fulfillment of the peek.
                if let Some(logger) = self.materialized_logger.as_mut() {
                    logger.log(MaterializedEvent::Peek(peek.as_log_event(), false));
                }
            }
        }
    }
}

/// A worker-local input handle and the capability used to timestamp the
/// updates fed into it (see the `Insert` and `AdvanceAllLocalInputs` commands).
pub struct LocalInput {
    /// Handle through which (row, time, diff) updates are introduced.
    pub handle: UnorderedHandle<Timestamp, (Row, Timestamp, Diff)>,
    /// Capability governing the times at which updates may be introduced.
    pub capability: ActivateCapability<Timestamp>,
}

/// An in-progress peek, and data to eventually fulfill it.
#[derive(Clone)]
struct PendingPeek {
    /// The identifier of the dataflow to peek.
    id: GlobalId,
    /// The ID of the connection that submitted the peek. For logging only.
    conn_id: u32,
    /// A transmitter connected to the intended recipient of the peek.
    tx: comm::mpsc::Sender<PeekResponse>,
    /// Time at which the collection should be materialized.
    timestamp: Timestamp,
    /// Finishing operations to perform on the peek, like an ordering and a
    /// limit.
    finishing: RowSetFinishing,
    /// An optional projection applied to each result row.
    project: Option<Vec<usize>>,
    /// Predicates that each result row must satisfy.
    filter: Vec<expr::ScalarExpr>,
    /// The data from which the trace derives.
    trace_bundle: TraceBundle,
}

impl PendingPeek {
    /// Produces a corresponding log event.
    pub fn as_log_event(&self) -> crate::logging::materialized::Peek {
        crate::logging::materialized::Peek::new(self.id, self.timestamp, self.conn_id)
    }

    /// Attempts to fulfill the peek and reports success.
    ///
    /// To produce output at `peek.timestamp`, we must be certain that
    /// it is no longer changing. A trace guarantees that all future
    /// changes will be greater than or equal to an element of `upper`.
    ///
    /// If an element of `upper` is less or equal to `peek.timestamp`,
    /// then there can be further updates that would change the output.
    /// If no element of `upper` is less or equal to `peek.timestamp`,
    /// then for any time `t` less or equal to `peek.timestamp` it is
    /// not the case that `upper` is less or equal to that timestamp,
    /// and so the result cannot further evolve.
    fn seek_fulfillment(&mut self, upper: &mut Antichain<Timestamp>) -> bool {
        self.trace_bundle.oks_mut().read_upper(upper);
        if upper.less_equal(&self.timestamp) {
            return false;
        }
        self.trace_bundle.errs_mut().read_upper(upper);
        if upper.less_equal(&self.timestamp) {
            return false;
        }
        let response = match self.collect_finished_data() {
            Ok(rows) => PeekResponse::Rows(rows),
            Err(text) => PeekResponse::Error(text),
        };
        let mut tx = block_on(self.tx.connect()).unwrap();
        let tx_result = block_on(tx.send(response));
        // If the response itself could not be sent, fall back to sending the
        // transmission error as an error response.
        if let Err(e) = tx_result {
            block_on(tx.send(PeekResponse::Error(e.to_string()))).unwrap();
        }
        true
    }

    /// Collects data for a known-complete peek.
    fn collect_finished_data(&mut self) -> Result<Vec<Row>, String> {
        // Check if there exist any errors and, if so, return whatever one we
        // find first.
        let (mut cursor, storage) = self.trace_bundle.errs_mut().cursor();
        while cursor.key_valid(&storage) {
            let mut copies = 0;
            cursor.map_times(&storage, |time, diff| {
                if time.less_equal(&self.timestamp) {
                    copies += diff;
                }
            });
            if copies < 0 {
                return Err(format!(
                    "Negative multiplicity: {} for {}",
                    copies,
                    cursor.key(&storage),
                ));
            }
            if copies > 0 {
                return Err(cursor.key(&storage).to_string());
            }
            cursor.step_key(&storage);
        }

        let (mut cursor, storage) = self.trace_bundle.oks_mut().cursor();
        let mut results = Vec::new();

        // We can limit the record enumeration if i. there is a limit set,
        // and ii. if the specified ordering is empty (specifies no order).
        let limit = if self.finishing.order_by.is_empty() {
            self.finishing.limit.map(|l| l + self.finishing.offset)
        } else {
            None
        };

        let mut datums = Vec::new();
        while cursor.key_valid(&storage) && limit.map(|l| results.len() < l).unwrap_or(true) {
            while cursor.val_valid(&storage) && limit.map(|l| results.len() < l).unwrap_or(true) {
                let row = cursor.val(&storage);
                let mut retain = true;
                if !self.filter.is_empty() {
                    datums.clear();
                    datums.extend(row.iter());
                    // Before (expensively) determining how many copies of a row
                    // we have, let's eliminate rows that we don't care about.
                    let temp_storage = RowArena::new();
                    for predicate in &self.filter {
                        let d = predicate
                            .eval(&datums, &temp_storage)
                            .map_err(|e| e.to_string())?;
                        if d != Datum::True {
                            retain = false;
                            break;
                        }
                    }
                }
                if retain {
                    // Differential dataflow represents collections with binary counts,
                    // but our output representation is unary (as many rows as reported
                    // by the count). We should determine this count, and especially if
                    // it is non-zero, before producing any output data.
                    let mut copies = 0;
                    cursor.map_times(&storage, |time, diff| {
                        if time.less_equal(&self.timestamp) {
                            copies += diff;
                        }
                    });
                    if copies < 0 {
                        return Err(format!(
                            "Negative multiplicity: {} for {:?}",
                            copies,
                            row.unpack(),
                        ));
                    }
                    // TODO: We could push a count here, as we create owned output later.
                    for _ in 0..copies {
                        results.push(row);
                    }
                }
                cursor.step_val(&storage);
            }
            cursor.step_key(&storage)
        }

        // If we have extracted a projection, we should re-write the order_by columns.
        if let Some(columns) = &self.project {
            for key in self.finishing.order_by.iter_mut() {
                key.column = columns[key.column];
            }
        }

        // TODO: We could sort here in any case, as it allows a merge sort at the coordinator.
        if let Some(limit) = self.finishing.limit {
            let offset_plus_limit = limit + self.finishing.offset;
            if results.len() > offset_plus_limit {
                // The `results` should be sorted by `Row`, which means we only
                // need to re-order `results` when there is a non-empty order_by.
                if !self.finishing.order_by.is_empty() {
                    pdqselect::select_by(&mut results, offset_plus_limit, |left, right| {
                        expr::compare_columns(
                            &self.finishing.order_by,
                            &left.unpack(),
                            &right.unpack(),
                            || left.cmp(right),
                        )
                    });
                }
                results.truncate(offset_plus_limit);
            }
        }

        // Materialize owned rows, applying the projection if one was requested.
        Ok(if let Some(columns) = &self.project {
            let mut row_packer = repr::RowPacker::new();
            results
                .iter()
                .map({
                    move |row| {
                        let datums = row.unpack();
                        row_packer.pack(columns.iter().map(|i| datums[*i]))
                    }
                })
                .collect()
        } else {
            results.iter().map(|row| (*row).clone()).collect()
        })
    }
}

/// The presence of this function forces `rustc` to instantiate the
/// slow-to-compile differential and timely templates while compiling this
/// crate. This means that iterating on crates that depend upon this crate is
/// much faster, because these templates don't need to be reinstantiated
/// whenever a downstream dependency changes. And iterating on this crate
/// doesn't really become slower, because you needed to instantiate these
/// templates anyway to run tests.
pub fn __explicit_instantiation__() {
    ore::hint::black_box(serve::<tokio::net::TcpStream> as fn(_, _, _, _, _) -> _);
}
41.522568
100
0.547756
9160f2ed659deab0ac8a522aa37f38d1bd02bd8d
1,202
use crate::math::{AngVector, Vector, SPATIAL_DIM};
use na::{DVectorSlice, DVectorSliceMut};
use na::{Scalar, SimdRealField};
use std::ops::AddAssign;

/// A linear/angular velocity delta, stored as two contiguous fields.
///
/// `#[repr(C)]` pins the field order (`linear` then `angular`) so the
/// struct can be reinterpreted as a flat `[N; SPATIAL_DIM]` scalar array
/// by the `as_slice`/`as_mut_slice` accessors below.
#[derive(Copy, Clone, Debug)]
#[repr(C)]
//#[repr(align(64))]
pub struct DeltaVel<N: Scalar + Copy> {
    pub linear: Vector<N>,
    pub angular: AngVector<N>,
}

impl<N: Scalar + Copy> DeltaVel<N> {
    /// Views this delta as a flat array of `SPATIAL_DIM` scalars.
    pub fn as_slice(&self) -> &[N; SPATIAL_DIM] {
        // SAFETY: `DeltaVel` is `#[repr(C)]`; this assumes `Vector<N>` and
        // `AngVector<N>` together are exactly SPATIAL_DIM densely-packed `N`
        // values with no padding (nalgebra's statically-sized storage is a
        // plain array) — NOTE(review): verify if the math aliases change.
        unsafe { std::mem::transmute(self) }
    }

    /// Mutable variant of [`Self::as_slice`].
    pub fn as_mut_slice(&mut self) -> &mut [N; SPATIAL_DIM] {
        // SAFETY: same layout argument as `as_slice`.
        unsafe { std::mem::transmute(self) }
    }

    /// Views this delta as a dynamic nalgebra vector slice of length `SPATIAL_DIM`.
    pub fn as_vector_slice(&self) -> DVectorSlice<N> {
        DVectorSlice::from_slice(&self.as_slice()[..], SPATIAL_DIM)
    }

    /// Mutable variant of [`Self::as_vector_slice`].
    pub fn as_vector_slice_mut(&mut self) -> DVectorSliceMut<N> {
        DVectorSliceMut::from_slice(&mut self.as_mut_slice()[..], SPATIAL_DIM)
    }
}

impl<N: SimdRealField + Copy> DeltaVel<N> {
    /// Returns the zero delta (zero linear and angular components).
    pub fn zero() -> Self {
        Self {
            linear: na::zero(),
            angular: na::zero(),
        }
    }
}

impl<N: SimdRealField + Copy> AddAssign for DeltaVel<N> {
    /// Component-wise accumulation of another delta.
    fn add_assign(&mut self, rhs: Self) {
        self.linear += rhs.linear;
        self.angular += rhs.angular;
    }
}
25.574468
78
0.599002
28fad62db0604bcc43ea32cc8716bf80c8f01ecb
806
use rand::Rng;
use std::cmp::Ordering;
use std::io;

/// Runs an interactive number-guessing game on stdin/stdout.
///
/// Picks a random secret, then repeatedly prompts the user for a guess,
/// reporting "Too small"/"Too big" until the guess matches.
pub fn start() {
    println!("Guess the number!");

    let secret_number = rand::thread_rng().gen_range(1, 100);

    loop {
        println!("Please input your guess");

        let mut input = String::new();
        io::stdin()
            .read_line(&mut input)
            .expect("Failed to read line");

        // Non-numeric input is silently ignored; prompt again.
        let guess: u32 = if let Ok(number) = input.trim().parse() {
            number
        } else {
            continue;
        };

        println!("You guessed: {}", guess);

        match guess.cmp(&secret_number) {
            Ordering::Equal => {
                println!("You win!");
                break;
            }
            Ordering::Less => println!("Too small"),
            Ordering::Greater => println!("Too big"),
        }
    }
}
26.866667
61
0.483871
29593f7d8ceedab3568ace0f73f096f3fa25ee6e
2,508
#[doc = r" Value read from the register"] pub struct R { bits: u32, } #[doc = r" Value to write to the register"] pub struct W { bits: u32, } impl super::DSCR { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits }; let mut w = W { bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct DASAR { bits: u32, } impl DASAR { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } } #[doc = r" Proxy"] pub struct _DASAW<'a> { w: &'a mut W, } impl<'a> _DASAW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u32) -> &'a mut W { const MASK: u32 = 67108863; const OFFSET: u8 = 6; self.w.bits &= !((MASK as u32) << OFFSET); self.w.bits |= ((value & MASK) as u32) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 6:31 - Descriptor Area Start Address"] #[inline] pub fn dasa(&self) -> DASAR { let bits = { const MASK: u32 = 67108863; const OFFSET: u8 = 6; ((self.bits >> OFFSET) & MASK as u32) as u32 }; DASAR { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 6:31 - Descriptor Area Start Address"] #[inline] pub fn dasa(&mut 
self) -> _DASAW { _DASAW { w: self } } }
23.660377
59
0.496411
8a2d3418c257b2c85292836dd8e2ec56d20a1bf9
2,088
use crate::{DirectionalLight, PointLight, StandardMaterial};
use bevy_asset::Handle;
use bevy_ecs::{bundle::Bundle, component::Component};
use bevy_render2::{
    mesh::Mesh,
    primitives::{CubemapFrusta, Frustum},
    view::{ComputedVisibility, Visibility, VisibleEntities},
};
use bevy_transform::components::{GlobalTransform, Transform};

/// A component bundle for PBR entities with a [`Mesh`] and a [`StandardMaterial`].
#[derive(Bundle, Clone, Default)]
pub struct PbrBundle {
    pub mesh: Handle<Mesh>,
    pub material: Handle<StandardMaterial>,
    pub transform: Transform,
    pub global_transform: GlobalTransform,
    /// User indication of whether an entity is visible
    pub visibility: Visibility,
    /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering
    pub computed_visibility: ComputedVisibility,
}

/// Per-face visible-entity sets for a cubemap (one [`VisibleEntities`] per
/// each of the 6 cube faces), used by point-light shadow rendering.
#[derive(Component, Clone, Debug, Default)]
pub struct CubemapVisibleEntities {
    // Fixed array: index corresponds to a cubemap face (0..6).
    data: [VisibleEntities; 6],
}

impl CubemapVisibleEntities {
    /// Returns the visible entities for face `i`.
    ///
    /// # Panics
    /// Panics if `i >= 6` (direct array indexing).
    pub fn get(&self, i: usize) -> &VisibleEntities {
        &self.data[i]
    }

    /// Mutable variant of [`Self::get`].
    ///
    /// # Panics
    /// Panics if `i >= 6`.
    pub fn get_mut(&mut self, i: usize) -> &mut VisibleEntities {
        &mut self.data[i]
    }

    /// Iterates over all 6 faces' visible-entity sets in face order.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = &VisibleEntities> {
        self.data.iter()
    }

    /// Mutable variant of [`Self::iter`].
    pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut VisibleEntities> {
        self.data.iter_mut()
    }
}

/// A component bundle for [`PointLight`] entities.
#[derive(Debug, Bundle, Default)]
pub struct PointLightBundle {
    pub point_light: PointLight,
    pub cubemap_visible_entities: CubemapVisibleEntities,
    pub cubemap_frusta: CubemapFrusta,
    pub transform: Transform,
    pub global_transform: GlobalTransform,
}

/// A component bundle for [`DirectionalLight`] entities.
#[derive(Debug, Bundle, Default)]
pub struct DirectionalLightBundle {
    pub directional_light: DirectionalLight,
    pub frustum: Frustum,
    pub visible_entities: VisibleEntities,
    pub transform: Transform,
    pub global_transform: GlobalTransform,
}
31.636364
113
0.717912
334cae38422e9cf148e70a255ca0e679089208ca
3,804
//! There are cases where you may want to interrupt this synchronous execution of the Wasm module //! while the it is calling a host function. This can be useful for saving resources, and not //! returning back to the guest Wasm for execution, when you already know the Wasm execution will //! fail, or no longer be needed. //! //! In this example, we will run a Wasm module that calls the imported host function //! interrupt_execution. This host function will immediately stop executing the WebAssembly module. //! //! You can run the example directly by executing in Wasmer root: //! //! ```shell //! cargo run --example early-exit --release --features "cranelift" //! ``` //! //! Ready? use anyhow::bail; use std::fmt; use wasmer::{imports, wat2wasm, Function, Instance, Module, NativeFunc, RuntimeError, Store}; use wasmer_compiler_cranelift::Cranelift; use wasmer_engine_jit::JIT; // First we need to create an error type that we'll use to signal the end of execution. #[derive(Debug, Clone, Copy)] struct ExitCode(u32); // This type must implement `std::error::Error` so we must also implement `std::fmt::Display` for it. impl fmt::Display for ExitCode { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.0) } } // And then we implement `std::error::Error`. impl std::error::Error for ExitCode {} fn main() -> anyhow::Result<()> { // Let's declare the Wasm module with the text representation. let wasm_bytes = wat2wasm( br#" (module (type $run_t (func (param i32 i32) (result i32))) (type $early_exit_t (func (param) (result))) (import "env" "early_exit" (func $early_exit (type $early_exit_t))) (func $run (type $run_t) (param $x i32) (param $y i32) (result i32) (call $early_exit) (i32.add local.get $x local.get $y)) (export "run" (func $run))) "#, )?; // Create a Store. // Note that we don't need to specify the engine/compiler if we want to use // the default provided by Wasmer. // You can use `Store::default()` for that. 
let store = Store::new(&JIT::new(Cranelift::default()).engine()); println!("Compiling module..."); // Let's compile the Wasm module. let module = Module::new(&store, wasm_bytes)?; // We declare the host function that we'll use to terminate execution. fn early_exit() { // This is where it happens. RuntimeError::raise(Box::new(ExitCode(1))); } // Create an import object. let import_object = imports! { "env" => { "early_exit" => Function::new_native(&store, early_exit), } }; println!("Instantiating module..."); // Let's instantiate the Wasm module. let instance = Instance::new(&module, &import_object)?; // Here we go. // // Get the `run` function which we'll use as our entrypoint. println!("Calling `run` function..."); let run_func: NativeFunc<(i32, i32), i32> = instance.exports.get_native_function("run")?; // When we call a function it can either succeed or fail. We expect it to fail. match run_func.call(1, 7) { Ok(result) => { bail!( "Expected early termination with `ExitCode`, found: {}", result ); } // In case of a failure, which we expect, we attempt to downcast the error into the error // type that we were expecting. Err(e) => match e.downcast::<ExitCode>() { // We found the exit code used to terminate execution. Ok(exit_code) => { println!("Exited early with exit code: {}", exit_code); Ok(()) } Err(e) => { bail!("Unknown error `{}` found. expected `ErrorCode`", e); } }, } }
34.581818
101
0.618822
23b083d408fd5867f580b6387ce107a234e85bbf
428
use std::env;
use std::fs;

/// Prints the file named by the first CLI argument to stdout, line by line.
fn main() {
    let args: Vec<String> = env::args().collect();

    // Require one user-supplied argument (args[0] is the program name).
    if args.len() < 2 {
        panic!("invalid argument length");
    }
    let file_name = &args[1];

    let contents =
        fs::read_to_string(file_name).expect("Something went wrong reading the file");

    contents.lines().for_each(|line| println!("{}", line));
}
19.454545
57
0.530374
4b10451c81363dbee8596fdbc22163184ad03109
5,188
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::io; pub fn general() { io::println("Usage: rustpkg [options] <cmd> [args..] Where <cmd> is one of: build, clean, do, info, install, list, prefer, test, uninstall, unprefer Options: -h, --help Display this message --sysroot PATH Override the system root <cmd> -h, <cmd> --help Display help for <cmd>"); } pub fn build() { io::println("rustpkg build [options..] [package-ID] Build the given package ID if specified. With no package ID argument, build the package in the current directory. In that case, the current directory must be a direct child of an `src` directory in a workspace. Options: -c, --cfg Pass a cfg flag to the package script --no-link Compile and assemble, but don't link (like -c in rustc) --no-trans Parse and translate, but don't generate any code --pretty Pretty-print the code, but don't generate output --parse-only Parse the code, but don't typecheck or generate code -S Generate assembly code, but don't assemble or link it -S --emit-llvm Generate LLVM assembly code --emit-llvm Generate LLVM bitcode --linker PATH Use a linker other than the system linker --link-args [ARG..] 
Extra arguments to pass to the linker --opt-level=n Set the optimization level (0 <= n <= 3) -O Equivalent to --opt-level=2 --save-temps Don't delete temporary files --target TRIPLE Set the target triple --target-cpu CPU Set the target CPU -Z FLAG Enable an experimental rustc feature (see `rustc --help`)"); } pub fn clean() { io::println("rustpkg clean Remove all build files in the work cache for the package in the current directory."); } pub fn do_cmd() { io::println("rustpkg do <cmd> Runs a command in the package script. You can listen to a command by tagging a function with the attribute `#[pkg_do(cmd)]`."); } pub fn info() { io::println("rustpkg [options..] info Probe the package script in the current directory for information. Options: -j, --json Output the result as JSON"); } pub fn list() { io::println("rustpkg list List all installed packages."); } pub fn install() { io::println("rustpkg install [options..] [package-ID] Install the given package ID if specified. With no package ID argument, install the package in the current directory. In that case, the current directory must be a direct child of a `src` directory in a workspace. Examples: rustpkg install rustpkg install github.com/mozilla/servo rustpkg install github.com/mozilla/servo#0.1.2 Options: -c, --cfg Pass a cfg flag to the package script --emit-llvm Generate LLVM bitcode --linker PATH Use a linker other than the system linker --link-args [ARG..] Extra arguments to pass to the linker --opt-level=n Set the optimization level (0 <= n <= 3) -O Equivalent to --opt-level=2 --save-temps Don't delete temporary files --target TRIPLE Set the target triple --target-cpu CPU Set the target CPU -Z FLAG Enable an experimental rustc feature (see `rustc --help`)"); } pub fn uninstall() { io::println("rustpkg uninstall <id|name>[@version] Remove a package by id or name and optionally version. 
If the package(s) is/are depended on by another package then they cannot be removed."); } pub fn prefer() { io::println("rustpkg [options..] prefer <id|name>[@version] By default all binaries are given a unique name so that multiple versions can coexist. The prefer command will symlink the uniquely named binary to the binary directory under its bare name. If version is not supplied, the latest version of the package will be preferred. Example: export PATH=$PATH:/home/user/.rustpkg/bin rustpkg prefer machine@1.2.4 machine -v ==> v1.2.4 rustpkg prefer machine@0.4.6 machine -v ==> v0.4.6"); } pub fn unprefer() { io::println("rustpkg [options..] unprefer <id|name>[@version] Remove all symlinks from the store to the binary directory for a package name and optionally version. If version is not supplied, the latest version of the package will be unpreferred. See `rustpkg prefer -h` for more information."); } pub fn test() { io::println("rustpkg [options..] test Build all test crates in the current directory with the test flag. Then, run all the resulting test executables, redirecting the output and exit code. Options: -c, --cfg Pass a cfg flag to the package script"); } pub fn init() { io::println("rustpkg init This will turn the current working directory into a workspace. The first command you run when starting off a new project. "); }
32.628931
79
0.689476
8768c18215711afc03ae57d7466096f1e6170f4f
1,488
// Copyright 2020 The Grin Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use mimble_chain as chain; use mimble_core as core; use mimble_p2p as p2p; use mimble_pool as pool; use mimble_util as util; use failure; #[macro_use] extern crate failure_derive; #[macro_use] extern crate lazy_static; #[macro_use] extern crate serde_derive; extern crate serde_json; #[macro_use] extern crate log; #[macro_use] mod web; pub mod auth; pub mod client; mod foreign; mod foreign_rpc; mod handlers; mod owner; mod owner_rpc; mod rest; mod router; mod stratum; mod stratum_rpc; mod types; pub use crate::auth::{ BasicAuthMiddleware, BasicAuthURIMiddleware, MWC_BASIC_REALM, MWC_FOREIGN_BASIC_REALM, }; pub use crate::foreign::Foreign; pub use crate::foreign_rpc::ForeignRpc; pub use crate::handlers::node_apis; pub use crate::owner::Owner; pub use crate::owner_rpc::OwnerRpc; pub use crate::rest::*; pub use crate::router::*; pub use crate::types::*; pub use crate::web::*;
24.393443
87
0.757392
22878471f84237f649a90285ea9d16a14e83e878
1,670
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    sync::{Arc, OnceLock, RwLock, Weak},
};

use super::{Result, ResultWrap, StorageData};

/// Process-wide registry mapping storage paths to weakly-held open storages.
type Storages = Arc<RwLock<HashMap<PathBuf, Weak<StorageData>>>>;

/// Lazily-initialized global registry.
///
/// `OnceLock` replaces the previous `static mut Option<Storages>` + `Once`
/// pair: taking a shared reference to a `static mut` (`unsafe { &STORAGES }`)
/// is undefined-behavior-prone and rejected by modern compilers
/// (`static_mut_refs`); `OnceLock` provides the same once-only, thread-safe
/// initialization without any `unsafe`.
static STORAGES: OnceLock<Storages> = OnceLock::new();

/// Returns a handle to the global registry, initializing it on first use.
fn get_storages() -> Storages {
    STORAGES
        .get_or_init(|| Arc::new(RwLock::new(HashMap::new())))
        .clone()
}

/// Namespace for operations on the global storage registry.
pub(crate) struct Pool;

impl Pool {
    /// Looks up an open storage by path, upgrading the weak entry.
    ///
    /// Returns `Ok(None)` when the path is unknown or the storage has
    /// already been dropped (the `Weak` no longer upgrades).
    #[inline]
    pub(crate) fn get<P: AsRef<Path>>(path: P) -> Result<Option<Arc<StorageData>>> {
        let path = path.as_ref();
        let storages = get_storages();
        let map = storages.read().wrap_err()?;
        Ok(map.get(path).and_then(|env| env.upgrade()))
    }

    /// Registers a storage under `path`, holding it only weakly so the
    /// registry never keeps a storage alive on its own.
    #[inline]
    pub(crate) fn put(path: PathBuf, storage: &Arc<StorageData>) -> Result<()> {
        let storages = get_storages();
        let mut map = storages.write().wrap_err()?;
        map.insert(path, Arc::downgrade(storage));
        Ok(())
    }

    /// Removes the registry entry for `path`, if any.
    #[inline]
    pub(crate) fn del<P: AsRef<Path>>(path: P) -> Result<()> {
        let path = path.as_ref();
        let storages = get_storages();
        let mut map = storages.write().wrap_err()?;
        map.remove(path);
        Ok(())
    }

    /// Lists all currently registered paths (including entries whose
    /// storage may already have been dropped).
    #[inline]
    pub(crate) fn lst() -> Result<Vec<PathBuf>> {
        let storages = get_storages();
        let map = storages.read().wrap_err()?;
        Ok(map.keys().cloned().collect())
    }
}
25.30303
84
0.576048
221fa10b1046a710b094e6c1923fc902945b6ca8
4,595
use crate::connection; use crate::db; use crate::oracle; use crate::process_manager; use crate::rollover_taker; use anyhow::Result; use async_trait::async_trait; use futures::StreamExt; use maia::secp256k1_zkp::schnorrsig; use model::OrderId; use std::time::Duration; use time::OffsetDateTime; use tokio_tasks::Tasks; use xtra::Actor as _; use xtra::Address; use xtra_productivity::xtra_productivity; use xtras::AddressMap; use xtras::SendAsyncSafe; use xtras::SendInterval; pub struct Actor<O> { db: db::Connection, oracle_pk: schnorrsig::PublicKey, process_manager: Address<process_manager::Actor>, conn: Address<connection::Actor>, oracle: Address<O>, n_payouts: usize, rollover_actors: AddressMap<OrderId, rollover_taker::Actor>, tasks: Tasks, } impl<O> Actor<O> { pub fn new( db: db::Connection, oracle_pk: schnorrsig::PublicKey, process_manager: Address<process_manager::Actor>, conn: Address<connection::Actor>, oracle: Address<O>, n_payouts: usize, ) -> Self { Self { db, oracle_pk, process_manager, conn, oracle, n_payouts, rollover_actors: AddressMap::default(), tasks: Tasks::default(), } } } #[xtra_productivity] impl<O> Actor<O> where O: xtra::Handler<oracle::GetAnnouncement>, { async fn handle(&mut self, _msg: AutoRollover, ctx: &mut xtra::Context<Self>) { tracing::trace!("Checking all CFDs for rollover eligibility"); // Auto-rollover is invoked periodically by `addr.send_interval()`, // which does not handle errors - forward implementation to allow `?` inside if let Err(e) = self.handle_auto_rollover_impl(ctx).await { tracing::error!("Auto-rollover failed: {:#}", e); } } async fn handle(&mut self, Rollover(order_id): Rollover) { let disconnected = match self.rollover_actors.get_disconnected(order_id) { Ok(disconnected) => disconnected, Err(_) => { tracing::debug!(%order_id, "Rollover already in progress"); return; } }; let addr = rollover_taker::Actor::new( order_id, self.n_payouts, self.oracle_pk, self.conn.clone(), &self.oracle, self.process_manager.clone(), 
self.db.clone(), ) .create(None) .spawn(&mut self.tasks); disconnected.insert(addr); } } impl<O> Actor<O> where O: xtra::Handler<oracle::GetAnnouncement>, { async fn handle_auto_rollover_impl( &mut self, ctx: &mut xtra::Context<Actor<O>>, ) -> Result<(), anyhow::Error> { let this = ctx .address() .expect("actor to be able to give address to itself"); let mut stream = self.db.load_all_open_cfds::<model::Cfd>(()); while let Some(cfd) = stream.next().await { let cfd = match cfd { Ok(cfd) => cfd, Err(e) => { tracing::warn!("Failed to load CFD from database: {e:#}"); continue; } }; let id = cfd.id(); match cfd.can_auto_rollover_taker(OffsetDateTime::now_utc()) { Ok(()) => { let _ = this.send_async_safe(Rollover(id)).await; // If we disconnect, we don't // care. } Err(reason) => { tracing::trace!(order_id = %id, %reason, "CFD is not eligible for auto-rollover"); } } } Ok(()) } } #[async_trait] impl<O> xtra::Actor for Actor<O> where O: xtra::Handler<oracle::GetAnnouncement> + 'static, { type Stop = (); async fn started(&mut self, ctx: &mut xtra::Context<Self>) { let this = ctx.address().expect("we are alive"); self.tasks .add(this.send_interval(Duration::from_secs(5 * 60), || AutoRollover)); } async fn stopped(self) -> Self::Stop {} } /// Message sent to ourselves at an interval to check if rollover can /// be triggered for any of the CFDs in the database. #[derive(Clone, Copy)] pub struct AutoRollover; /// Message used to trigger rollover internally within the `auto_rollover::Actor` /// /// This helps us trigger rollover in the tests unconditionally of time. #[derive(Clone, Copy)] pub struct Rollover(pub OrderId);
28.899371
102
0.573014
db0912e601d0444b237f1faf9000eff876ca22b4
2,148
use crate::{Address, Gas}; /// Holds `Transaction` **agnostic** content. /// /// Once created it **can't** be modified (immutable). /// /// The [`Envelope`]'s data should be passed externally from the `Node`. /// That's why we are not allowed to touch its content, and have it immutable. #[derive(Debug, Clone, PartialEq)] pub struct Envelope { principal: Address, amount: u64, gas_limit: Gas, gas_fee: u64, } impl Default for Envelope { fn default() -> Self { Self::with_principal(Address::zeros()) } } impl Envelope { /// Creates a new [`Envelope`]. pub fn new(principal: Address, amount: u64, gas_limit: Gas, gas_fee: u64) -> Self { Self { principal, amount, gas_limit, gas_fee, } } /// Creates a new [`Envelope`] with the given `principal` parameter. /// /// Sets default values for all remaining fields. /// Sets no `gas limit` (suitable when running with gas pricing off). /// /// # Notes /// /// This method should be useful to ease tests setup. pub fn with_principal(principal: Address) -> Self { Self { principal, amount: 0, gas_limit: Gas::new(), gas_fee: 0, } } /// Creates a new [`Envelope`] with the given `gas_limit` parameter. /// /// Sets default values for all remaining fields. /// /// # Notes /// /// This method should be useful to ease tests setup. pub fn with_gas_limit(gas_limit: Gas) -> Self { Self { principal: Address::zeros(), amount: 0, gas_limit, gas_fee: 0, } } /// The `Address` of the `Account` paying for the [`Gas`]. pub fn principal(&self) -> &Address { &self.principal } /// Funding by the `Principal`. pub fn amount(&self) -> u64 { self.amount } /// Maximum units of Gas to be paid. pub fn gas_limit(&self) -> Gas { self.gas_limit } /// Fee per Unit of [`Gas`]. pub fn gas_fee(&self) -> u64 { self.gas_fee } }
24.689655
87
0.549814
5d62fa5553a24deaa1f7c67712c13456be9e70f3
11,911
//! Source implementation for MySQL database. mod errors; mod typesystem; pub use self::errors::MySQLSourceError; use crate::{ data_order::DataOrder, errors::ConnectorXError, sources::{PartitionParser, Produce, Source, SourcePartition}, sql::{count_query, get_limit, CXQuery}, }; use anyhow::anyhow; use chrono::{NaiveDate, NaiveDateTime, NaiveTime}; use fehler::{throw, throws}; use r2d2::{Pool, PooledConnection}; use r2d2_mysql::{ mysql::{prelude::Queryable, Binary, Opts, OptsBuilder, QueryResult, Row, Text}, MysqlConnectionManager, }; use rust_decimal::Decimal; use serde_json::Value; use sqlparser::dialect::MySqlDialect; use std::marker::PhantomData; pub use typesystem::MySQLTypeSystem; type MysqlManager = MysqlConnectionManager; type MysqlConn = PooledConnection<MysqlManager>; pub enum BinaryProtocol {} pub enum TextProtocol {} pub struct MySQLSource<P> { pool: Pool<MysqlManager>, queries: Vec<CXQuery<String>>, names: Vec<String>, schema: Vec<MySQLTypeSystem>, buf_size: usize, _protocol: PhantomData<P>, } impl<P> MySQLSource<P> { #[throws(MySQLSourceError)] pub fn new(conn: &str, nconn: usize) -> Self { let manager = MysqlConnectionManager::new(OptsBuilder::from_opts(Opts::from_url(&conn)?)); let pool = r2d2::Pool::builder() .max_size(nconn as u32) .build(manager)?; Self { pool, queries: vec![], names: vec![], schema: vec![], buf_size: 32, _protocol: PhantomData, } } pub fn buf_size(&mut self, buf_size: usize) { self.buf_size = buf_size; } } impl<P> Source for MySQLSource<P> where MySQLSourcePartition<P>: SourcePartition<TypeSystem = MySQLTypeSystem, Error = MySQLSourceError>, P: Send, { const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor]; type Partition = MySQLSourcePartition<P>; type TypeSystem = MySQLTypeSystem; type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn set_data_order(&mut self, data_order: DataOrder) { if !matches!(data_order, DataOrder::RowMajor) { throw!(ConnectorXError::UnsupportedDataOrder(data_order)); } } fn 
set_queries<Q: ToString>(&mut self, queries: &[CXQuery<Q>]) { self.queries = queries.iter().map(|q| q.map(Q::to_string)).collect(); } #[throws(MySQLSourceError)] fn fetch_metadata(&mut self) { assert!(!self.queries.is_empty()); let mut conn = self.pool.get()?; let first_query = &self.queries[0]; let stmt = conn.prep(&*first_query)?; let (names, types) = stmt .columns() .iter() .map(|col| { ( col.name_str().to_string(), MySQLTypeSystem::from(&col.column_type()), ) }) .unzip(); self.names = names; self.schema = types; } fn names(&self) -> Vec<String> { self.names.clone() } fn schema(&self) -> Vec<Self::TypeSystem> { self.schema.clone() } #[throws(MySQLSourceError)] fn partition(self) -> Vec<Self::Partition> { let mut ret = vec![]; for query in self.queries { let conn = self.pool.get()?; ret.push(MySQLSourcePartition::new( conn, &query, &self.schema, self.buf_size, )); } ret } } pub struct MySQLSourcePartition<P> { conn: MysqlConn, query: CXQuery<String>, schema: Vec<MySQLTypeSystem>, nrows: usize, ncols: usize, buf_size: usize, _protocol: PhantomData<P>, } impl<P> MySQLSourcePartition<P> { pub fn new( conn: MysqlConn, query: &CXQuery<String>, schema: &[MySQLTypeSystem], buf_size: usize, ) -> Self { Self { conn, query: query.clone(), schema: schema.to_vec(), nrows: 0, ncols: schema.len(), buf_size, _protocol: PhantomData, } } } impl SourcePartition for MySQLSourcePartition<BinaryProtocol> { type TypeSystem = MySQLTypeSystem; type Parser<'a> = MySQLBinarySourceParser<'a>; type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn prepare(&mut self) { self.nrows = match get_limit(&self.query, &MySqlDialect {})? { None => { let row: usize = self .conn .query_first(&count_query(&self.query, &MySqlDialect {})?)? 
.ok_or_else(|| { anyhow!("mysql failed to get the count of query: {}", self.query) })?; row } Some(n) => n, }; } #[throws(MySQLSourceError)] fn parser(&mut self) -> Self::Parser<'_> { let stmt = self.conn.prep(self.query.as_str())?; let iter = self.conn.exec_iter(stmt, ())?; MySQLBinarySourceParser::new(iter, &self.schema, self.buf_size) } fn nrows(&self) -> usize { self.nrows } fn ncols(&self) -> usize { self.ncols } } impl SourcePartition for MySQLSourcePartition<TextProtocol> { type TypeSystem = MySQLTypeSystem; type Parser<'a> = MySQLTextSourceParser<'a>; type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn prepare(&mut self) { self.nrows = match get_limit(&self.query, &MySqlDialect {})? { None => { let row: usize = self .conn .query_first(&count_query(&self.query, &MySqlDialect {})?)? .ok_or_else(|| { anyhow!("mysql failed to get the count of query: {}", self.query) })?; row } Some(n) => n, }; } #[throws(MySQLSourceError)] fn parser(&mut self) -> Self::Parser<'_> { let query = self.query.clone(); let iter = self.conn.query_iter(query)?; MySQLTextSourceParser::new(iter, &self.schema, self.buf_size) } fn nrows(&self) -> usize { self.nrows } fn ncols(&self) -> usize { self.ncols } } pub struct MySQLBinarySourceParser<'a> { iter: QueryResult<'a, 'a, 'a, Binary>, buf_size: usize, rowbuf: Vec<Row>, ncols: usize, current_col: usize, current_row: usize, } impl<'a> MySQLBinarySourceParser<'a> { pub fn new( iter: QueryResult<'a, 'a, 'a, Binary>, schema: &[MySQLTypeSystem], buf_size: usize, ) -> Self { Self { iter, buf_size, rowbuf: Vec::with_capacity(buf_size), ncols: schema.len(), current_row: 0, current_col: 0, } } #[throws(MySQLSourceError)] fn next_loc(&mut self) -> (usize, usize) { if self.current_row >= self.rowbuf.len() { if !self.rowbuf.is_empty() { self.rowbuf.drain(..); } for _ in 0..self.buf_size { if let Some(item) = self.iter.next() { self.rowbuf.push(item?); } else { break; } } if self.rowbuf.is_empty() { throw!(anyhow!("Mysql EOF")); } 
self.current_row = 0; self.current_col = 0; } let ret = (self.current_row, self.current_col); self.current_row += (self.current_col + 1) / self.ncols; self.current_col = (self.current_col + 1) % self.ncols; ret } } impl<'a> PartitionParser<'a> for MySQLBinarySourceParser<'a> { type TypeSystem = MySQLTypeSystem; type Error = MySQLSourceError; } macro_rules! impl_produce_binary { ($($t: ty,)+) => { $( impl<'r, 'a> Produce<'r, $t> for MySQLBinarySourceParser<'a> { type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn produce(&'r mut self) -> $t { let (ridx, cidx) = self.next_loc()?; let res = self.rowbuf[ridx].take(cidx).ok_or_else(|| anyhow!("mysql cannot parse at position: ({}, {})", ridx, cidx))?; res } } impl<'r, 'a> Produce<'r, Option<$t>> for MySQLBinarySourceParser<'a> { type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn produce(&'r mut self) -> Option<$t> { let (ridx, cidx) = self.next_loc()?; let res = self.rowbuf[ridx].take(cidx).ok_or_else(|| anyhow!("mysql cannot parse at position: ({}, {})", ridx, cidx))?; res } } )+ }; } impl_produce_binary!( i8, i16, i32, i64, f32, f64, NaiveDate, NaiveTime, NaiveDateTime, Decimal, String, Vec<u8>, Value, ); pub struct MySQLTextSourceParser<'a> { iter: QueryResult<'a, 'a, 'a, Text>, buf_size: usize, rowbuf: Vec<Row>, ncols: usize, current_col: usize, current_row: usize, } impl<'a> MySQLTextSourceParser<'a> { pub fn new( iter: QueryResult<'a, 'a, 'a, Text>, schema: &[MySQLTypeSystem], buf_size: usize, ) -> Self { Self { iter, buf_size, rowbuf: Vec::with_capacity(buf_size), ncols: schema.len(), current_row: 0, current_col: 0, } } #[throws(MySQLSourceError)] fn next_loc(&mut self) -> (usize, usize) { if self.current_row >= self.rowbuf.len() { if !self.rowbuf.is_empty() { self.rowbuf.drain(..); } for _ in 0..self.buf_size { if let Some(item) = self.iter.next() { self.rowbuf.push(item?); } else { break; } } if self.rowbuf.is_empty() { throw!(anyhow!("Mysql EOF")); } self.current_row = 0; self.current_col = 
0; } let ret = (self.current_row, self.current_col); self.current_row += (self.current_col + 1) / self.ncols; self.current_col = (self.current_col + 1) % self.ncols; ret } } impl<'a> PartitionParser<'a> for MySQLTextSourceParser<'a> { type TypeSystem = MySQLTypeSystem; type Error = MySQLSourceError; } macro_rules! impl_produce_text { ($($t: ty,)+) => { $( impl<'r, 'a> Produce<'r, $t> for MySQLTextSourceParser<'a> { type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn produce(&'r mut self) -> $t { let (ridx, cidx) = self.next_loc()?; let res = self.rowbuf[ridx].take(cidx).ok_or_else(|| anyhow!("mysql cannot parse at position: ({}, {})", ridx, cidx))?; res } } impl<'r, 'a> Produce<'r, Option<$t>> for MySQLTextSourceParser<'a> { type Error = MySQLSourceError; #[throws(MySQLSourceError)] fn produce(&'r mut self) -> Option<$t> { let (ridx, cidx) = self.next_loc()?; let res = self.rowbuf[ridx].take(cidx).ok_or_else(|| anyhow!("mysql cannot parse at position: ({}, {})", ridx, cidx))?; res } } )+ }; } impl_produce_text!( i8, i16, i32, i64, f32, f64, NaiveDate, NaiveTime, NaiveDateTime, Decimal, String, Vec<u8>, Value, );
27.132118
139
0.522878
8fe7ee15ef3e1f4071c9abe3f7073f49d31d0752
1,888
use super::*; #[test] fn with_number_or_atom_second_returns_first() { with_process_arc(|arc_process| { TestRunner::new(Config::with_source_file(file!())) .run( &( strategy::term::local_reference(arc_process.clone()), strategy::term::number_or_atom(arc_process.clone()), ), |(first, second)| { prop_assert_eq!(native(first, second), first); Ok(()) }, ) .unwrap(); }); } #[test] fn with_lesser_local_reference_second_returns_first() { max(|_, process| process.reference(0).unwrap(), First); } #[test] fn with_same_local_reference_second_returns_first() { max(|first, _| first, First); } #[test] fn with_same_value_local_reference_second_returns_first() { max(|_, process| process.reference(1).unwrap(), First); } #[test] fn with_greater_local_reference_second_returns_second() { max(|_, process| process.reference(2).unwrap(), Second); } #[test] fn with_function_port_pid_tuple_map_list_or_bitstring_second_returns_second() { with_process_arc(|arc_process| { TestRunner::new(Config::with_source_file(file!())) .run( &( strategy::term::local_reference(arc_process.clone()), strategy::term::function_port_pid_tuple_map_list_or_bitstring( arc_process.clone(), ), ), |(first, second)| { prop_assert_eq!(native(first, second), second.into()); Ok(()) }, ) .unwrap(); }); } fn max<R>(second: R, which: FirstSecond) where R: FnOnce(Term, &Process) -> Term, { super::max(|process| process.reference(1).unwrap(), second, which); }
27.362319
82
0.555085
1e40ff24e3ae5a90dc9a3c82933a36b005c567c7
25,549
use expect_test::expect; use super::{check_infer, check_no_mismatches, check_types}; #[test] fn bug_484() { check_infer( r#" fn test() { let x = if true {}; } "#, expect![[r#" 10..37 '{ ... {}; }': () 20..21 'x': () 24..34 'if true {}': () 27..31 'true': bool 32..34 '{}': () "#]], ); } #[test] fn no_panic_on_field_of_enum() { check_infer( r#" enum X {} fn test(x: X) { x.some_field; } "#, expect![[r#" 19..20 'x': X 25..46 '{ ...eld; }': () 31..32 'x': X 31..43 'x.some_field': {unknown} "#]], ); } #[test] fn bug_585() { check_infer( r#" fn test() { X {}; match x { A::B {} => (), A::Y() => (), } } "#, expect![[r#" 10..88 '{ ... } }': () 16..20 'X {}': {unknown} 26..86 'match ... }': () 32..33 'x': {unknown} 44..51 'A::B {}': {unknown} 55..57 '()': () 67..73 'A::Y()': {unknown} 77..79 '()': () "#]], ); } #[test] fn bug_651() { check_infer( r#" fn quux() { let y = 92; 1 + y; } "#, expect![[r#" 10..40 '{ ...+ y; }': () 20..21 'y': i32 24..26 '92': i32 32..33 '1': i32 32..37 '1 + y': i32 36..37 'y': i32 "#]], ); } #[test] fn recursive_vars() { check_infer( r#" fn test() { let y = unknown; [y, &y]; } "#, expect![[r#" 10..47 '{ ...&y]; }': () 20..21 'y': {unknown} 24..31 'unknown': {unknown} 37..44 '[y, &y]': [{unknown}; 2] 38..39 'y': {unknown} 41..43 '&y': &{unknown} 42..43 'y': {unknown} "#]], ); } #[test] fn recursive_vars_2() { check_infer( r#" fn test() { let x = unknown; let y = unknown; [(x, y), (&y, &x)]; } "#, expect![[r#" 10..79 '{ ...x)]; }': () 20..21 'x': &{unknown} 24..31 'unknown': &{unknown} 41..42 'y': {unknown} 45..52 'unknown': {unknown} 58..76 '[(x, y..., &x)]': [(&{unknown}, {unknown}); 2] 59..65 '(x, y)': (&{unknown}, {unknown}) 60..61 'x': &{unknown} 63..64 'y': {unknown} 67..75 '(&y, &x)': (&{unknown}, {unknown}) 68..70 '&y': &{unknown} 69..70 'y': {unknown} 72..74 '&x': &&{unknown} 73..74 'x': &{unknown} "#]], ); } #[test] fn array_elements_expected_type() { check_no_mismatches( r#" fn test() { let x: [[u32; 2]; 2] = [[1, 2], [3, 4]]; } "#, ); } 
#[test] fn infer_std_crash_1() { // caused stack overflow, taken from std check_infer( r#" enum Maybe<T> { Real(T), Fake, } fn write() { match something_unknown { Maybe::Real(ref mut something) => (), } } "#, expect![[r#" 53..138 '{ ... } }': () 59..136 'match ... }': () 65..82 'someth...nknown': Maybe<{unknown}> 93..123 'Maybe:...thing)': Maybe<{unknown}> 105..122 'ref mu...ething': &mut {unknown} 127..129 '()': () "#]], ); } #[test] fn infer_std_crash_2() { // caused "equating two type variables, ...", taken from std check_infer( r#" fn test_line_buffer() { &[0, b'\n', 1, b'\n']; } "#, expect![[r#" 22..52 '{ ...n']; }': () 28..49 '&[0, b...b'\n']': &[u8; 4] 29..49 '[0, b'...b'\n']': [u8; 4] 30..31 '0': u8 33..38 'b'\n'': u8 40..41 '1': u8 43..48 'b'\n'': u8 "#]], ); } #[test] fn infer_std_crash_3() { // taken from rustc check_infer( r#" pub fn compute() { match nope!() { SizeSkeleton::Pointer { non_zero: true, tail } => {} } } "#, expect![[r#" 17..107 '{ ... } }': () 23..105 'match ... }': () 29..36 'nope!()': {unknown} 47..93 'SizeSk...tail }': {unknown} 81..85 'true': bool 81..85 'true': bool 87..91 'tail': {unknown} 97..99 '{}': () "#]], ); } #[test] fn infer_std_crash_4() { // taken from rustc check_infer( r#" pub fn primitive_type() { match *self { BorrowedRef { type_: Primitive(p), ..} => {}, } } "#, expect![[r#" 24..105 '{ ... } }': () 30..103 'match ... }': () 36..41 '*self': {unknown} 37..41 'self': {unknown} 52..90 'Borrow...), ..}': {unknown} 73..85 'Primitive(p)': {unknown} 83..84 'p': {unknown} 94..96 '{}': () "#]], ); } #[test] fn infer_std_crash_5() { // taken from rustc check_infer( r#" fn extra_compiler_flags() { for content in doesnt_matter { let name = if doesnt_matter { first } else { &content }; let content = if ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.contains(&name) { name } else { content }; } } "#, expect![[r#" 26..322 '{ ... } }': () 32..320 'for co... }': () 36..43 'content': {unknown} 47..60 'doesnt_matter': {unknown} 61..320 '{ ... 
}': () 75..79 'name': &{unknown} 82..166 'if doe... }': &{unknown} 85..98 'doesnt_matter': bool 99..128 '{ ... }': &{unknown} 113..118 'first': &{unknown} 134..166 '{ ... }': &{unknown} 148..156 '&content': &{unknown} 149..156 'content': {unknown} 181..188 'content': &{unknown} 191..313 'if ICE... }': &{unknown} 194..231 'ICE_RE..._VALUE': {unknown} 194..247 'ICE_RE...&name)': bool 241..246 '&name': &&{unknown} 242..246 'name': &{unknown} 248..276 '{ ... }': &{unknown} 262..266 'name': &{unknown} 282..313 '{ ... }': {unknown} 296..303 'content': {unknown} "#]], ); } #[test] fn infer_nested_generics_crash() { // another crash found typechecking rustc check_infer( r#" struct Canonical<V> { value: V, } struct QueryResponse<V> { value: V, } fn test<R>(query_response: Canonical<QueryResponse<R>>) { &query_response.value; } "#, expect![[r#" 91..105 'query_response': Canonical<QueryResponse<R>> 136..166 '{ ...lue; }': () 142..163 '&query....value': &QueryResponse<R> 143..157 'query_response': Canonical<QueryResponse<R>> 143..163 'query_....value': QueryResponse<R> "#]], ); } #[test] fn infer_paren_macro_call() { check_infer( r#" macro_rules! bar { () => {0u32} } fn test() { let a = (bar!()); } "#, expect![[r#" !0..4 '0u32': u32 44..69 '{ ...()); }': () 54..55 'a': u32 "#]], ); } #[test] fn infer_array_macro_call() { check_infer( r#" macro_rules! 
bar { () => {0u32} } fn test() { let a = [bar!()]; } "#, expect![[r#" !0..4 '0u32': u32 44..69 '{ ...()]; }': () 54..55 'a': [u32; 1] 58..66 '[bar!()]': [u32; 1] "#]], ); } #[test] fn bug_1030() { check_infer( r#" struct HashSet<T, H>; struct FxHasher; type FxHashSet<T> = HashSet<T, FxHasher>; impl<T, H> HashSet<T, H> { fn default() -> HashSet<T, H> {} } pub fn main_loop() { FxHashSet::default(); } "#, expect![[r#" 143..145 '{}': () 168..197 '{ ...t(); }': () 174..192 'FxHash...efault': fn default<{unknown}, FxHasher>() -> HashSet<{unknown}, FxHasher> 174..194 'FxHash...ault()': HashSet<{unknown}, FxHasher> "#]], ); } #[test] fn issue_2669() { check_infer( r#" trait A {} trait Write {} struct Response<T> {} trait D { fn foo(); } impl<T:A> D for Response<T> { fn foo() { end(); fn end<W: Write>() { let _x: T = loop {}; } } } "#, expect![[r#" 119..214 '{ ... }': () 129..132 'end': fn end<{unknown}>() 129..134 'end()': () 163..208 '{ ... }': () 181..183 '_x': ! 190..197 'loop {}': ! 195..197 '{}': () "#]], ) } #[test] fn issue_2705() { check_infer( r#" trait Trait {} fn test() { <Trait<u32>>::foo() } "#, expect![[r#" 25..52 '{ ...oo() }': () 31..48 '<Trait...>::foo': {unknown} 31..50 '<Trait...:foo()': () "#]], ); } #[test] fn issue_2683_chars_impl() { check_types( r#" //- minicore: iterator pub struct Chars<'a> {} impl<'a> Iterator for Chars<'a> { type Item = char; fn next(&mut self) -> Option<char> { loop {} } } fn test() { let chars: Chars<'_>; (chars.next(), chars.nth(1)); } //^^^^^^^^^^^^^^^^^^^^^^^^^^^^ (Option<char>, Option<char>) "#, ); } #[test] fn issue_3642_bad_macro_stackover() { check_no_mismatches( r#" #[macro_export] macro_rules! match_ast { (match $node:ident { $($tt:tt)* }) => { match_ast!(match ($node) { $($tt)* }) }; (match ($node:expr) { $( ast::$ast:ident($it:ident) => $res:expr, )* _ => $catch_all:expr $(,)? }) => {{ $( if let Some($it) = ast::$ast::cast($node.clone()) { $res } else )* { $catch_all } }}; } fn main() { let anchor = match_ast! 
{ match parent { as => {}, _ => return None } }; }"#, ); } #[test] fn issue_3999_slice() { check_infer( r#" fn foo(params: &[usize]) { match params { [ps @ .., _] => {} } } "#, expect![[r#" 7..13 'params': &[usize] 25..80 '{ ... } }': () 31..78 'match ... }': () 37..43 'params': &[usize] 54..66 '[ps @ .., _]': [usize] 55..62 'ps @ ..': &[usize] 60..62 '..': [usize] 64..65 '_': usize 70..72 '{}': () "#]], ); } #[test] fn issue_3999_struct() { // rust-analyzer should not panic on seeing this malformed // record pattern. check_infer( r#" struct Bar { a: bool, } fn foo(b: Bar) { match b { Bar { a: .. } => {}, } } "#, expect![[r#" 35..36 'b': Bar 43..95 '{ ... } }': () 49..93 'match ... }': () 55..56 'b': Bar 67..80 'Bar { a: .. }': Bar 76..78 '..': bool 84..86 '{}': () "#]], ); } #[test] fn issue_4235_name_conflicts() { check_infer( r#" struct FOO {} static FOO:FOO = FOO {}; impl FOO { fn foo(&self) {} } fn main() { let a = &FOO; a.foo(); } "#, expect![[r#" 31..37 'FOO {}': FOO 63..67 'self': &FOO 69..71 '{}': () 85..119 '{ ...o(); }': () 95..96 'a': &FOO 99..103 '&FOO': &FOO 100..103 'FOO': FOO 109..110 'a': &FOO 109..116 'a.foo()': () "#]], ); } #[test] fn issue_4465_dollar_crate_at_type() { check_infer( r#" pub struct Foo {} pub fn anything<T>() -> T { loop {} } macro_rules! foo { () => {{ let r: $crate::Foo = anything(); r }}; } fn main() { let _a = foo!(); } "#, expect![[r#" 44..59 '{ loop {} }': T 50..57 'loop {}': ! 55..57 '{}': () !0..31 '{letr:...g();r}': Foo !4..5 'r': Foo !18..26 'anything': fn anything<Foo>() -> Foo !18..28 'anything()': Foo !29..30 'r': Foo 163..187 '{ ...!(); }': () 173..175 '_a': Foo "#]], ); } #[test] fn issue_6811() { check_infer( r#" macro_rules! 
profile_function { () => { let _a = 1; let _b = 1; }; } fn main() { profile_function!(); } "#, expect![[r#" !3..5 '_a': i32 !6..7 '1': i32 !11..13 '_b': i32 !14..15 '1': i32 103..131 '{ ...!(); }': () "#]], ); } #[test] fn issue_4053_diesel_where_clauses() { check_infer( r#" trait BoxedDsl<DB> { type Output; fn internal_into_boxed(self) -> Self::Output; } struct SelectStatement<From, Select, Distinct, Where, Order, LimitOffset, GroupBy, Locking> { order: Order, } trait QueryFragment<DB: Backend> {} trait Into<T> { fn into(self) -> T; } impl<F, S, D, W, O, LOf, DB> BoxedDsl<DB> for SelectStatement<F, S, D, W, O, LOf, G> where O: Into<dyn QueryFragment<DB>>, { type Output = XXX; fn internal_into_boxed(self) -> Self::Output { self.order.into(); } } "#, expect![[r#" 65..69 'self': Self 267..271 'self': Self 466..470 'self': SelectStatement<F, S, D, W, O, LOf, {unknown}, {unknown}> 488..522 '{ ... }': () 498..502 'self': SelectStatement<F, S, D, W, O, LOf, {unknown}, {unknown}> 498..508 'self.order': O 498..515 'self.o...into()': dyn QueryFragment<DB> "#]], ); } #[test] fn issue_4953() { check_infer( r#" pub struct Foo(pub i64); impl Foo { fn test() -> Self { Self(0i64) } } "#, expect![[r#" 58..72 '{ Self(0i64) }': Foo 60..64 'Self': Foo(i64) -> Foo 60..70 'Self(0i64)': Foo 65..69 '0i64': i64 "#]], ); check_infer( r#" pub struct Foo<T>(pub T); impl Foo<i64> { fn test() -> Self { Self(0i64) } } "#, expect![[r#" 64..78 '{ Self(0i64) }': Foo<i64> 66..70 'Self': Foo<i64>(i64) -> Foo<i64> 66..76 'Self(0i64)': Foo<i64> 71..75 '0i64': i64 "#]], ); } #[test] fn issue_4931() { check_infer( r#" trait Div<T> { type Output; } trait CheckedDiv: Div<()> {} trait PrimInt: CheckedDiv<Output = ()> { fn pow(self); } fn check<T: PrimInt>(i: T) { i.pow(); } "#, expect![[r#" 117..121 'self': Self 148..149 'i': T 154..170 '{ ...w(); }': () 160..161 'i': T 160..167 'i.pow()': () "#]], ); } #[test] fn issue_4885() { check_infer( r#" //- minicore: coerce_unsized, future use 
core::future::Future; trait Foo<R> { type Bar; } fn foo<R, K>(key: &K) -> impl Future<Output = K::Bar> where K: Foo<R>, { bar(key) } fn bar<R, K>(key: &K) -> impl Future<Output = K::Bar> where K: Foo<R>, { } "#, expect![[r#" 70..73 'key': &K 132..148 '{ ...key) }': impl Future<Output = <K as Foo<R>>::Bar> 138..141 'bar': fn bar<R, K>(&K) -> impl Future<Output = <K as Foo<R>>::Bar> 138..146 'bar(key)': impl Future<Output = <K as Foo<R>>::Bar> 142..145 'key': &K 162..165 'key': &K 224..227 '{ }': () "#]], ); } #[test] fn issue_4800() { check_infer( r#" trait Debug {} struct Foo<T>; type E1<T> = (T, T, T); type E2<T> = E1<E1<E1<(T, T, T)>>>; impl Debug for Foo<E2<()>> {} struct Request; pub trait Future { type Output; } pub struct PeerSet<D>; impl<D> Service<Request> for PeerSet<D> where D: Discover, D::Key: Debug, { type Error = (); type Future = dyn Future<Output = Self::Error>; fn call(&mut self) -> Self::Future { loop {} } } pub trait Discover { type Key; } pub trait Service<Request> { type Error; type Future: Future<Output = Self::Error>; fn call(&mut self) -> Self::Future; } "#, expect![[r#" 379..383 'self': &mut PeerSet<D> 401..424 '{ ... }': dyn Future<Output = ()> 411..418 'loop {}': ! 416..418 '{}': () 575..579 'self': &mut Self "#]], ); } #[test] fn issue_4966() { check_infer( r#" //- minicore: deref pub trait IntoIterator { type Item; } struct Repeat<A> { element: A } struct Map<F> { f: F } struct Vec<T> {} impl<T> core::ops::Deref for Vec<T> { type Target = [T]; } fn from_iter<A, T: IntoIterator<Item = A>>(iter: T) -> Vec<A> {} fn main() { let inner = Map { f: |_: &f64| 0.0 }; let repeat = Repeat { element: inner }; let vec = from_iter(repeat); vec.foo_bar(); } "#, expect![[r#" 225..229 'iter': T 244..246 '{}': () 258..402 '{ ...r(); }': () 268..273 'inner': Map<|&f64| -> f64> 276..300 'Map { ... 
0.0 }': Map<|&f64| -> f64> 285..298 '|_: &f64| 0.0': |&f64| -> f64 286..287 '_': &f64 295..298 '0.0': f64 311..317 'repeat': Repeat<Map<|&f64| -> f64>> 320..345 'Repeat...nner }': Repeat<Map<|&f64| -> f64>> 338..343 'inner': Map<|&f64| -> f64> 356..359 'vec': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>> 362..371 'from_iter': fn from_iter<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>, Repeat<Map<|&f64| -> f64>>>(Repeat<Map<|&f64| -> f64>>) -> Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>> 362..379 'from_i...epeat)': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>> 372..378 'repeat': Repeat<Map<|&f64| -> f64>> 386..389 'vec': Vec<IntoIterator::Item<Repeat<Map<|&f64| -> f64>>>> 386..399 'vec.foo_bar()': {unknown} "#]], ); } #[test] fn issue_6628() { check_infer( r#" //- minicore: fn struct S<T>(); impl<T> S<T> { fn f(&self, _t: T) {} fn g<F: FnOnce(&T)>(&self, _f: F) {} } fn main() { let s = S(); s.g(|_x| {}); s.f(10); } "#, expect![[r#" 40..44 'self': &S<T> 46..48 '_t': T 53..55 '{}': () 81..85 'self': &S<T> 87..89 '_f': F 94..96 '{}': () 109..160 '{ ...10); }': () 119..120 's': S<i32> 123..124 'S': S<i32>() -> S<i32> 123..126 'S()': S<i32> 132..133 's': S<i32> 132..144 's.g(|_x| {})': () 136..143 '|_x| {}': |&i32| -> () 137..139 '_x': &i32 141..143 '{}': () 150..151 's': S<i32> 150..157 's.f(10)': () 154..156 '10': i32 "#]], ); } #[test] fn issue_6852() { check_infer( r#" //- minicore: deref use core::ops::Deref; struct BufWriter {} struct Mutex<T> {} struct MutexGuard<'a, T> {} impl<T> Mutex<T> { fn lock(&self) -> MutexGuard<'_, T> {} } impl<'a, T: 'a> Deref for MutexGuard<'a, T> { type Target = T; } fn flush(&self) { let w: &Mutex<BufWriter>; *(w.lock()); } "#, expect![[r#" 123..127 'self': &Mutex<T> 150..152 '{}': () 234..238 'self': &{unknown} 240..290 '{ ...()); }': () 250..251 'w': &Mutex<BufWriter> 276..287 '*(w.lock())': BufWriter 278..279 'w': &Mutex<BufWriter> 278..286 'w.lock()': MutexGuard<BufWriter> "#]], ); } #[test] fn 
param_overrides_fn() { check_types( r#" fn example(example: i32) { fn f() {} example; //^^^^^^^ i32 } "#, ) } #[test] fn lifetime_from_chalk_during_deref() { check_types( r#" //- minicore: deref struct Box<T: ?Sized> {} impl<T> core::ops::Deref for Box<T> { type Target = T; fn deref(&self) -> &Self::Target { loop {} } } trait Iterator { type Item; } pub struct Iter<'a, T: 'a> { inner: Box<dyn IterTrait<'a, T, Item = &'a T> + 'a>, } trait IterTrait<'a, T: 'a>: Iterator<Item = &'a T> { fn clone_box(&self); } fn clone_iter<T>(s: Iter<T>) { s.inner.clone_box(); //^^^^^^^^^^^^^^^^^^^ () } "#, ) } #[test] fn issue_8686() { check_infer( r#" pub trait Try: FromResidual { type Output; type Residual; } pub trait FromResidual<R = <Self as Try>::Residual> { fn from_residual(residual: R) -> Self; } struct ControlFlow<B, C>; impl<B, C> Try for ControlFlow<B, C> { type Output = C; type Residual = ControlFlow<B, !>; } impl<B, C> FromResidual for ControlFlow<B, C> { fn from_residual(r: ControlFlow<B, !>) -> Self { ControlFlow } } fn test() { ControlFlow::from_residual(ControlFlow::<u32, !>); } "#, expect![[r#" 144..152 'residual': R 365..366 'r': ControlFlow<B, !> 395..410 '{ ControlFlow }': ControlFlow<B, C> 397..408 'ControlFlow': ControlFlow<B, C> 424..482 '{ ...!>); }': () 430..456 'Contro...sidual': fn from_residual<ControlFlow<u32, {unknown}>, ControlFlow<u32, !>>(ControlFlow<u32, !>) -> ControlFlow<u32, {unknown}> 430..479 'Contro...2, !>)': ControlFlow<u32, {unknown}> 457..478 'Contro...32, !>': ControlFlow<u32, !> "#]], ); } #[test] fn cfg_tail() { // https://github.com/rust-analyzer/rust-analyzer/issues/8378 check_infer( r#" fn fake_tail(){ { "first" } #[cfg(never)] 9 } fn multiple_fake(){ { "fake" } { "fake" } { "second" } #[cfg(never)] { 11 } #[cfg(never)] 12; #[cfg(never)] 13 } fn no_normal_tail(){ { "third" } #[cfg(never)] 14; #[cfg(never)] 15; } fn no_actual_tail(){ { "fourth" }; #[cfg(never)] 14; #[cfg(never)] 15 } "#, expect![[r#" 14..53 '{ ...)] 9 }': &str 20..31 
'{ "first" }': &str 22..29 '"first"': &str 72..190 '{ ...] 13 }': &str 78..88 '{ "fake" }': &str 80..86 '"fake"': &str 93..103 '{ "fake" }': &str 95..101 '"fake"': &str 108..120 '{ "second" }': &str 110..118 '"second"': &str 210..273 '{ ... 15; }': &str 216..227 '{ "third" }': &str 218..225 '"third"': &str 293..357 '{ ...] 15 }': () 299..311 '{ "fourth" }': &str 301..309 '"fourth"': &str "#]], ) } #[test] fn impl_trait_in_option_9530() { check_types( r#" struct Option<T>; impl<T> Option<T> { fn unwrap(self) -> T { loop {} } } fn make() -> Option<impl Copy> { Option } trait Copy {} fn test() { let o = make(); o.unwrap(); //^^^^^^^^^^ impl Copy } "#, ) }
23.656481
205
0.377001
acbc127913d692b5efb896293553f51e2ecce337
3,486
#[doc = r" Value read from the register"] pub struct R { bits: u16, } #[doc = r" Value to write to the register"] pub struct W { bits: u16, } impl super::PACKET_RAM_0_255 { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } #[doc = r" Value of the field"] pub struct LSBYTER { bits: u8, } impl LSBYTER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Value of the field"] pub struct MSBYTER { bits: u8, } impl MSBYTER { #[doc = r" Value of the field as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } #[doc = r" Proxy"] pub struct _LSBYTEW<'a> { w: &'a mut W, } impl<'a> _LSBYTEW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 0; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } #[doc = r" Proxy"] pub struct _MSBYTEW<'a> { w: &'a mut W, } impl<'a> _MSBYTEW<'a> { #[doc = r" Writes raw bits to the field"] #[inline] pub unsafe fn bits(self, value: u8) -> &'a mut W { const MASK: u8 = 255; const OFFSET: u8 = 8; self.w.bits &= !((MASK as u16) << OFFSET); self.w.bits |= ((value & MASK) as u16) << OFFSET; self.w } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u16 { self.bits } #[doc = "Bits 
0:7 - LSBYTE"] #[inline] pub fn lsbyte(&self) -> LSBYTER { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 0; ((self.bits >> OFFSET) & MASK as u16) as u8 }; LSBYTER { bits } } #[doc = "Bits 8:15 - MSBYTE"] #[inline] pub fn msbyte(&self) -> MSBYTER { let bits = { const MASK: u8 = 255; const OFFSET: u8 = 8; ((self.bits >> OFFSET) & MASK as u16) as u8 }; MSBYTER { bits } } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u16) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:7 - LSBYTE"] #[inline] pub fn lsbyte(&mut self) -> _LSBYTEW { _LSBYTEW { w: self } } #[doc = "Bits 8:15 - MSBYTE"] #[inline] pub fn msbyte(&mut self) -> _MSBYTEW { _MSBYTEW { w: self } } }
23.714286
59
0.49082
87f489695dc94332e175ca2c714504794ba3d139
745
use std::cmp::PartialOrd; use std::fmt; use std::str::FromStr; use anyhow::Error; use graph::impl_slog_value; use crate::DataSource; #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)] pub struct NodeCapabilities {} impl FromStr for NodeCapabilities { type Err = Error; fn from_str(_s: &str) -> Result<Self, Self::Err> { Ok(NodeCapabilities {}) } } impl fmt::Display for NodeCapabilities { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("tendermint") } } impl_slog_value!(NodeCapabilities, "{}"); impl graph::blockchain::NodeCapabilities<crate::Chain> for NodeCapabilities { fn from_data_sources(_data_sources: &[DataSource]) -> Self { NodeCapabilities {} } }
21.911765
77
0.672483
9b956dbd0be36b56534c66e4552ac7e6ee461112
5,684
use event::{Action, MouseButton, WindowEvent}; use na::{self, Matrix3, Point2, Translation2, Vector2}; use num::Pow; use planar_camera::PlanarCamera; use resource::ShaderUniform; use std::f32; use window::Canvas; /// A 2D camera that can be zoomed and panned. #[derive(Clone, Debug)] pub struct Sidescroll { at: Point2<f32>, /// Distance from the camera to the `at` focus point. zoom: f32, /// Increment of the zoomance per unit scrolling. The default value is 40.0. zoom_step: f32, drag_button: Option<MouseButton>, view: Matrix3<f32>, proj: Matrix3<f32>, scaled_proj: Matrix3<f32>, inv_scaled_proj: Matrix3<f32>, last_cursor_pos: Vector2<f32>, } impl Sidescroll { /// Create a new arc-ball camera. pub fn new() -> Sidescroll { let mut res = Sidescroll { at: Point2::origin(), zoom: 1.0, zoom_step: 0.9, drag_button: Some(MouseButton::Button2), view: na::one(), proj: na::one(), scaled_proj: na::one(), inv_scaled_proj: na::one(), last_cursor_pos: na::zero(), }; res.update_projviews(); res } /// The point the arc-ball is looking at. pub fn at(&self) -> Point2<f32> { self.at } /// Get a mutable reference to the point the camera is looking at. pub fn set_at(&mut self, at: Point2<f32>) { self.at = at; self.update_projviews(); } /// Gets the zoom of the camera. pub fn zoom(&self) -> f32 { self.zoom } /// Sets the zoom of the camera. pub fn set_zoom(&mut self, zoom: f32) { self.zoom = zoom; self.update_restrictions(); self.update_projviews(); } /// Move the camera such that it is centered on a specific point. pub fn look_at(&mut self, at: Point2<f32>, zoom: f32) { self.at = at; self.zoom = zoom; self.update_projviews(); } /// Transformation applied by the camera without perspective. fn update_restrictions(&mut self) { if self.zoom < 0.00001 { self.zoom = 0.00001 } } /// The button used to drag the Sidescroll camera. pub fn drag_button(&self) -> Option<MouseButton> { self.drag_button } /// Set the button used to drag the Sidescroll camera. /// Use None to disable dragging. 
pub fn rebind_drag_button(&mut self, new_button: Option<MouseButton>) { self.drag_button = new_button; } /// Move the camera based on drag from right mouse button /// `dpos` is assumed to be in window space so the y-axis is flipped fn handle_right_button_displacement(&mut self, dpos: &Vector2<f32>) { self.at.x -= dpos.x / self.zoom; self.at.y += dpos.y / self.zoom; self.update_projviews(); } fn handle_scroll(&mut self, off: f32) { self.zoom = self.zoom / self.zoom_step.pow(off / 120.0); self.update_restrictions(); self.update_projviews(); } fn update_projviews(&mut self) { self.view = Translation2::new(-self.at.x, -self.at.y).to_homogeneous(); self.scaled_proj = self.proj; self.scaled_proj.m11 *= self.zoom; self.scaled_proj.m22 *= self.zoom; self.inv_scaled_proj.m11 = 1.0 / self.scaled_proj.m11; self.inv_scaled_proj.m22 = 1.0 / self.scaled_proj.m22; } } impl PlanarCamera for Sidescroll { fn handle_event(&mut self, canvas: &Canvas, event: &WindowEvent) { let hidpi = 1.0; // canvas.hidpi_factor(); match *event { WindowEvent::CursorPos(x, y, _) => { let curr_pos = Vector2::new(x as f32, y as f32); if let Some(drag_button) = self.drag_button { if canvas.get_mouse_button(drag_button) == Action::Press { let dpos = curr_pos - self.last_cursor_pos; self.handle_right_button_displacement(&dpos) } } self.last_cursor_pos = curr_pos; } WindowEvent::Scroll(_, off, _) => self.handle_scroll(off as f32), WindowEvent::FramebufferSize(w, h) => { self.proj = Matrix3::new( 2.0 * (hidpi as f32) / (w as f32), 0.0, 0.0, 0.0, 2.0 * (hidpi as f32) / (h as f32), 0.0, 0.0, 0.0, 1.0, ); self.update_projviews(); } _ => {} } } #[inline] fn upload( &self, proj: &mut ShaderUniform<Matrix3<f32>>, view: &mut ShaderUniform<Matrix3<f32>>, ) { proj.upload(&self.scaled_proj); view.upload(&self.view); } fn update(&mut self, _: &Canvas) {} /// Calculate the global position of the given window coordinate fn unproject(&self, window_coord: &Point2<f32>, size: &Vector2<f32>) -> Point2<f32> { // Convert window 
coordinates (origin at top left) to normalized screen coordinates // (origin at the center of the screen) let normalized_coords = Point2::new( 2.0 * window_coord.x / size.x - 1.0, 2.0 * -window_coord.y / size.y + 1.0, ); // Project normalized screen coordinate to screen space let unprojected_hom = self.inv_scaled_proj * normalized_coords.to_homogeneous(); // Convert from screen space to global space Point2::from_homogeneous(unprojected_hom).unwrap() + self.at.coords } }
30.724324
91
0.562808
bfbe763f05d94c331acc72b4020fcd2048f650e9
13,637
// Copyright Materialize, Inc. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.

// Packs catalog state into `Row` updates for the mz_* built-in system tables.

use std::convert::TryFrom;
use std::os::unix::ffi::OsStringExt;

use dataflow_types::{AvroOcfSinkConnector, KafkaSinkConnector};
use expr::{GlobalId, MirScalarExpr};
use ore::collections::CollectionExt;
use repr::adt::array::ArrayDimension;
use repr::{Datum, Row};
use sql::ast::{CreateIndexStatement, Statement};
use sql::names::DatabaseSpecifier;

use crate::catalog::builtin::{
    MZ_ARRAY_TYPES, MZ_AVRO_OCF_SINKS, MZ_BASE_TYPES, MZ_COLUMNS, MZ_DATABASES, MZ_FUNCTIONS,
    MZ_INDEXES, MZ_INDEX_COLUMNS, MZ_KAFKA_SINKS, MZ_LIST_TYPES, MZ_MAP_TYPES, MZ_PSEUDO_TYPES,
    MZ_ROLES, MZ_SCHEMAS, MZ_SINKS, MZ_SOURCES, MZ_TABLES, MZ_TYPES, MZ_VIEWS,
};
use crate::catalog::{
    Catalog, CatalogItem, Func, Index, Sink, SinkConnector, SinkConnectorState, Type, TypeInner,
    SYSTEM_CONN_ID,
};

/// An update to a built-in table.
#[derive(Debug)]
pub struct BuiltinTableUpdate {
    /// The ID of the table to update.
    pub id: GlobalId,
    /// The data to put into the table.
    pub row: Row,
    /// The diff of the data.
    // NOTE(review): presumably +1 inserts the row and -1 retracts it,
    // differential-dataflow style — confirm against callers.
    pub diff: isize,
}

impl Catalog {
    /// Packs an update for `name`'s entry in `mz_databases`.
    pub(super) fn pack_database_update(&self, name: &str, diff: isize) -> BuiltinTableUpdate {
        let database = &self.by_name[name];
        BuiltinTableUpdate {
            id: MZ_DATABASES.id,
            row: Row::pack_slice(&[
                Datum::Int64(database.id),
                Datum::Int32(database.oid as i32),
                Datum::String(&name),
            ]),
            diff,
        }
    }

    /// Packs an update for a schema's entry in `mz_schemas`. Ambient schemas
    /// carry a NULL database id.
    pub(super) fn pack_schema_update(
        &self,
        database_spec: &DatabaseSpecifier,
        schema_name: &str,
        diff: isize,
    ) -> BuiltinTableUpdate {
        let (database_id, schema) = match database_spec {
            DatabaseSpecifier::Ambient => (None, &self.ambient_schemas[schema_name]),
            DatabaseSpecifier::Name(name) => {
                let db = &self.by_name[name];
                (Some(db.id), &db.schemas[schema_name])
            }
        };
        BuiltinTableUpdate {
            id: MZ_SCHEMAS.id,
            row: Row::pack_slice(&[
                Datum::Int64(schema.id),
                Datum::Int32(schema.oid as i32),
                Datum::from(database_id),
                Datum::String(schema_name),
            ]),
            diff,
        }
    }

    /// Packs an update for `name`'s entry in `mz_roles`.
    pub(super) fn pack_role_update(&self, name: &str, diff: isize) -> BuiltinTableUpdate {
        let role = &self.roles[name];
        BuiltinTableUpdate {
            id: MZ_ROLES.id,
            row: Row::pack_slice(&[
                Datum::Int64(role.id),
                Datum::Int32(role.oid as i32),
                Datum::String(&name),
            ]),
            diff,
        }
    }

    /// Packs updates for a catalog item: one set of rows for the item's own
    /// kind-specific table (dispatched below), plus one `mz_columns` row per
    /// column when the item has a relation description.
    pub(super) fn pack_item_update(&self, id: GlobalId, diff: isize) -> Vec<BuiltinTableUpdate> {
        let entry = self.get_by_id(&id);
        let id = entry.id();
        let oid = entry.oid();
        // Temporary items are scoped to a connection; everything else uses the
        // system connection id for schema resolution.
        let conn_id = entry.item().conn_id().unwrap_or(SYSTEM_CONN_ID);
        let schema_id = self
            .get_schema(&entry.name().database, &entry.name().schema, conn_id)
            .unwrap()
            .id;
        let name = &entry.name().item;
        let mut updates = match entry.item() {
            CatalogItem::Index(index) => self.pack_index_update(id, oid, name, index, diff),
            CatalogItem::Table(_) => self.pack_table_update(id, oid, schema_id, name, diff),
            CatalogItem::Source(_) => self.pack_source_update(id, oid, schema_id, name, diff),
            CatalogItem::View(_) => self.pack_view_update(id, oid, schema_id, name, diff),
            CatalogItem::Sink(sink) => self.pack_sink_update(id, oid, schema_id, name, sink, diff),
            CatalogItem::Type(ty) => self.pack_type_update(id, oid, schema_id, name, ty, diff),
            CatalogItem::Func(func) => self.pack_func_update(id, schema_id, name, func, diff),
        };

        if let Ok(desc) = entry.desc() {
            for (i, (column_name, column_type)) in desc.iter().enumerate() {
                updates.push(BuiltinTableUpdate {
                    id: MZ_COLUMNS.id,
                    row: Row::pack_slice(&[
                        Datum::String(&id.to_string()),
                        Datum::String(
                            // Anonymous columns get PostgreSQL's placeholder name.
                            &column_name
                                .map(|n| n.to_string())
                                .unwrap_or_else(|| "?column?".to_owned()),
                        ),
                        // Column positions are 1-based.
                        Datum::Int64(i as i64 + 1),
                        Datum::from(column_type.nullable),
                        Datum::String(pgrepr::Type::from(&column_type.scalar_type).name()),
                    ]),
                    diff,
                });
            }
        }

        updates
    }

    /// Packs `name`'s row in `mz_tables`.
    fn pack_table_update(
        &self,
        id: GlobalId,
        oid: u32,
        schema_id: i64,
        name: &str,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        vec![BuiltinTableUpdate {
            id: MZ_TABLES.id,
            row: Row::pack_slice(&[
                Datum::String(&id.to_string()),
                Datum::Int32(oid as i32),
                Datum::Int64(schema_id),
                Datum::String(name),
            ]),
            diff,
        }]
    }

    /// Packs `name`'s row in `mz_sources`, including its volatility.
    fn pack_source_update(
        &self,
        id: GlobalId,
        oid: u32,
        schema_id: i64,
        name: &str,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        vec![BuiltinTableUpdate {
            id: MZ_SOURCES.id,
            row: Row::pack_slice(&[
                Datum::String(&id.to_string()),
                Datum::Int32(oid as i32),
                Datum::Int64(schema_id),
                Datum::String(name),
                Datum::String(self.is_volatile(id).as_str()),
            ]),
            diff,
        }]
    }

    /// Packs `name`'s row in `mz_views`, including its volatility.
    fn pack_view_update(
        &self,
        id: GlobalId,
        oid: u32,
        schema_id: i64,
        name: &str,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        vec![BuiltinTableUpdate {
            id: MZ_VIEWS.id,
            row: Row::pack_slice(&[
                Datum::String(&id.to_string()),
                Datum::Int32(oid as i32),
                Datum::Int64(schema_id),
                Datum::String(name),
                Datum::String(self.is_volatile(id).as_str()),
            ]),
            diff,
        }]
    }

    /// Packs a sink's rows: a connector-specific row (`mz_kafka_sinks` /
    /// `mz_avro_ocf_sinks`) plus the generic `mz_sinks` row. Sinks whose
    /// connector is not yet `Ready` produce no updates.
    fn pack_sink_update(
        &self,
        id: GlobalId,
        oid: u32,
        schema_id: i64,
        name: &str,
        sink: &Sink,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        let mut updates = vec![];
        if let Sink {
            connector: SinkConnectorState::Ready(connector),
            ..
        } = sink
        {
            match connector {
                SinkConnector::Kafka(KafkaSinkConnector { topic, .. }) => {
                    updates.push(BuiltinTableUpdate {
                        id: MZ_KAFKA_SINKS.id,
                        row: Row::pack_slice(&[
                            Datum::String(&id.to_string()),
                            Datum::String(topic.as_str()),
                        ]),
                        diff,
                    });
                }
                SinkConnector::AvroOcf(AvroOcfSinkConnector { path, .. }) => {
                    updates.push(BuiltinTableUpdate {
                        id: MZ_AVRO_OCF_SINKS.id,
                        row: Row::pack_slice(&[
                            Datum::String(&id.to_string()),
                            // Paths need not be valid UTF-8, so store raw bytes.
                            Datum::Bytes(&path.clone().into_os_string().into_vec()),
                        ]),
                        diff,
                    });
                }
                _ => (),
            }
            updates.push(BuiltinTableUpdate {
                id: MZ_SINKS.id,
                row: Row::pack_slice(&[
                    Datum::String(&id.to_string()),
                    Datum::Int32(oid as i32),
                    Datum::Int64(schema_id),
                    Datum::String(name),
                    Datum::String(self.is_volatile(id).as_str()),
                ]),
                diff,
            });
        }
        updates
    }

    /// Packs an index's rows: one `mz_indexes` row plus one `mz_index_columns`
    /// row per key. Plain column keys report their 1-based column position;
    /// expression keys report their SQL text instead.
    fn pack_index_update(
        &self,
        id: GlobalId,
        oid: u32,
        name: &str,
        index: &Index,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        let mut updates = vec![];

        // Recover the per-key SQL text by re-parsing the stored CREATE INDEX.
        let key_sqls = match sql::parse::parse(&index.create_sql)
            .expect("create_sql cannot be invalid")
            .into_element()
        {
            Statement::CreateIndex(CreateIndexStatement { key_parts, .. }) => key_parts.unwrap(),
            _ => unreachable!(),
        };

        updates.push(BuiltinTableUpdate {
            id: MZ_INDEXES.id,
            row: Row::pack_slice(&[
                Datum::String(&id.to_string()),
                Datum::Int32(oid as i32),
                Datum::String(name),
                Datum::String(&index.on.to_string()),
                Datum::String(self.is_volatile(id).as_str()),
            ]),
            diff,
        });

        for (i, key) in index.keys.iter().enumerate() {
            let nullable = key
                .typ(self.get_by_id(&index.on).desc().unwrap().typ())
                .nullable;
            let seq_in_index = i64::try_from(i + 1).expect("invalid index sequence number");
            let key_sql = key_sqls
                .get(i)
                .expect("missing sql information for index key")
                .to_string();
            // Exactly one of (field_number, expression) is non-NULL per row.
            let (field_number, expression) = match key {
                MirScalarExpr::Column(col) => (
                    Datum::Int64(i64::try_from(*col + 1).expect("invalid index column number")),
                    Datum::Null,
                ),
                _ => (Datum::Null, Datum::String(&key_sql)),
            };
            updates.push(BuiltinTableUpdate {
                id: MZ_INDEX_COLUMNS.id,
                row: Row::pack_slice(&[
                    Datum::String(&id.to_string()),
                    Datum::Int64(seq_in_index),
                    field_number,
                    expression,
                    Datum::from(nullable),
                ]),
                diff,
            });
        }

        updates
    }

    /// Packs a type's rows: the generic `mz_types` row plus one row in the
    /// table specific to the type's structure (array/base/list/map/pseudo).
    fn pack_type_update(
        &self,
        id: GlobalId,
        oid: u32,
        schema_id: i64,
        name: &str,
        typ: &Type,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        let generic_update = BuiltinTableUpdate {
            id: MZ_TYPES.id,
            row: Row::pack_slice(&[
                Datum::String(&id.to_string()),
                Datum::Int32(oid as i32),
                Datum::Int64(schema_id),
                Datum::String(name),
            ]),
            diff,
        };

        let (index_id, update) = match typ.inner {
            TypeInner::Array { element_id } => (
                MZ_ARRAY_TYPES.id,
                vec![id.to_string(), element_id.to_string()],
            ),
            TypeInner::Base => (MZ_BASE_TYPES.id, vec![id.to_string()]),
            TypeInner::List { element_id } => (
                MZ_LIST_TYPES.id,
                vec![id.to_string(), element_id.to_string()],
            ),
            TypeInner::Map { key_id, value_id } => (
                MZ_MAP_TYPES.id,
                vec![id.to_string(), key_id.to_string(), value_id.to_string()],
            ),
            TypeInner::Pseudo => (MZ_PSEUDO_TYPES.id, vec![id.to_string()]),
        };
        let specific_update = BuiltinTableUpdate {
            id: index_id,
            row: Row::pack_slice(&update.iter().map(|c| Datum::String(c)).collect::<Vec<_>>()[..]),
            diff,
        };

        vec![generic_update, specific_update]
    }

    /// Packs one `mz_functions` row per overload of `func`, with argument
    /// type ids encoded as a 1-based array and the variadic type id (if any)
    /// as a nullable column.
    fn pack_func_update(
        &self,
        id: GlobalId,
        schema_id: i64,
        name: &str,
        func: &Func,
        diff: isize,
    ) -> Vec<BuiltinTableUpdate> {
        let mut updates = vec![];
        for func_impl_details in func.inner.func_impls() {
            let arg_ids = func_impl_details
                .arg_oids
                .iter()
                .map(|oid| self.get_by_oid(oid).id().to_string())
                .collect::<Vec<_>>();

            // Build the argument-id array in a scratch row, then read it back
            // out as a Datum for packing into the final row.
            let mut row = Row::default();
            row.push_array(
                &[ArrayDimension {
                    lower_bound: 1,
                    length: arg_ids.len(),
                }],
                arg_ids.iter().map(|id| Datum::String(&id)),
            )
            .unwrap();
            let arg_ids = row.unpack_first();

            let variadic_id = match func_impl_details.variadic_oid {
                Some(oid) => Some(self.get_by_oid(&oid).id().to_string()),
                None => None,
            };

            updates.push(BuiltinTableUpdate {
                id: MZ_FUNCTIONS.id,
                row: Row::pack_slice(&[
                    Datum::String(&id.to_string()),
                    Datum::Int32(func_impl_details.oid as i32),
                    Datum::Int64(schema_id),
                    Datum::String(name),
                    arg_ids,
                    Datum::from(variadic_id.as_deref()),
                ]),
                diff,
            });
        }
        updates
    }
}
33.671605
99
0.485077
16e074e9d08c63b9f1ee05f0bece0cb1f6e491b3
81
//! Crate root: re-exports the `node`, `path`, and `net` modules.

// `quick_error` macros are used by the submodules' error types.
#[macro_use]
extern crate quick_error;

pub mod node;
pub mod path;
pub mod net;
11.571429
25
0.740741
c149f373edef20e40058ccd915449d9decaf532f
16,394
//! QUIC connection transport parameters
//!
//! The `TransportParameters` type is used to represent the transport parameters
//! negotiated by peers while establishing a QUIC connection. This process
//! happens as part of the establishment of the TLS session. As such, the types
//! contained in this modules should generally only be referred to by custom
//! implementations of the `crypto::Session` trait.

use std::{
    convert::TryInto,
    net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6},
};

use bytes::{buf::ext::BufExt as _, Buf, BufMut};
use err_derive::Error;

use crate::{
    coding::{BufExt, BufMutExt, UnexpectedEnd},
    crypto,
    shared::{ConnectionId, ResetToken, ServerConfig},
    Side, TransportConfig, TransportError, VarInt, MAX_CID_SIZE, REM_CID_COUNT, RESET_TOKEN_SIZE,
};

// Apply a given macro to a list of all the transport parameters having integer types, along with
// their codes and default values. Using this helps us avoid error-prone duplication of the
// contained information across decoding, encoding, and the `Default` impl. Whenever we want to do
// something with transport parameters, we'll handle the bulk of cases by writing a macro that
// takes a list of arguments in this form, then passing it to this macro.
macro_rules! apply_params {
    ($macro:ident) => {
        $macro! {
            // #[doc] name (id) = default,
            /// Milliseconds, disabled if zero
            idle_timeout(0x0001) = 0,
            /// Limits the size of packets that the endpoint is willing to receive
            max_packet_size(0x0003) = 65527,
            /// Initial value for the maximum amount of data that can be sent on the connection
            initial_max_data(0x0004) = 0,
            /// Initial flow control limit for locally-initiated bidirectional streams
            initial_max_stream_data_bidi_local(0x0005) = 0,
            /// Initial flow control limit for peer-initiated bidirectional streams
            initial_max_stream_data_bidi_remote(0x0006) = 0,
            /// Initial flow control limit for unidirectional streams
            initial_max_stream_data_uni(0x0007) = 0,
            /// Initial maximum number of bidirectional streams the peer may initiate
            initial_max_streams_bidi(0x0008) = 0,
            /// Initial maximum number of unidirectional streams the peer may initiate
            initial_max_streams_uni(0x0009) = 0,
            /// Exponent used to decode the ACK Delay field in the ACK frame
            ack_delay_exponent(0x000a) = 3,
            /// Maximum amount of time in milliseconds by which the endpoint will delay sending
            /// acknowledgments
            max_ack_delay(0x000b) = 25,
            /// Maximum number of connection IDs from the peer that an endpoint is willing to store
            active_connection_id_limit(0x000e) = 0,
        }
    };
}

// Expands (via apply_params!) into the struct definition plus its Default impl,
// covering the integer-typed parameters; the non-integer parameters are listed
// explicitly inside.
macro_rules! make_struct {
    {$($(#[$doc:meta])* $name:ident ($code:expr) = $default:expr,)*} => {
        /// Transport parameters used to negotiate connection-level preferences between peers
        #[derive(Debug, Copy, Clone, Eq, PartialEq)]
        pub struct TransportParameters {
            $($(#[$doc])* pub(crate) $name : u64,)*

            /// Does the endpoint support active connection migration
            pub(crate) disable_active_migration: bool,
            /// Maximum size for datagram frames
            pub(crate) max_datagram_frame_size: Option<VarInt>,

            // Server-only
            /// The DCID from the first Initial packet; must be included if sent in a Retry packet
            pub(crate) original_connection_id: Option<ConnectionId>,
            /// Token used by the client to verify a stateless reset from the server
            pub(crate) stateless_reset_token: Option<ResetToken>,
            /// The server's preferred address for communication after handshake completion
            pub(crate) preferred_address: Option<PreferredAddress>,
        }

        impl Default for TransportParameters {
            /// Standard defaults, used if the peer does not supply a given parameter.
            fn default() -> Self {
                Self {
                    $($name: $default,)*

                    disable_active_migration: false,
                    max_datagram_frame_size: None,
                    original_connection_id: None,
                    stateless_reset_token: None,
                    preferred_address: None,
                }
            }
        }
    }
}

apply_params!(make_struct);

impl TransportParameters {
    // Derives the parameters to advertise from the endpoint's local
    // configuration; values not driven by config fall back to the defaults.
    pub(crate) fn new<S>(config: &TransportConfig, server_config: Option<&ServerConfig<S>>) -> Self
    where
        S: crypto::Session,
    {
        TransportParameters {
            initial_max_streams_bidi: config.stream_window_bidi,
            initial_max_streams_uni: config.stream_window_uni,
            initial_max_data: config.receive_window,
            initial_max_stream_data_bidi_local: config.stream_receive_window,
            initial_max_stream_data_bidi_remote: config.stream_receive_window,
            initial_max_stream_data_uni: config.stream_receive_window,
            idle_timeout: config.idle_timeout.map_or(0, |x| {
                x.as_millis()
                    .try_into()
                    .expect("setter guarantees this is in-bounds")
            }),
            max_ack_delay: 0,
            disable_active_migration: server_config.map_or(false, |c| !c.migration),
            active_connection_id_limit: REM_CID_COUNT,
            max_datagram_frame_size: config
                .datagram_receive_buffer_size
                .map(|x| (x.min(u16::max_value().into()) as u16).into()),
            ..Self::default()
        }
    }

    /// Check that these parameters are legal when resuming from
    /// certain cached parameters
    pub(crate) fn validate_0rtt(&self, cached: &TransportParameters) -> Result<(), TransportError> {
        // 0-RTT data was sent against the cached limits, so every new limit
        // must be at least as permissive as the cached one.
        if cached.initial_max_data < self.initial_max_data
            || cached.initial_max_stream_data_bidi_local < self.initial_max_stream_data_bidi_local
            || cached.initial_max_stream_data_bidi_remote < self.initial_max_stream_data_bidi_remote
            || cached.initial_max_stream_data_uni < self.initial_max_stream_data_uni
            || cached.initial_max_streams_bidi < self.initial_max_streams_bidi
            || cached.initial_max_streams_uni < self.initial_max_streams_uni
            || cached.max_datagram_frame_size < self.max_datagram_frame_size
        {
            return Err(TransportError::PROTOCOL_VIOLATION(
                "0-RTT accepted with incompatible transport parameters",
            ));
        }
        Ok(())
    }
}

/// A server's preferred address
///
/// This is communicated as a transport parameter during TLS session establishment.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub(crate) struct PreferredAddress {
    address_v4: Option<SocketAddrV4>,
    address_v6: Option<SocketAddrV6>,
    connection_id: ConnectionId,
    stateless_reset_token: [u8; RESET_TOKEN_SIZE],
}

impl PreferredAddress {
    // Encoded size: IPv4 addr + port, IPv6 addr + port, CID length byte,
    // the CID itself, and the 16-byte reset token.
    fn wire_size(&self) -> u16 {
        4 + 2 + 16 + 2 + 1 + self.connection_id.len() as u16 + 16
    }

    // Unset addresses are written as the all-zero address with port 0.
    fn write<W: BufMut>(&self, w: &mut W) {
        w.write(self.address_v4.map_or(Ipv4Addr::UNSPECIFIED, |x| *x.ip()));
        w.write::<u16>(self.address_v4.map_or(0, |x| x.port()));
        w.write(self.address_v6.map_or(Ipv6Addr::UNSPECIFIED, |x| *x.ip()));
        w.write::<u16>(self.address_v6.map_or(0, |x| x.port()));
        w.write::<u8>(self.connection_id.len() as u8);
        w.put_slice(&self.connection_id);
        w.put_slice(&self.stateless_reset_token);
    }

    fn read<R: Buf>(r: &mut R) -> Result<Self, Error> {
        let ip_v4 = r.get::<Ipv4Addr>()?;
        let port_v4 = r.get::<u16>()?;
        let ip_v6 = r.get::<Ipv6Addr>()?;
        let port_v6 = r.get::<u16>()?;
        let cid_len = r.get::<u8>()?;
        if r.remaining() < cid_len as usize || cid_len > MAX_CID_SIZE as u8 {
            return Err(Error::Malformed);
        }
        let mut stage = [0; MAX_CID_SIZE];
        r.copy_to_slice(&mut stage[0..cid_len as usize]);
        let cid = ConnectionId::new(&stage[0..cid_len as usize]);
        if r.remaining() < 16 {
            return Err(Error::Malformed);
        }
        let mut token = [0; RESET_TOKEN_SIZE];
        r.copy_to_slice(&mut token);
        // The zero address/port sentinel decodes back to "not provided".
        let address_v4 = if ip_v4.is_unspecified() && port_v4 == 0 {
            None
        } else {
            Some(SocketAddrV4::new(ip_v4, port_v4))
        };
        let address_v6 = if ip_v6.is_unspecified() && port_v6 == 0 {
            None
        } else {
            Some(SocketAddrV6::new(ip_v6, port_v6, 0, 0))
        };
        // At least one address family must actually be offered.
        if address_v4.is_none() && address_v6.is_none() {
            return Err(Error::IllegalValue);
        }
        Ok(Self {
            address_v4,
            address_v6,
            connection_id: cid,
            stateless_reset_token: token,
        })
    }
}

/// Errors encountered while decoding `TransportParameters`
#[derive(Debug, Copy, Clone, Eq, PartialEq, Error)]
pub enum Error {
    /// Parameters that are semantically invalid
    #[error(display = "parameter had illegal value")]
    IllegalValue,
    /// Catch-all error for problems while decoding transport parameters
    #[error(display = "parameters were malformed")]
    Malformed,
}

impl From<Error> for TransportError {
    fn from(e: Error) -> Self {
        match e {
            Error::IllegalValue => TransportError::TRANSPORT_PARAMETER_ERROR("illegal value"),
            Error::Malformed => TransportError::TRANSPORT_PARAMETER_ERROR("malformed"),
        }
    }
}

impl From<UnexpectedEnd> for Error {
    fn from(_: UnexpectedEnd) -> Self {
        Error::Malformed
    }
}

impl TransportParameters {
    /// Encode `TransportParameters` into buffer
    pub fn write<W: BufMut>(&self, w: &mut W) {
        // Parameters are staged into a scratch buffer first because the whole
        // sequence is prefixed by its total length.
        let mut buf = Vec::new();

        // Emits one id/length/varint-value triple per integer parameter,
        // skipping parameters still at their default value.
        macro_rules! write_params {
            {$($(#[$doc:meta])* $name:ident ($code:expr) = $default:expr,)*} => {
                $(
                    if self.$name != $default {
                        buf.write::<u16>($code);
                        buf.write::<u16>(VarInt::from_u64(self.$name).expect("value too large").size() as u16);
                        buf.write_var(self.$name);
                    }
                )*
            }
        }
        apply_params!(write_params);

        // Add a reserved parameter to keep people on their toes
        buf.write::<u16>(31 * 5 + 27);
        buf.write::<u16>(0);

        if let Some(ref x) = self.original_connection_id {
            buf.write::<u16>(0x0000);
            buf.write::<u16>(x.len() as u16);
            buf.put_slice(x);
        }

        if let Some(ref x) = self.stateless_reset_token {
            buf.write::<u16>(0x0002);
            buf.write::<u16>(16);
            buf.put_slice(x);
        }

        if self.disable_active_migration {
            buf.write::<u16>(0x000c);
            buf.write::<u16>(0);
        }

        if let Some(x) = self.max_datagram_frame_size {
            buf.write::<u16>(0x0020);
            buf.write::<u16>(x.size() as u16);
            buf.write(x);
        }

        if let Some(ref x) = self.preferred_address {
            buf.write::<u16>(0x000d);
            buf.write::<u16>(x.wire_size());
            x.write(&mut buf);
        }

        w.write::<u16>(buf.len() as u16);
        w.put_slice(&buf);
    }

    /// Decode `TransportParameters` from buffer
    pub fn read<R: Buf>(side: Side, r: &mut R) -> Result<Self, Error> {
        // Initialize to protocol-specified defaults
        let mut params = TransportParameters::default();

        // The length prefix must account for exactly the rest of the buffer.
        let params_len = r.get::<u16>()?;
        if params_len as usize != r.remaining() {
            return Err(Error::Malformed);
        }

        // State to check for duplicate transport parameters.
        macro_rules! param_state {
            {$($(#[$doc:meta])* $name:ident ($code:expr) = $default:expr,)*} => {{
                struct ParamState {
                    $($name: bool,)*
                }

                ParamState {
                    $($name: false,)*
                }
            }}
        }
        let mut got = apply_params!(param_state);

        while r.has_remaining() {
            // Each parameter carries at least a 2-byte id and 2-byte length.
            if r.remaining() < 4 {
                return Err(Error::Malformed);
            }
            let id = r.get::<u16>().unwrap();
            let len = r.get::<u16>().unwrap();
            if r.remaining() < len as usize {
                return Err(Error::Malformed);
            }

            match id {
                0x0000 => {
                    if len > MAX_CID_SIZE as u16 || params.original_connection_id.is_some() {
                        return Err(Error::Malformed);
                    }
                    let mut staging = [0; MAX_CID_SIZE];
                    r.copy_to_slice(&mut staging[0..len as usize]);
                    params.original_connection_id =
                        Some(ConnectionId::new(&staging[0..len as usize]));
                }
                0x0002 => {
                    if len != 16 || params.stateless_reset_token.is_some() {
                        return Err(Error::Malformed);
                    }
                    let mut tok = [0; RESET_TOKEN_SIZE];
                    r.copy_to_slice(&mut tok);
                    params.stateless_reset_token = Some(tok.into());
                }
                0x000c => {
                    if len != 0 || params.disable_active_migration {
                        return Err(Error::Malformed);
                    }
                    params.disable_active_migration = true;
                }
                0x000d => {
                    if params.preferred_address.is_some() {
                        return Err(Error::Malformed);
                    }
                    params.preferred_address =
                        Some(PreferredAddress::read(&mut r.take(len as usize))?);
                }
                0x0020 => {
                    if len > 8 || params.max_datagram_frame_size.is_some() {
                        return Err(Error::Malformed);
                    }
                    params.max_datagram_frame_size = Some(r.get().unwrap());
                }
                _ => {
                    // All remaining known ids are the integer parameters;
                    // unknown ids are skipped over by their declared length.
                    macro_rules! parse {
                        {$($(#[$doc:meta])* $name:ident ($code:expr) = $default:expr,)*} => {
                            match id {
                                $($code => {
                                    params.$name = r.get_var()?;
                                    // Reject re-encoded lengths that disagree
                                    // with the varint, and duplicates.
                                    if len != VarInt::from_u64(params.$name).unwrap().size() as u16 || got.$name { return Err(Error::Malformed); }
                                    got.$name = true;
                                })*
                                _ => r.advance(len as usize),
                            }
                        }
                    }
                    apply_params!(parse);
                }
            }
        }

        // Semantic validation
        if params.ack_delay_exponent > 20
            || params.max_ack_delay >= 1 << 14
            || (side.is_server()
                && (params.original_connection_id.is_some()
                    || params.stateless_reset_token.is_some()
                    || params.preferred_address.is_some()))
        {
            return Err(Error::IllegalValue);
        }

        Ok(params)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn coding() {
        let mut buf = Vec::new();
        let params = TransportParameters {
            initial_max_streams_bidi: 16,
            initial_max_streams_uni: 16,
            ack_delay_exponent: 2,
            max_packet_size: 1200,
            preferred_address: Some(PreferredAddress {
                address_v4: Some(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 42)),
                address_v6: None,
                connection_id: ConnectionId::new(&[]),
                stateless_reset_token: [0xab; RESET_TOKEN_SIZE],
            }),
            ..TransportParameters::default()
        };
        params.write(&mut buf);
        assert_eq!(
            TransportParameters::read(Side::Client, &mut buf.as_slice()).unwrap(),
            params
        );
    }
}
38.393443
146
0.55441
e8c59d07b3ac3982b6185bf3e815e69837eb02a7
51,921
// C code generator: renders the computed parse and lex tables for a grammar
// into a single C source file implementing a Tree-sitter parser.

use super::grammars::{ExternalToken, LexicalGrammar, SyntaxGrammar, VariableType};
use super::nfa::CharacterSet;
use super::rules::{Alias, AliasMap, Symbol, SymbolType};
use super::tables::{
    AdvanceAction, FieldLocation, GotoAction, LexState, LexTable, ParseAction, ParseTable,
    ParseTableEntry,
};
use core::ops::Range;
use std::cmp;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt::Write;
use std::mem::swap;

// Currently, the library supports a new ABI version that has not yet been
// stabilized, and the parser generation does not use it by default.
const STABLE_LANGUAGE_VERSION: usize = tree_sitter::LANGUAGE_VERSION - 1;

// Append formatted text to the generator's output buffer, with no indentation
// and no trailing newline. `$this` is the `Generator` (has `buffer: String`).
macro_rules! add {
    ($this: tt, $($arg: tt)*) => {{
        $this.buffer.write_fmt(format_args!($($arg)*)).unwrap();
    }}
}

// Emit leading whitespace for the current indentation level
// (two spaces per level — NOTE(review): restored the canonical two-space
// literal; confirm against the original file, the extraction may have
// collapsed whitespace inside the string).
macro_rules! add_whitespace {
    ($this: tt) => {{
        for _ in 0..$this.indent_level {
            write!(&mut $this.buffer, "  ").unwrap();
        }
    }};
}

// Append one complete line: current indentation, formatted text, newline.
macro_rules! add_line {
    ($this: tt, $($arg: tt)*) => {
        add_whitespace!($this);
        $this.buffer.write_fmt(format_args!($($arg)*)).unwrap();
        $this.buffer += "\n";
    }
}

// Increase the indentation level for subsequently emitted lines.
macro_rules! indent {
    ($this: tt) => {
        $this.indent_level += 1;
    };
}

// Counterpart of `indent!` — the `dedent!` macro body continues on the next line.
macro_rules!
dedent { ($this: tt) => { assert_ne!($this.indent_level, 0); $this.indent_level -= 1; }; } const SMALL_STATE_THRESHOLD: usize = 64; struct Generator { buffer: String, indent_level: usize, language_name: String, parse_table: ParseTable, main_lex_table: LexTable, keyword_lex_table: LexTable, large_state_count: usize, keyword_capture_token: Option<Symbol>, syntax_grammar: SyntaxGrammar, lexical_grammar: LexicalGrammar, simple_aliases: AliasMap, symbol_order: HashMap<Symbol, usize>, symbol_ids: HashMap<Symbol, String>, alias_ids: HashMap<Alias, String>, alias_map: BTreeMap<Alias, Option<Symbol>>, field_names: Vec<String>, next_abi: bool, } impl Generator { fn generate(mut self) -> String { self.init(); self.add_includes(); self.add_pragmas(); self.add_stats(); self.add_symbol_enum(); self.add_symbol_names_list(); if self.next_abi { self.add_unique_symbol_map(); } self.add_symbol_metadata_list(); if !self.field_names.is_empty() { self.add_field_name_enum(); self.add_field_name_names_list(); self.add_field_sequences(); } if !self.parse_table.production_infos.is_empty() { self.add_alias_sequences(); } let mut main_lex_table = LexTable::default(); swap(&mut main_lex_table, &mut self.main_lex_table); self.add_lex_function("ts_lex", main_lex_table); if self.keyword_capture_token.is_some() { let mut keyword_lex_table = LexTable::default(); swap(&mut keyword_lex_table, &mut self.keyword_lex_table); self.add_lex_function("ts_lex_keywords", keyword_lex_table); } self.add_lex_modes_list(); if !self.syntax_grammar.external_tokens.is_empty() { self.add_external_token_enum(); self.add_external_scanner_symbol_map(); self.add_external_scanner_states_list(); } self.add_parse_table(); self.add_parser_export(); self.buffer } fn init(&mut self) { let mut symbol_identifiers = HashSet::new(); for i in 0..self.parse_table.symbols.len() { self.assign_symbol_id(self.parse_table.symbols[i], &mut symbol_identifiers); } let mut field_names = Vec::new(); for production_info in 
&self.parse_table.production_infos { for field_name in production_info.field_map.keys() { field_names.push(field_name); } for alias in &production_info.alias_sequence { if let Some(alias) = &alias { let alias_kind = alias.kind(); let matching_symbol = self.parse_table.symbols.iter().cloned().find(|symbol| { let (name, kind) = self.metadata_for_symbol(*symbol); name == alias.value && kind == alias_kind }); let alias_id = if let Some(symbol) = matching_symbol { self.symbol_ids[&symbol].clone() } else if alias.is_named { format!("alias_sym_{}", self.sanitize_identifier(&alias.value)) } else { format!("anon_alias_sym_{}", self.sanitize_identifier(&alias.value)) }; self.alias_ids.entry(alias.clone()).or_insert(alias_id); self.alias_map .entry(alias.clone()) .or_insert(matching_symbol); } } } field_names.sort_unstable(); field_names.dedup(); self.field_names = field_names.into_iter().cloned().collect(); // If we are opting in to the new unstable language ABI, then use the concept of // "small parse states". Otherwise, use the same representation for all parse // states. if self.next_abi { let threshold = cmp::min(SMALL_STATE_THRESHOLD, self.parse_table.symbols.len() / 2); self.large_state_count = self .parse_table .states .iter() .enumerate() .take_while(|(i, s)| { *i <= 1 || s.terminal_entries.len() + s.nonterminal_entries.len() > threshold }) .count(); } else { self.large_state_count = self.parse_table.states.len(); } } fn add_includes(&mut self) { add_line!(self, "#include <tree_sitter/parser.h>"); add_line!(self, ""); } fn add_pragmas(&mut self) { add_line!(self, "#if defined(__GNUC__) || defined(__clang__)"); add_line!(self, "#pragma GCC diagnostic push"); add_line!( self, "#pragma GCC diagnostic ignored \"-Wmissing-field-initializers\"" ); add_line!(self, "#endif"); add_line!(self, ""); // Compiling large lexer functions can be very slow. 
Disabling optimizations // is not ideal, but only a very small fraction of overall parse time is // spent lexing, so the performance impact of this is negligible. if self.main_lex_table.states.len() > 300 { add_line!(self, "#ifdef _MSC_VER"); add_line!(self, "#pragma optimize(\"\", off)"); add_line!(self, "#elif defined(__clang__)"); add_line!(self, "#pragma clang optimize off"); add_line!(self, "#elif defined(__GNUC__)"); add_line!(self, "#pragma GCC optimize (\"O0\")"); add_line!(self, "#endif"); add_line!(self, ""); } } fn add_stats(&mut self) { let token_count = self .parse_table .symbols .iter() .filter(|symbol| { if symbol.is_terminal() || symbol.is_eof() { true } else if symbol.is_external() { self.syntax_grammar.external_tokens[symbol.index] .corresponding_internal_token .is_none() } else { false } }) .count(); if self.next_abi { add_line!( self, "#define LANGUAGE_VERSION {}", tree_sitter::LANGUAGE_VERSION ); } else { add_line!(self, "#define LANGUAGE_VERSION {}", STABLE_LANGUAGE_VERSION); } add_line!( self, "#define STATE_COUNT {}", self.parse_table.states.len() ); if self.next_abi { add_line!(self, "#define LARGE_STATE_COUNT {}", self.large_state_count); } add_line!( self, "#define SYMBOL_COUNT {}", self.parse_table.symbols.len() ); add_line!( self, "#define ALIAS_COUNT {}", self.alias_map.iter().filter(|e| e.1.is_none()).count() ); add_line!(self, "#define TOKEN_COUNT {}", token_count); add_line!( self, "#define EXTERNAL_TOKEN_COUNT {}", self.syntax_grammar.external_tokens.len() ); add_line!(self, "#define FIELD_COUNT {}", self.field_names.len()); add_line!( self, "#define MAX_ALIAS_SEQUENCE_LENGTH {}", self.parse_table.max_aliased_production_length ); add_line!(self, ""); } fn add_symbol_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); self.symbol_order.insert(Symbol::end(), 0); let mut i = 1; for symbol in self.parse_table.symbols.iter() { if *symbol != Symbol::end() { self.symbol_order.insert(*symbol, i); add_line!(self, "{} = {},", 
self.symbol_ids[&symbol], i); i += 1; } } for (alias, symbol) in &self.alias_map { if symbol.is_none() { add_line!(self, "{} = {},", self.alias_ids[&alias], i); i += 1; } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_names_list(&mut self) { add_line!(self, "static const char *ts_symbol_names[] = {{"); indent!(self); for symbol in self.parse_table.symbols.iter() { let name = self.sanitize_string( self.simple_aliases .get(symbol) .map(|alias| alias.value.as_str()) .unwrap_or(self.metadata_for_symbol(*symbol).0), ); add_line!(self, "[{}] = \"{}\",", self.symbol_ids[&symbol], name); } for (alias, symbol) in &self.alias_map { if symbol.is_none() { add_line!( self, "[{}] = \"{}\",", self.alias_ids[&alias], self.sanitize_string(&alias.value) ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_unique_symbol_map(&mut self) { add_line!(self, "static TSSymbol ts_symbol_map[] = {{"); indent!(self); for symbol in &self.parse_table.symbols { let mut mapping = symbol; // There can be multiple symbols in the grammar that have the same name and kind, // due to simple aliases. When that happens, ensure that they map to the same // public-facing symbol. If one of the symbols is not aliased, choose that one // to be the public-facing symbol. Otherwise, pick the symbol with the lowest // numeric value. if let Some(alias) = self.simple_aliases.get(symbol) { let kind = alias.kind(); for other_symbol in &self.parse_table.symbols { if let Some(other_alias) = self.simple_aliases.get(other_symbol) { if other_symbol < mapping && other_alias == alias { mapping = other_symbol; } } else if self.metadata_for_symbol(*other_symbol) == (&alias.value, kind) { mapping = other_symbol; break; } } } // Two anonymous tokens with different flags but the same string value // should be represented with the same symbol in the public API. 
Examples: // * "<" and token(prec(1, "<")) // * "(" and token.immediate("(") else if symbol.is_terminal() { let metadata = self.metadata_for_symbol(*symbol); for other_symbol in &self.parse_table.symbols { let other_metadata = self.metadata_for_symbol(*other_symbol); if other_metadata == metadata { mapping = other_symbol; break; } } } add_line!( self, "[{}] = {},", self.symbol_ids[&symbol], self.symbol_ids[mapping], ); } for (alias, symbol) in &self.alias_map { if symbol.is_none() { add_line!( self, "[{}] = {},", self.alias_ids[&alias], self.alias_ids[&alias], ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for (i, field_name) in self.field_names.iter().enumerate() { add_line!(self, "{} = {},", self.field_id(field_name), i + 1); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_name_names_list(&mut self) { add_line!(self, "static const char *ts_field_names[] = {{"); indent!(self); add_line!(self, "[0] = NULL,"); for field_name in &self.field_names { add_line!( self, "[{}] = \"{}\",", self.field_id(field_name), field_name ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_symbol_metadata_list(&mut self) { add_line!( self, "static const TSSymbolMetadata ts_symbol_metadata[] = {{" ); indent!(self); for symbol in &self.parse_table.symbols { add_line!(self, "[{}] = {{", self.symbol_ids[&symbol]); indent!(self); if let Some(Alias { is_named, .. 
}) = self.simple_aliases.get(symbol) { add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", is_named); } else { match self.metadata_for_symbol(*symbol).1 { VariableType::Named => { add_line!(self, ".visible = true,"); add_line!(self, ".named = true,"); } VariableType::Anonymous => { add_line!(self, ".visible = true,"); add_line!(self, ".named = false,"); } VariableType::Hidden => { add_line!(self, ".visible = false,"); add_line!(self, ".named = true,"); } VariableType::Auxiliary => { add_line!(self, ".visible = false,"); add_line!(self, ".named = false,"); } } } dedent!(self); add_line!(self, "}},"); } for (alias, matching_symbol) in &self.alias_map { if matching_symbol.is_none() { add_line!(self, "[{}] = {{", self.alias_ids[&alias]); indent!(self); add_line!(self, ".visible = true,"); add_line!(self, ".named = {},", alias.is_named); dedent!(self); add_line!(self, "}},"); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_alias_sequences(&mut self) { add_line!( self, "static TSSymbol ts_alias_sequences[{}][MAX_ALIAS_SEQUENCE_LENGTH] = {{", self.parse_table.production_infos.len() ); indent!(self); for (i, production_info) in self.parse_table.production_infos.iter().enumerate() { if production_info.alias_sequence.is_empty() { // Work around MSVC's intolerance of empty array initializers by // explicitly zero-initializing the first element. 
if i == 0 { add_line!(self, "[0] = {{0}},"); } continue; } add_line!(self, "[{}] = {{", i); indent!(self); for (j, alias) in production_info.alias_sequence.iter().enumerate() { if let Some(alias) = alias { add_line!(self, "[{}] = {},", j, self.alias_ids[&alias]); } } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_field_sequences(&mut self) { let mut flat_field_maps = vec![]; let mut next_flat_field_map_index = 0; self.get_field_map_id( &Vec::new(), &mut flat_field_maps, &mut next_flat_field_map_index, ); let mut field_map_ids = Vec::new(); for production_info in &self.parse_table.production_infos { if !production_info.field_map.is_empty() { let mut flat_field_map = Vec::new(); for (field_name, locations) in &production_info.field_map { for location in locations { flat_field_map.push((field_name.clone(), *location)); } } field_map_ids.push(( self.get_field_map_id( &flat_field_map, &mut flat_field_maps, &mut next_flat_field_map_index, ), flat_field_map.len(), )); } else { field_map_ids.push((0, 0)); } } add_line!( self, "static const TSFieldMapSlice ts_field_map_slices[{}] = {{", self.parse_table.production_infos.len(), ); indent!(self); for (production_id, (row_id, length)) in field_map_ids.into_iter().enumerate() { if length > 0 { add_line!( self, "[{}] = {{.index = {}, .length = {}}},", production_id, row_id, length ); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); add_line!( self, "static const TSFieldMapEntry ts_field_map_entries[] = {{", ); indent!(self); for (row_index, field_pairs) in flat_field_maps.into_iter().skip(1) { add_line!(self, "[{}] =", row_index); indent!(self); for (field_name, location) in field_pairs { add_whitespace!(self); add!(self, "{{{}, {}", self.field_id(&field_name), location.index); if location.inherited { add!(self, ", .inherited = true"); } add!(self, "}},\n"); } dedent!(self); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn 
add_lex_function(&mut self, name: &str, lex_table: LexTable) { add_line!( self, "static bool {}(TSLexer *lexer, TSStateId state) {{", name ); indent!(self); add_line!(self, "START_LEXER();"); if self.next_abi { add_line!(self, "eof = lexer->eof(lexer);"); } else { add_line!(self, "eof = lookahead == 0;"); } add_line!(self, "switch (state) {{"); indent!(self); for (i, state) in lex_table.states.into_iter().enumerate() { add_line!(self, "case {}:", i); indent!(self); self.add_lex_state(state); dedent!(self); } add_line!(self, "default:"); indent!(self); add_line!(self, "return false;"); dedent!(self); dedent!(self); add_line!(self, "}}"); dedent!(self); add_line!(self, "}}"); add_line!(self, ""); } fn add_lex_state(&mut self, state: LexState) { if let Some(accept_action) = state.accept_action { add_line!(self, "ACCEPT_TOKEN({});", self.symbol_ids[&accept_action]); } if let Some(eof_action) = state.eof_action { add_line!(self, "if (eof) ADVANCE({});", eof_action.state); } let mut ruled_out_characters = HashSet::new(); for (characters, action) in state.advance_actions { let previous_length = self.buffer.len(); add_whitespace!(self); add!(self, "if ("); if self.add_character_set_condition(&characters, &ruled_out_characters) { add!(self, ") "); self.add_advance_action(&action); if let CharacterSet::Include(chars) = characters { ruled_out_characters.extend(chars.iter().map(|c| *c as u32)); } } else { self.buffer.truncate(previous_length); self.add_advance_action(&action); } add!(self, "\n"); } add_line!(self, "END_STATE();"); } fn add_character_set_condition( &mut self, characters: &CharacterSet, ruled_out_characters: &HashSet<u32>, ) -> bool { match characters { CharacterSet::Include(chars) => { let ranges = Self::get_ranges(chars, ruled_out_characters); self.add_character_range_conditions(ranges, false) } CharacterSet::Exclude(chars) => { let ranges = Some('\0'..'\0') .into_iter() .chain(Self::get_ranges(chars, ruled_out_characters)); 
self.add_character_range_conditions(ranges, true) } } } fn add_character_range_conditions( &mut self, ranges: impl Iterator<Item = Range<char>>, is_negated: bool, ) -> bool { let line_break = "\n "; let mut did_add = false; for range in ranges { if is_negated { if did_add { add!(self, " &&{}", line_break); } if range.end == range.start { add!(self, "lookahead != "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead != "); self.add_character(range.start); add!(self, " &&{}lookahead != ", line_break); self.add_character(range.end); } else { add!(self, "(lookahead < "); self.add_character(range.start); add!(self, " || "); self.add_character(range.end); add!(self, " < lookahead)"); } } else { if did_add { add!(self, " ||{}", line_break); } if range.end == range.start { add!(self, "lookahead == "); self.add_character(range.start); } else if range.end as u32 == range.start as u32 + 1 { add!(self, "lookahead == "); self.add_character(range.start); add!(self, " ||{}lookahead == ", line_break); self.add_character(range.end); } else { add!(self, "("); self.add_character(range.start); add!(self, " <= lookahead && lookahead <= "); self.add_character(range.end); add!(self, ")"); } } did_add = true; } did_add } fn get_ranges<'a>( chars: &'a Vec<char>, ruled_out_characters: &'a HashSet<u32>, ) -> impl Iterator<Item = Range<char>> + 'a { let mut prev_range: Option<Range<char>> = None; chars .iter() .map(|c| (*c, false)) .chain(Some(('\0', true))) .filter_map(move |(c, done)| { if done { return prev_range.clone(); } if ruled_out_characters.contains(&(c as u32)) { return None; } if let Some(range) = prev_range.clone() { let mut prev_range_successor = range.end as u32 + 1; while prev_range_successor < c as u32 { if !ruled_out_characters.contains(&prev_range_successor) { prev_range = Some(c..c); return Some(range); } prev_range_successor += 1; } prev_range = Some(range.start..c); None } else { prev_range = Some(c..c); None } 
}) } fn add_advance_action(&mut self, action: &AdvanceAction) { if action.in_main_token { add!(self, "ADVANCE({});", action.state); } else { add!(self, "SKIP({})", action.state); } } fn add_lex_modes_list(&mut self) { add_line!(self, "static TSLexMode ts_lex_modes[STATE_COUNT] = {{"); indent!(self); for (i, state) in self.parse_table.states.iter().enumerate() { if state.is_non_terminal_extra && state.terminal_entries.len() == 1 && *state.terminal_entries.iter().next().unwrap().0 == Symbol::end() { add_line!(self, "[{}] = {{-1}},", i,); } else if state.external_lex_state_id > 0 { add_line!( self, "[{}] = {{.lex_state = {}, .external_lex_state = {}}},", i, state.lex_state_id, state.external_lex_state_id ); } else { add_line!(self, "[{}] = {{.lex_state = {}}},", i, state.lex_state_id); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_token_enum(&mut self) { add_line!(self, "enum {{"); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { add_line!( self, "{} = {},", self.external_token_id(&self.syntax_grammar.external_tokens[i]), i ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_symbol_map(&mut self) { add_line!( self, "static TSSymbol ts_external_scanner_symbol_map[EXTERNAL_TOKEN_COUNT] = {{" ); indent!(self); for i in 0..self.syntax_grammar.external_tokens.len() { let token = &self.syntax_grammar.external_tokens[i]; let id_token = token .corresponding_internal_token .unwrap_or(Symbol::external(i)); add_line!( self, "[{}] = {},", self.external_token_id(&token), self.symbol_ids[&id_token], ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_external_scanner_states_list(&mut self) { add_line!( self, "static bool ts_external_scanner_states[{}][EXTERNAL_TOKEN_COUNT] = {{", self.parse_table.external_lex_states.len(), ); indent!(self); for i in 0..self.parse_table.external_lex_states.len() { if !self.parse_table.external_lex_states[i].is_empty() { 
add_line!(self, "[{}] = {{", i); indent!(self); for token in self.parse_table.external_lex_states[i].iter() { add_line!( self, "[{}] = true,", self.external_token_id(&self.syntax_grammar.external_tokens[token.index]) ); } dedent!(self); add_line!(self, "}},"); } } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parse_table(&mut self) { let mut parse_table_entries = Vec::new(); let mut next_parse_action_list_index = 0; self.get_parse_action_list_id( &ParseTableEntry { actions: Vec::new(), reusable: false, }, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "static uint16_t ts_parse_table[{}][SYMBOL_COUNT] = {{", if self.next_abi { "LARGE_STATE_COUNT" } else { "STATE_COUNT" } ); indent!(self); let mut terminal_entries = Vec::new(); let mut nonterminal_entries = Vec::new(); for (i, state) in self .parse_table .states .iter() .enumerate() .take(self.large_state_count) { add_line!(self, "[{}] = {{", i); indent!(self); // Ensure the entries are in a deterministic order, since they are // internally represented as a hash map. 
terminal_entries.clear(); nonterminal_entries.clear(); terminal_entries.extend(state.terminal_entries.iter()); nonterminal_entries.extend(state.nonterminal_entries.iter()); terminal_entries.sort_unstable_by_key(|e| self.symbol_order.get(e.0)); nonterminal_entries.sort_unstable_by_key(|k| k.0); for (symbol, action) in &nonterminal_entries { add_line!( self, "[{}] = STATE({}),", self.symbol_ids[symbol], match action { GotoAction::Goto(state) => *state, GotoAction::ShiftExtra => i, } ); } for (symbol, entry) in &terminal_entries { let entry_id = self.get_parse_action_list_id( entry, &mut parse_table_entries, &mut next_parse_action_list_index, ); add_line!( self, "[{}] = ACTIONS({}),", self.symbol_ids[symbol], entry_id ); } dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); if self.large_state_count < self.parse_table.states.len() { add_line!(self, "static uint16_t ts_small_parse_table[] = {{"); indent!(self); let mut index = 0; let mut small_state_indices = Vec::new(); let mut symbols_by_value: HashMap<(usize, SymbolType), Vec<Symbol>> = HashMap::new(); for state in self.parse_table.states.iter().skip(self.large_state_count) { small_state_indices.push(index); symbols_by_value.clear(); terminal_entries.clear(); terminal_entries.extend(state.terminal_entries.iter()); terminal_entries.sort_unstable_by_key(|e| self.symbol_order.get(e.0)); // In a given parse state, many lookahead symbols have the same actions. // So in the "small state" representation, group symbols by their action // in order to avoid repeating the action. 
for (symbol, entry) in &terminal_entries { let entry_id = self.get_parse_action_list_id( entry, &mut parse_table_entries, &mut next_parse_action_list_index, ); symbols_by_value .entry((entry_id, SymbolType::Terminal)) .or_default() .push(**symbol); } for (symbol, action) in &state.nonterminal_entries { let state_id = match action { GotoAction::Goto(i) => *i, GotoAction::ShiftExtra => { self.large_state_count + small_state_indices.len() - 1 } }; symbols_by_value .entry((state_id, SymbolType::NonTerminal)) .or_default() .push(*symbol); } let mut values_with_symbols = symbols_by_value.drain().collect::<Vec<_>>(); values_with_symbols.sort_unstable_by_key(|((value, kind), symbols)| { (symbols.len(), *kind, *value, symbols[0]) }); add_line!(self, "[{}] = {},", index, values_with_symbols.len()); indent!(self); for ((value, kind), symbols) in values_with_symbols.iter_mut() { if *kind == SymbolType::NonTerminal { add_line!(self, "STATE({}), {},", value, symbols.len()); } else { add_line!(self, "ACTIONS({}), {},", value, symbols.len()); } symbols.sort_unstable(); indent!(self); for symbol in symbols { add_line!(self, "{},", self.symbol_ids[symbol]); } dedent!(self); } dedent!(self); index += 1 + values_with_symbols .iter() .map(|(_, symbols)| 2 + symbols.len()) .sum::<usize>(); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); add_line!(self, "static uint32_t ts_small_parse_table_map[] = {{"); indent!(self); for i in self.large_state_count..self.parse_table.states.len() { add_line!( self, "[SMALL_STATE({})] = {},", i, small_state_indices[i - self.large_state_count] ); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } self.add_parse_action_list(parse_table_entries); } fn add_parse_action_list(&mut self, parse_table_entries: Vec<(usize, ParseTableEntry)>) { add_line!(self, "static TSParseActionEntry ts_parse_actions[] = {{"); indent!(self); for (i, entry) in parse_table_entries { add!( self, " [{}] = {{.count = {}, .reusable = {}}},", i, 
entry.actions.len(), entry.reusable ); for action in entry.actions { add!(self, " "); match action { ParseAction::Accept => add!(self, " ACCEPT_INPUT()"), ParseAction::Recover => add!(self, "RECOVER()"), ParseAction::ShiftExtra => add!(self, "SHIFT_EXTRA()"), ParseAction::Shift { state, is_repetition, } => { if is_repetition { add!(self, "SHIFT_REPEAT({})", state); } else { add!(self, "SHIFT({})", state); } } ParseAction::Reduce { symbol, child_count, dynamic_precedence, production_id, .. } => { add!(self, "REDUCE({}, {}", self.symbol_ids[&symbol], child_count); if dynamic_precedence != 0 { add!(self, ", .dynamic_precedence = {}", dynamic_precedence); } if production_id != 0 { add!(self, ", .production_id = {}", production_id); } add!(self, ")"); } } add!(self, ",") } add!(self, "\n"); } dedent!(self); add_line!(self, "}};"); add_line!(self, ""); } fn add_parser_export(&mut self) { let language_function_name = format!("tree_sitter_{}", self.language_name); let external_scanner_name = format!("{}_external_scanner", language_function_name); add_line!(self, "#ifdef __cplusplus"); add_line!(self, r#"extern "C" {{"#); add_line!(self, "#endif"); if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, "void *{}_create(void);", external_scanner_name); add_line!(self, "void {}_destroy(void *);", external_scanner_name); add_line!( self, "bool {}_scan(void *, TSLexer *, const bool *);", external_scanner_name ); add_line!( self, "unsigned {}_serialize(void *, char *);", external_scanner_name ); add_line!( self, "void {}_deserialize(void *, const char *, unsigned);", external_scanner_name ); add_line!(self, ""); } add_line!(self, "#ifdef _WIN32"); add_line!(self, "#define extern __declspec(dllexport)"); add_line!(self, "#endif"); add_line!(self, ""); add_line!( self, "extern const TSLanguage *{}(void) {{", language_function_name ); indent!(self); add_line!(self, "static TSLanguage language = {{"); indent!(self); add_line!(self, ".version = LANGUAGE_VERSION,"); 
add_line!(self, ".symbol_count = SYMBOL_COUNT,"); add_line!(self, ".alias_count = ALIAS_COUNT,"); add_line!(self, ".token_count = TOKEN_COUNT,"); if self.next_abi { add_line!(self, ".large_state_count = LARGE_STATE_COUNT,"); } add_line!(self, ".symbol_metadata = ts_symbol_metadata,"); add_line!( self, ".parse_table = (const unsigned short *)ts_parse_table," ); if self.large_state_count < self.parse_table.states.len() { add_line!( self, ".small_parse_table = (const uint16_t *)ts_small_parse_table," ); add_line!( self, ".small_parse_table_map = (const uint32_t *)ts_small_parse_table_map," ); } add_line!(self, ".parse_actions = ts_parse_actions,"); add_line!(self, ".lex_modes = ts_lex_modes,"); add_line!(self, ".symbol_names = ts_symbol_names,"); if self.next_abi { add_line!(self, ".public_symbol_map = ts_symbol_map,"); } if !self.parse_table.production_infos.is_empty() { add_line!( self, ".alias_sequences = (const TSSymbol *)ts_alias_sequences," ); } add_line!(self, ".field_count = FIELD_COUNT,"); if !self.field_names.is_empty() { add_line!(self, ".field_names = ts_field_names,"); add_line!( self, ".field_map_slices = (const TSFieldMapSlice *)ts_field_map_slices," ); add_line!( self, ".field_map_entries = (const TSFieldMapEntry *)ts_field_map_entries," ); } add_line!( self, ".max_alias_sequence_length = MAX_ALIAS_SEQUENCE_LENGTH," ); add_line!(self, ".lex_fn = ts_lex,"); if let Some(keyword_capture_token) = self.keyword_capture_token { add_line!(self, ".keyword_lex_fn = ts_lex_keywords,"); add_line!( self, ".keyword_capture_token = {},", self.symbol_ids[&keyword_capture_token] ); } add_line!(self, ".external_token_count = EXTERNAL_TOKEN_COUNT,"); if !self.syntax_grammar.external_tokens.is_empty() { add_line!(self, ".external_scanner = {{"); indent!(self); add_line!(self, "(const bool *)ts_external_scanner_states,"); add_line!(self, "ts_external_scanner_symbol_map,"); add_line!(self, "{}_create,", external_scanner_name); add_line!(self, "{}_destroy,", 
external_scanner_name); add_line!(self, "{}_scan,", external_scanner_name); add_line!(self, "{}_serialize,", external_scanner_name); add_line!(self, "{}_deserialize,", external_scanner_name); dedent!(self); add_line!(self, "}},"); } dedent!(self); add_line!(self, "}};"); add_line!(self, "return &language;"); dedent!(self); add_line!(self, "}}"); add_line!(self, "#ifdef __cplusplus"); add_line!(self, "}}"); add_line!(self, "#endif"); } fn get_parse_action_list_id( &self, entry: &ParseTableEntry, parse_table_entries: &mut Vec<(usize, ParseTableEntry)>, next_parse_action_list_index: &mut usize, ) -> usize { if let Some((index, _)) = parse_table_entries.iter().find(|(_, e)| *e == *entry) { return *index; } let result = *next_parse_action_list_index; parse_table_entries.push((result, entry.clone())); *next_parse_action_list_index += 1 + entry.actions.len(); result } fn get_field_map_id( &self, flat_field_map: &Vec<(String, FieldLocation)>, flat_field_maps: &mut Vec<(usize, Vec<(String, FieldLocation)>)>, next_flat_field_map_index: &mut usize, ) -> usize { if let Some((index, _)) = flat_field_maps.iter().find(|(_, e)| *e == *flat_field_map) { return *index; } let result = *next_flat_field_map_index; flat_field_maps.push((result, flat_field_map.clone())); *next_flat_field_map_index += flat_field_map.len(); result } fn external_token_id(&self, token: &ExternalToken) -> String { format!( "ts_external_token_{}", self.sanitize_identifier(&token.name) ) } fn assign_symbol_id(&mut self, symbol: Symbol, used_identifiers: &mut HashSet<String>) { let mut id; if symbol == Symbol::end() { id = "ts_builtin_sym_end".to_string(); } else { let (name, kind) = self.metadata_for_symbol(symbol); id = match kind { VariableType::Auxiliary => format!("aux_sym_{}", self.sanitize_identifier(name)), VariableType::Anonymous => format!("anon_sym_{}", self.sanitize_identifier(name)), VariableType::Hidden | VariableType::Named => { format!("sym_{}", self.sanitize_identifier(name)) } }; let mut 
suffix_number = 1; let mut suffix = String::new(); while used_identifiers.contains(&id) { id.drain(id.len() - suffix.len()..); suffix_number += 1; suffix = suffix_number.to_string(); id += &suffix; } } used_identifiers.insert(id.clone()); self.symbol_ids.insert(symbol, id); } fn field_id(&self, field_name: &String) -> String { format!("field_{}", field_name) } fn metadata_for_symbol(&self, symbol: Symbol) -> (&str, VariableType) { match symbol.kind { SymbolType::End => ("end", VariableType::Hidden), SymbolType::NonTerminal => { let variable = &self.syntax_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::Terminal => { let variable = &self.lexical_grammar.variables[symbol.index]; (&variable.name, variable.kind) } SymbolType::External => { let token = &self.syntax_grammar.external_tokens[symbol.index]; (&token.name, token.kind) } } } fn sanitize_identifier(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' { result.push(c); } else { let replacement = match c { '~' => "TILDE", '`' => "BQUOTE", '!' => "BANG", '@' => "AT", '#' => "POUND", '$' => "DOLLAR", '%' => "PERCENT", '^' => "CARET", '&' => "AMP", '*' => "STAR", '(' => "LPAREN", ')' => "RPAREN", '-' => "DASH", '+' => "PLUS", '=' => "EQ", '{' => "LBRACE", '}' => "RBRACE", '[' => "LBRACK", ']' => "RBRACK", '\\' => "BSLASH", '|' => "PIPE", ':' => "COLON", ';' => "SEMI", '"' => "DQUOTE", '\'' => "SQUOTE", '<' => "LT", '>' => "GT", ',' => "COMMA", '.' => "DOT", '?' 
=> "QMARK", '/' => "SLASH", '\n' => "LF", '\r' => "CR", '\t' => "TAB", _ => continue, }; if !result.is_empty() && !result.ends_with("_") { result.push('_'); } result += replacement; } } result } fn sanitize_string(&self, name: &str) -> String { let mut result = String::with_capacity(name.len()); for c in name.chars() { match c { '\"' => result += "\\\"", '\\' => result += "\\\\", '\u{000c}' => result += "\\f", '\n' => result += "\\n", '\r' => result += "\\r", '\t' => result += "\\t", _ => result.push(c), } } result } fn add_character(&mut self, c: char) { match c { '\'' => add!(self, "'\\''"), '\\' => add!(self, "'\\\\'"), '\u{000c}' => add!(self, "'\\f'"), '\n' => add!(self, "'\\n'"), '\t' => add!(self, "'\\t'"), '\r' => add!(self, "'\\r'"), _ => { if c == ' ' || c.is_ascii_graphic() { add!(self, "'{}'", c) } else { add!(self, "{}", c as u32) } } } } } /// Returns a String of C code for the given components of a parser. /// /// # Arguments /// /// * `name` - A string slice containing the name of the language /// * `parse_table` - The generated parse table for the language /// * `main_lex_table` - The generated lexing table for the language /// * `keyword_lex_table` - The generated keyword lexing table for the language /// * `keyword_capture_token` - A symbol indicating which token is used /// for keyword capture, if any. /// * `syntax_grammar` - The syntax grammar extracted from the language's grammar /// * `lexical_grammar` - The lexical grammar extracted from the language's grammar /// * `simple_aliases` - A map describing the global rename rules that should apply. /// the keys are symbols that are *always* aliased in the same way, and the values /// are the aliases that are applied to those symbols. /// * `next_abi` - A boolean indicating whether to opt into the new, unstable parse /// table format. This is mainly used for testing, when developing Tree-sitter itself. 
pub(crate) fn render_c_code(
    name: &str,
    parse_table: ParseTable,
    main_lex_table: LexTable,
    keyword_lex_table: LexTable,
    keyword_capture_token: Option<Symbol>,
    syntax_grammar: SyntaxGrammar,
    lexical_grammar: LexicalGrammar,
    simple_aliases: AliasMap,
    next_abi: bool,
) -> String {
    // Seed a Generator with empty output state (buffer, id maps, counters);
    // `generate` consumes it and returns the finished C source as a String.
    // `large_state_count` starts at 0 and is computed in `init` when
    // `next_abi` is enabled.
    Generator {
        buffer: String::new(),
        indent_level: 0,
        language_name: name.to_string(),
        large_state_count: 0,
        parse_table,
        main_lex_table,
        keyword_lex_table,
        keyword_capture_token,
        syntax_grammar,
        lexical_grammar,
        simple_aliases,
        symbol_ids: HashMap::new(),
        symbol_order: HashMap::new(),
        alias_ids: HashMap::new(),
        alias_map: BTreeMap::new(),
        field_names: Vec::new(),
        next_abi,
    }
    .generate()
}

#[cfg(test)]
mod tests {
    use super::*;

    // Checks that `Generator::get_ranges` coalesces a sorted character list
    // into contiguous ranges, treating ruled-out characters as "don't care"
    // gaps that may be absorbed into a surrounding range.
    #[test]
    fn test_get_char_ranges() {
        // One table row: input characters, characters already ruled out by
        // earlier conditions, and the ranges expected in the output.
        struct Row {
            chars: Vec<char>,
            ruled_out_chars: Vec<char>,
            expected_ranges: Vec<Range<char>>,
        }

        let table = [
            // A single character yields a single degenerate range.
            Row {
                chars: vec!['a'],
                ruled_out_chars: vec![],
                expected_ranges: vec!['a'..'a'],
            },
            // Adjacent characters merge; isolated ones stay degenerate.
            Row {
                chars: vec!['a', 'b', 'c', 'e', 'z'],
                ruled_out_chars: vec![],
                expected_ranges: vec!['a'..'c', 'e'..'e', 'z'..'z'],
            },
            // Ruled-out characters ('d', 'f', 'g') bridge the gaps a..h.
            Row {
                chars: vec!['a', 'b', 'c', 'e', 'h', 'z'],
                ruled_out_chars: vec!['d', 'f', 'g'],
                expected_ranges: vec!['a'..'h', 'z'..'z'],
            },
        ];

        for Row {
            chars,
            ruled_out_chars,
            expected_ranges,
        } in table.iter()
        {
            let ruled_out_chars = ruled_out_chars
                .into_iter()
                .map(|c: &char| *c as u32)
                .collect();
            let ranges = Generator::get_ranges(chars, &ruled_out_chars).collect::<Vec<_>>();
            assert_eq!(ranges, *expected_ranges);
        }
    }
}
34.963636
98
0.47189
712c2ef944b8115196ea0354dce49c7bf7f5bf4e
10,328
use std::fs::File; use std::io::{BufWriter, Result, Write}; use std::path::Path; use super::Object::*; use super::{Dictionary, Document, Object, Stream, StringFormat}; use crate::xref::*; impl Document { /// Save PDF document to specified file path. #[inline] pub fn save<P: AsRef<Path>>(&mut self, path: P) -> Result<File> { let mut file = BufWriter::new(File::create(path)?); self.save_internal(&mut file)?; Ok(file.into_inner()?) } /// Save PDF to arbitrary target #[inline] pub fn save_to<W: Write>(&mut self, target: &mut W) -> Result<()> { self.save_internal(target) } fn save_internal<W: Write>(&mut self, target: &mut W) -> Result<()> { let mut target = CountingWrite { inner: target, bytes_written: 0, }; let mut xref = Xref::new(self.max_id + 1); writeln!(target, "%PDF-{}", self.version)?; for (&(id, generation), object) in &self.objects { if object .type_name() .map(|name| ["ObjStm", "XRef", "Linearized"].contains(&name)) .ok() != Some(true) { Writer::write_indirect_object(&mut target, id, generation, object, &mut xref)?; } } let xref_start = target.bytes_written; Writer::write_xref(&mut target, &xref)?; self.write_trailer(&mut target)?; write!(target, "\nstartxref\n{}\n%%EOF", xref_start)?; Ok(()) } fn write_trailer(&mut self, file: &mut dyn Write) -> Result<()> { self.trailer.set("Size", i64::from(self.max_id + 1)); file.write_all(b"trailer\n")?; Writer::write_dictionary(file, &self.trailer)?; Ok(()) } } pub struct Writer; impl Writer { fn need_separator(object: &Object) -> bool { matches!(*object, Null | Boolean(_) | Integer(_) | Real(_) | Reference(_)) } fn need_end_separator(object: &Object) -> bool { matches!( *object, Null | Boolean(_) | Integer(_) | Real(_) | Name(_) | Reference(_) | Object::Stream(_) ) } fn write_xref(file: &mut dyn Write, xref: &Xref) -> Result<()> { writeln!(file, "xref\n0 {}", xref.size)?; let mut write_xref_entry = |offset: u32, generation: u16, kind: char| writeln!(file, "{:>010} {:>05} {} ", offset, generation, kind); 
write_xref_entry(0, 65535, 'f')?; let mut obj_id = 1; while obj_id < xref.size { if let Some(entry) = xref.get(obj_id) { if let XrefEntry::Normal { offset, generation } = *entry { write_xref_entry(offset, generation, 'n')?; }; } else { write_xref_entry(0, 65535, 'f')?; } obj_id += 1; } Ok(()) } fn write_indirect_object<W: Write>( file: &mut CountingWrite<&mut W>, id: u32, generation: u16, object: &Object, xref: &mut Xref, ) -> Result<()> { let offset = file.bytes_written as u32; xref.insert(id, XrefEntry::Normal { offset, generation }); write!( file, "{} {} obj{}", id, generation, if Writer::need_separator(object) { " " } else { "" } )?; Writer::write_object(file, object)?; writeln!( file, "{}endobj", if Writer::need_end_separator(object) { " " } else { "" } )?; Ok(()) } pub fn write_object(file: &mut dyn Write, object: &Object) -> Result<()> { match *object { Null => file.write_all(b"null"), Boolean(ref value) => { if *value { file.write_all(b"true") } else { file.write_all(b"false") } } Integer(ref value) => { let _ = itoa::write(file, *value); Ok(()) } Real(ref value) => file.write_all(format!("{:.02?}", *value).as_bytes()), Name(ref name) => Writer::write_name(file, name), String(ref text, ref format) => Writer::write_string(file, text, format), Array(ref array) => Writer::write_array(file, array), Object::Dictionary(ref dict) => Writer::write_dictionary(file, dict), Object::Stream(ref stream) => Writer::write_stream(file, stream), Reference(ref id) => write!(file, "{} {} R", id.0, id.1), } } fn write_name(file: &mut dyn Write, name: &[u8]) -> Result<()> { file.write_all(b"/")?; for &byte in name { // white-space and delimiter chars are encoded to # sequences // also encode bytes outside of the range 33 (!) 
to 126 (~) if b" \t\n\r\x0C()<>[]{}/%#".contains(&byte) || byte < 33 || byte > 126 { write!(file, "#{:02X}", byte)?; } else { file.write_all(&[byte])?; } } Ok(()) } fn write_string(file: &mut dyn Write, text: &[u8], format: &StringFormat) -> Result<()> { match *format { // Within a Literal string, backslash (\) and unbalanced parentheses should be escaped. // This rule apply to each individual byte in a string object, // whether the string is interpreted as single-byte or multiple-byte character codes. // If an end-of-line marker appears within a literal string without a preceding backslash, the result is equivalent to \n. // So \r also need be escaped. StringFormat::Literal => { let mut escape_indice = Vec::new(); let mut parentheses = Vec::new(); for (index, &byte) in text.iter().enumerate() { match byte { b'(' => parentheses.push(index), b')' => { if !parentheses.is_empty() { parentheses.pop(); } else { escape_indice.push(index); } } b'\\' | b'\r' => escape_indice.push(index), _ => continue, } } escape_indice.append(&mut parentheses); file.write_all(b"(")?; if !escape_indice.is_empty() { for (index, &byte) in text.iter().enumerate() { if escape_indice.contains(&index) { file.write_all(b"\\")?; file.write_all(&[if byte == b'\r' { b'r' } else { byte }])?; } else { file.write_all(&[byte])?; } } } else { file.write_all(text)?; } file.write_all(b")")?; } StringFormat::Hexadecimal => { file.write_all(b"<")?; for &byte in text { write!(file, "{:02X}", byte)?; } file.write_all(b">")?; } } Ok(()) } fn write_array(file: &mut dyn Write, array: &[Object]) -> Result<()> { file.write_all(b"[")?; let mut first = true; for object in array { if first { first = false; } else if Writer::need_separator(object) { file.write_all(b" ")?; } Writer::write_object(file, object)?; } file.write_all(b"]")?; Ok(()) } fn write_dictionary(file: &mut dyn Write, dictionary: &Dictionary) -> Result<()> { file.write_all(b"<<")?; for (key, value) in dictionary { Writer::write_name(file, key)?; if 
Writer::need_separator(value) { file.write_all(b" ")?; } Writer::write_object(file, value)?; } file.write_all(b">>")?; Ok(()) } fn write_stream(file: &mut dyn Write, stream: &Stream) -> Result<()> { Writer::write_dictionary(file, &stream.dict)?; file.write_all(b"stream\n")?; file.write_all(&stream.content)?; file.write_all(b"endstream")?; Ok(()) } } pub struct CountingWrite<W: Write> { inner: W, bytes_written: usize, } impl<W: Write> Write for CountingWrite<W> { #[inline] fn write(&mut self, buffer: &[u8]) -> Result<usize> { let result = self.inner.write(buffer); if let Ok(bytes) = result { self.bytes_written += bytes; } result } #[inline] fn write_all(&mut self, buffer: &[u8]) -> Result<()> { self.bytes_written += buffer.len(); // If this returns `Err` we can’t know how many bytes were actually written (if any) // but that doesn’t matter since we’re gonna abort the entire PDF generation anyway. self.inner.write_all(buffer) } #[inline] fn flush(&mut self) -> Result<()> { self.inner.flush() } } #[test] fn save_document() { let mut doc = Document::with_version("1.5"); doc.objects.insert((1, 0), Null); doc.objects.insert((2, 0), Boolean(true)); doc.objects.insert((3, 0), Integer(3)); doc.objects.insert((4, 0), Real(0.5)); doc.objects .insert((5, 0), String("text((\r)".as_bytes().to_vec(), StringFormat::Literal)); doc.objects.insert( (6, 0), String("text((\r)".as_bytes().to_vec(), StringFormat::Hexadecimal), ); doc.objects.insert((7, 0), Name(b"name \t".to_vec())); doc.objects.insert((8, 0), Reference((1, 0))); doc.objects .insert((9, 2), Array(vec![Integer(1), Integer(2), Integer(3)])); doc.objects .insert((11, 0), Stream(Stream::new(Dictionary::new(), vec![0x41, 0x42, 0x43]))); let mut dict = Dictionary::new(); dict.set("A", Null); dict.set("B", false); dict.set("C", Name(b"name".to_vec())); doc.objects.insert((12, 0), Object::Dictionary(dict)); doc.max_id = 12; doc.save("test_0_save.pdf").unwrap(); }
34.426667
134
0.490802
9b4085f2a961b822d3245e89799a997480f6e814
7,794
#[doc = "Register `sf2_if_io_dly_0` reader"] pub struct R(crate::R<SF2_IF_IO_DLY_0_SPEC>); impl core::ops::Deref for R { type Target = crate::R<SF2_IF_IO_DLY_0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<SF2_IF_IO_DLY_0_SPEC>> for R { fn from(reader: crate::R<SF2_IF_IO_DLY_0_SPEC>) -> Self { R(reader) } } #[doc = "Register `sf2_if_io_dly_0` writer"] pub struct W(crate::W<SF2_IF_IO_DLY_0_SPEC>); impl core::ops::Deref for W { type Target = crate::W<SF2_IF_IO_DLY_0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<SF2_IF_IO_DLY_0_SPEC>> for W { fn from(writer: crate::W<SF2_IF_IO_DLY_0_SPEC>) -> Self { W(writer) } } #[doc = "Field `sf2_dqs_do_dly_sel` reader - "] pub struct SF2_DQS_DO_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_DO_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_DO_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_DO_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_do_dly_sel` writer - "] pub struct SF2_DQS_DO_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_DO_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 30)) | (((value as u32) & 0x03) << 30); self.w } } #[doc = "Field `sf2_dqs_di_dly_sel` reader - "] pub struct SF2_DQS_DI_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_DI_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_DI_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_DI_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_di_dly_sel` 
writer - "] pub struct SF2_DQS_DI_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_DI_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 28)) | (((value as u32) & 0x03) << 28); self.w } } #[doc = "Field `sf2_dqs_oe_dly_sel` reader - "] pub struct SF2_DQS_OE_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_DQS_OE_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_DQS_OE_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_DQS_OE_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_dqs_oe_dly_sel` writer - "] pub struct SF2_DQS_OE_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_DQS_OE_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 26)) | (((value as u32) & 0x03) << 26); self.w } } #[doc = "Field `sf2_clk_out_dly_sel` reader - "] pub struct SF2_CLK_OUT_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_CLK_OUT_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_CLK_OUT_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for SF2_CLK_OUT_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_clk_out_dly_sel` writer - "] pub struct SF2_CLK_OUT_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_CLK_OUT_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8); self.w } } #[doc = "Field `sf2_cs_dly_sel` reader - "] pub struct SF2_CS_DLY_SEL_R(crate::FieldReader<u8, u8>); impl SF2_CS_DLY_SEL_R { pub(crate) fn new(bits: u8) -> Self { SF2_CS_DLY_SEL_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for 
SF2_CS_DLY_SEL_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `sf2_cs_dly_sel` writer - "] pub struct SF2_CS_DLY_SEL_W<'a> { w: &'a mut W, } impl<'a> SF2_CS_DLY_SEL_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x03) | ((value as u32) & 0x03); self.w } } impl R { #[doc = "Bits 30:31"] #[inline(always)] pub fn sf2_dqs_do_dly_sel(&self) -> SF2_DQS_DO_DLY_SEL_R { SF2_DQS_DO_DLY_SEL_R::new(((self.bits >> 30) & 0x03) as u8) } #[doc = "Bits 28:29"] #[inline(always)] pub fn sf2_dqs_di_dly_sel(&self) -> SF2_DQS_DI_DLY_SEL_R { SF2_DQS_DI_DLY_SEL_R::new(((self.bits >> 28) & 0x03) as u8) } #[doc = "Bits 26:27"] #[inline(always)] pub fn sf2_dqs_oe_dly_sel(&self) -> SF2_DQS_OE_DLY_SEL_R { SF2_DQS_OE_DLY_SEL_R::new(((self.bits >> 26) & 0x03) as u8) } #[doc = "Bits 8:9"] #[inline(always)] pub fn sf2_clk_out_dly_sel(&self) -> SF2_CLK_OUT_DLY_SEL_R { SF2_CLK_OUT_DLY_SEL_R::new(((self.bits >> 8) & 0x03) as u8) } #[doc = "Bits 0:1"] #[inline(always)] pub fn sf2_cs_dly_sel(&self) -> SF2_CS_DLY_SEL_R { SF2_CS_DLY_SEL_R::new((self.bits & 0x03) as u8) } } impl W { #[doc = "Bits 30:31"] #[inline(always)] pub fn sf2_dqs_do_dly_sel(&mut self) -> SF2_DQS_DO_DLY_SEL_W { SF2_DQS_DO_DLY_SEL_W { w: self } } #[doc = "Bits 28:29"] #[inline(always)] pub fn sf2_dqs_di_dly_sel(&mut self) -> SF2_DQS_DI_DLY_SEL_W { SF2_DQS_DI_DLY_SEL_W { w: self } } #[doc = "Bits 26:27"] #[inline(always)] pub fn sf2_dqs_oe_dly_sel(&mut self) -> SF2_DQS_OE_DLY_SEL_W { SF2_DQS_OE_DLY_SEL_W { w: self } } #[doc = "Bits 8:9"] #[inline(always)] pub fn sf2_clk_out_dly_sel(&mut self) -> SF2_CLK_OUT_DLY_SEL_W { SF2_CLK_OUT_DLY_SEL_W { w: self } } #[doc = "Bits 0:1"] #[inline(always)] pub fn sf2_cs_dly_sel(&mut self) -> SF2_CS_DLY_SEL_W { SF2_CS_DLY_SEL_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: 
u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "sf2_if_io_dly_0.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [sf2_if_io_dly_0](index.html) module"] pub struct SF2_IF_IO_DLY_0_SPEC; impl crate::RegisterSpec for SF2_IF_IO_DLY_0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [sf2_if_io_dly_0::R](R) reader structure"] impl crate::Readable for SF2_IF_IO_DLY_0_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [sf2_if_io_dly_0::W](W) writer structure"] impl crate::Writable for SF2_IF_IO_DLY_0_SPEC { type Writer = W; } #[doc = "`reset()` method sets sf2_if_io_dly_0 to value 0"] impl crate::Resettable for SF2_IF_IO_DLY_0_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
31.942623
412
0.6165
1a5a3aed6047bbd4b46557750019d0fae80fc371
3,280
use time::{at_utc, Tm, Timespec}; use json::{FromJsonnable, ToJsonnable}; use serde::de::{Error, Type}; use serde_json::value::Value; use serde_json::error::Error as JsonError; use serde_json::builder::ObjectBuilder; impl FromJsonnable for Tm { // Deserialize via Timespec fn from_json(json: Value) -> Result<Self, JsonError> { match json { Value::Object(map) => { let sec = match map.get("sec") { Some(sec) => match sec { &Value::I64(sec) => sec, &Value::U64(sec) => sec as i64, // The types get weird here _ => return Err(JsonError::invalid_type(Type::I64)), }, None => return Err(JsonError::missing_field("Missing \"sec\"")), }; let nsec = match map.get("nsec") { Some(nsec) => match nsec { &Value::I64(nsec) => nsec as i32, &Value::U64(nsec) => nsec as i32, _ => return Err(JsonError::invalid_type(Type::I32)), }, None => return Err(JsonError::missing_field("Missing \"nsec\"")), }; Ok(at_utc(Timespec::new(sec, nsec))) }, _ => Err(JsonError::invalid_type(Type::Struct)), } } } impl ToJsonnable for Tm { // Serialize via Timespec fn to_json(&self) -> Value { let spec = self.to_timespec(); ObjectBuilder::new().insert("sec", &spec.sec) .insert("nsec", &spec.nsec) .build() } } impl<T: FromJsonnable> FromJsonnable for Vec<T> { fn from_json(json: Value) -> Result<Self, JsonError> { match json { Value::Array(arr) => { let mut elems: Vec<T> = Vec::with_capacity(arr.len()); for elem in arr { match T::from_json(elem) { Ok(elem) => elems.push(elem), Err(e) => return Err(e), } } Ok(elems) }, _ => Err(JsonError::invalid_type(Type::Seq)), } } } impl<T: ToJsonnable> ToJsonnable for Vec<T> { fn to_json(&self) -> Value { Value::Array(self.iter().map(|ref elem| elem.to_json()).collect()) } } macro_rules! primitive_to_json { ($t:ty, $v:ident, $d:ty) => { impl ToJsonnable for $t { fn to_json(&self) -> Value { Value::$v(self.clone() as $d) } } }; } macro_rules! 
primitive_from_json { ($t:ty, $expected:ident, $($v:ident)+) => { impl FromJsonnable for $t { fn from_json(json: Value) -> Result<Self, JsonError> { match json { $( Value::$v(value) => Ok(value as $t), )+ _ => Err(JsonError::invalid_type(Type::$expected)), } } } }; } primitive_to_json!(i8, I64, i64); primitive_to_json!(i16, I64, i64); primitive_to_json!(i32, I64, i64); primitive_to_json!(i64, I64, i64); primitive_to_json!(u8, U64, u64); primitive_to_json!(u16, U64, u64); primitive_to_json!(u32, U64, u64); primitive_to_json!(u64, U64, u64); primitive_to_json!(f32, F64, f64); primitive_to_json!(f64, F64, f64); primitive_from_json!(i8, I64, I64 U64); // Non-signed numbers are interpreted as unsigned primitive_from_json!(i16, I64, I64 U64); primitive_from_json!(i32, I64, I64 U64); primitive_from_json!(i64, I64, I64 U64); primitive_from_json!(u8, U64, U64); primitive_from_json!(u16, U64, U64); primitive_from_json!(u32, U64, U64); primitive_from_json!(u64, U64, U64); primitive_from_json!(f32, F64, F64); primitive_from_json!(f64, F64, F64);
25.625
91
0.609756
21ce975aa499e678ae7390df06dbada4721c1ede
2,040
/* * MIT License * * Copyright (c) 2017 Robert Swain <robert.swain@gmail.com * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ use color::Color; use glapp::*; use na::Point3; pub trait Shape { fn vertex_data(&self) -> &[f32]; fn index_data(&self) -> &[u32]; fn vertex_shader(&self) -> Option<String>; fn fragment_shader(&self) -> Option<String>; fn draw(&self); fn is_stroke(&self) -> bool; } pub fn assign_vertex(p: &Point3<f32>, uv: &[f32], c: &Color, vd: &mut [f32]) { vd[0] = p.x; vd[1] = p.y; vd[2] = p.z; vd[3] = uv[0]; vd[4] = uv[1]; vd[5] = c.x; vd[6] = c.y; vd[7] = c.z; vd[8] = c.w; } pub fn draw(shape: &Shape) { let vertex_data = shape.vertex_data(); let index_data = shape.index_data(); let n_triangles = index_data.len() - 2; append_data(vertex_data, index_data); let shader_program = get_shader_program(shape.vertex_shader(), shape.fragment_shader()); append_shape(shader_program, n_triangles as u32); }
34.576271
92
0.687745
9c160a1b63fcb98bc07a70ab5284f1f0ba50f713
4,150
#[doc = "Register `DIEPTSIZ0` reader"] pub struct R(crate::R<DIEPTSIZ0_SPEC>); impl core::ops::Deref for R { type Target = crate::R<DIEPTSIZ0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl From<crate::R<DIEPTSIZ0_SPEC>> for R { #[inline(always)] fn from(reader: crate::R<DIEPTSIZ0_SPEC>) -> Self { R(reader) } } #[doc = "Register `DIEPTSIZ0` writer"] pub struct W(crate::W<DIEPTSIZ0_SPEC>); impl core::ops::Deref for W { type Target = crate::W<DIEPTSIZ0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<crate::W<DIEPTSIZ0_SPEC>> for W { #[inline(always)] fn from(writer: crate::W<DIEPTSIZ0_SPEC>) -> Self { W(writer) } } #[doc = "Field `XferSize` reader - Transfer Size"] pub struct XFERSIZE_R(crate::FieldReader<u8, u8>); impl XFERSIZE_R { pub(crate) fn new(bits: u8) -> Self { XFERSIZE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for XFERSIZE_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `XferSize` writer - Transfer Size"] pub struct XFERSIZE_W<'a> { w: &'a mut W, } impl<'a> XFERSIZE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !0x7f) | (value as u32 & 0x7f); self.w } } #[doc = "Field `PktCnt` reader - Packet Count"] pub struct PKTCNT_R(crate::FieldReader<u8, u8>); impl PKTCNT_R { pub(crate) fn new(bits: u8) -> Self { PKTCNT_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PKTCNT_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PktCnt` writer - Packet Count"] pub struct PKTCNT_W<'a> { w: &'a mut W, } impl<'a> PKTCNT_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) 
-> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 19)) | ((value as u32 & 0x03) << 19); self.w } } impl R { #[doc = "Bits 0:6 - Transfer Size"] #[inline(always)] pub fn xfer_size(&self) -> XFERSIZE_R { XFERSIZE_R::new((self.bits & 0x7f) as u8) } #[doc = "Bits 19:20 - Packet Count"] #[inline(always)] pub fn pkt_cnt(&self) -> PKTCNT_R { PKTCNT_R::new(((self.bits >> 19) & 0x03) as u8) } } impl W { #[doc = "Bits 0:6 - Transfer Size"] #[inline(always)] pub fn xfer_size(&mut self) -> XFERSIZE_W { XFERSIZE_W { w: self } } #[doc = "Bits 19:20 - Packet Count"] #[inline(always)] pub fn pkt_cnt(&mut self) -> PKTCNT_W { PKTCNT_W { w: self } } #[doc = "Writes raw bits to the register."] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Device IN Endpoint Transfer Size Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [dieptsiz0](index.html) module"] pub struct DIEPTSIZ0_SPEC; impl crate::RegisterSpec for DIEPTSIZ0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [dieptsiz0::R](R) reader structure"] impl crate::Readable for DIEPTSIZ0_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [dieptsiz0::W](W) writer structure"] impl crate::Writable for DIEPTSIZ0_SPEC { type Writer = W; } #[doc = "`reset()` method sets DIEPTSIZ0 to value 0"] impl crate::Resettable for DIEPTSIZ0_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
29.856115
431
0.596386
bb99a4196d0eb51b7c7bebbe4b4dc36acefd293c
6,580
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use bn254CX::big::NLEN; use super::super::arch::Chunk; use types::{ModType, CurveType, CurvePairingType, SexticTwist, SignOfX}; // Base Bits= 28 // bn254CX Modulus pub const MODULUS: [Chunk; NLEN] = [ 0xC1B55B3, 0x6623EF5, 0x93EE1BE, 0xD6EE180, 0x6D3243F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ]; pub const R2MODP: [Chunk; NLEN] = [ 0x8A0800A, 0x466A061, 0x43056A3, 0x2B3A225, 0x9C6600, 0x148515B, 0x6BDF50, 0xEC9EA56, 0xC992E66, 0x1, ]; pub const MCONST: Chunk = 0x9789E85; pub const FRA: [Chunk; NLEN] = [ 0x5C80EA3, 0xD908335, 0x3F8215B, 0x7326F17, 0x8986867, 0x8AACA71, 0x4AFE18B, 0xA63A016, 0x359082F, 0x1, ]; pub const FRB: [Chunk; NLEN] = [ 0x6534710, 0x8D1BBC0, 0x546C062, 0x63C7269, 0xE3ABBD8, 0xD9CDBC4, 0x900DC53, 0x623628A, 0xA6F7D0, 0x1, ]; // bn254CX Curve pub const CURVE_COF_I: isize = 1; pub const CURVE_A: isize = 0; pub const CURVE_B_I: isize = 2; pub const CURVE_B: [Chunk; NLEN] = [0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]; pub const CURVE_ORDER: [Chunk; NLEN] = [ 0x6EB1F6D, 0x11C0A63, 0x906CEBE, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ]; pub const CURVE_GX: [Chunk; NLEN] = [ 0xC1B55B2, 0x6623EF5, 0x93EE1BE, 0xD6EE180, 0x6D3243F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 
0x2, ]; pub const CURVE_GY: [Chunk; NLEN] = [0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]; pub const CURVE_BNX: [Chunk; NLEN] = [0x3C012B1, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]; pub const CURVE_COF: [Chunk; NLEN] = [0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0]; pub const CURVE_CRU: [Chunk; NLEN] = [ 0x4235C97, 0xE093179, 0xF875631, 0xDF6471E, 0xF1440BD, 0xCA83, 0x480000, 0x0, 0x0, 0x0, ]; pub const CURVE_PXA: [Chunk; NLEN] = [ 0x4D2EC74, 0x851CEEE, 0xE2726C0, 0x85BFA03, 0xBBB907C, 0xF5C34, 0x6358B25, 0x7053B25, 0x9682D2C, 0x1, ]; pub const CURVE_PXB: [Chunk; NLEN] = [ 0xE29CFE1, 0xA58E8B2, 0x9C30F47, 0x97B0C20, 0x743F81B, 0x37A8E99, 0xAA011C9, 0x3E19F64, 0x466B9EC, 0x1, ]; pub const CURVE_PYA: [Chunk; NLEN] = [ 0xF0BE09F, 0xFBFCEBC, 0xEC1B30C, 0xB33D847, 0x2096361, 0x157DAEE, 0xDD81E22, 0x72332B8, 0xA79EDD9, 0x0, ]; pub const CURVE_PYB: [Chunk; NLEN] = [ 0x898EE9D, 0x904B228, 0x2EDEBED, 0x4EA569D, 0x461C286, 0x512D8D3, 0x35C6E4, 0xECC4C09, 0x6160C39, 0x0, ]; pub const CURVE_W: [[Chunk; NLEN]; 2] = [ [ 0x62FEB83, 0x5463491, 0x381200, 0xB4, 0x6000, 0x0, 0x0, 0x0, 0x0, 0x0, ], [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], ]; pub const CURVE_SB: [[[Chunk; NLEN]; 2]; 2] = [ [ [ 0xDB010E4, 0x5463491, 0x381280, 0xB4, 0x6000, 0x0, 0x0, 0x0, 0x0, 0x0, ], [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], ], [ [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [ 0xBB33EA, 0xBD5D5D2, 0x8CEBCBD, 0xD6EE018, 0x6D2643F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], ], ]; pub const CURVE_WB: [[Chunk; NLEN]; 4] = [ [ 0x67A84B0, 0x1C21185, 0x12B040, 0x3C, 0x2000, 0x0, 0x0, 0x0, 0x0, 0x0, ], [ 0xE220475, 0xCDF995B, 0xA7F9A36, 0x94EDA8C, 0xA0DC07E, 0x8702, 0x300000, 0x0, 0x0, 0x0, ], [ 0xF10B93, 0x66FCCAE, 0x53FCD3B, 0x4A76D46, 0x506E03F, 0x4381, 0x180000, 0x0, 0x0, 0x0, ], [ 0xDFAAA11, 0x1C21185, 0x12B0C0, 0x3C, 0x2000, 0x0, 0x0, 0x0, 0x0, 0x0, ], ]; pub const CURVE_BB: [[[Chunk; NLEN]; 4]; 4] = [ [ [ 0x32B0CBD, 0x11C0A63, 
0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [ 0x32B0CBC, 0x11C0A63, 0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [ 0x32B0CBC, 0x11C0A63, 0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [0x7802562, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], ], [ [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [ 0x32B0CBC, 0x11C0A63, 0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [ 0x32B0CBD, 0x11C0A63, 0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [ 0x32B0CBC, 0x11C0A63, 0x906CE7E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], ], [ [0x7802562, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [0x7802561, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], ], [ [0x3C012B2, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [0xF004AC2, 0x0, 0x100, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], [ 0xF6AFA0A, 0x11C0A62, 0x906CE3E, 0xD6EE0CC, 0x6D2C43F, 0x647A636, 0xDB0BDDF, 0x8702A0, 0x4000000, 0x2, ], [0x3C012B2, 0x0, 0x40, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0], ], ]; pub const USE_GLV: bool = true; pub const USE_GS_G2: bool = true; pub const USE_GS_GT: bool = true; pub const GT_STRONG: bool = true; pub const MODBYTES: usize = 32; pub const BASEBITS: usize = 28; pub const MODBITS: usize = 254; pub const MOD8: usize = 3; pub const MODTYPE: ModType = ModType::NOT_SPECIAL; pub const SH: usize = 14; pub const CURVETYPE: CurveType = CurveType::WEIERSTRASS; pub const CURVE_PAIRING_TYPE: CurvePairingType = CurvePairingType::BN; pub const SEXTIC_TWIST: SexticTwist = SexticTwist::D_TYPE; pub const SIGN_OF_X: SignOfX = SignOfX::NEGATIVEX; pub const HASH_TYPE: usize = 32; pub const AESKEY: usize = 16;
35.956284
98
0.64848
d78a7959bda4d17f26ae0244b992a00d6d97b053
2,245
extern crate birl; fn main(){ /* Print heading info. */ eprintln!("O SHELL QUE CONSTRÓI FIBRA. VERSÃO {}", env!("CARGO_PKG_VERSION")); eprintln!("BIRL © 2018, RAFAEL RODRIGUES NAKANO."); eprintln!("SHELL © 2018, MATHEUS BRANCO BORELLA."); eprintln!(); /* Setup BIRL. * The Context interpreter is not built to run interactively, * by default, as it lacks much of the framework that would be * needed to properly implement an interactive shell with * immidiate evaluation of expresssions. * * So, as a workaround, the shell cycles the interpreter manually * to the completion of all instructions added by a line. To * achieve that, we first call the root function, as any * expression typed into the interpreted will be compiled as a * root-level instruction. */ use birl::context::Context; let mut c = Context::new(); use birl::context::BIRL_GLOBAL_FUNCTION_ID; c.call_function_by_id(BIRL_GLOBAL_FUNCTION_ID, vec![]) .expect("Could not setup BIRL runtime."); /* Bind the Context interpreter to standard IO */ let _ = c.set_stdin({ use std::io; let reader = io::BufReader::new(io::stdin()); Some(Box::new(reader)) }); let _ = c.set_stdout({ use std::io; Some(Box::new(io::stdout())) }); /* Enter interactive loop */ use std::io::{stdin, BufReader, BufRead}; let mut prompt = BufReader::new(stdin()); loop{ eprint!("> "); let mut line = String::new(); match prompt.read_line(&mut line){ Ok(count) => if count == 0 { eprintln!("Reached end of input."); break }, Err(what) => { eprintln!("A read error occured: {:?}", what); break } } /* Parse and evaluate */ if let Err(what) = c.process_line(&line){ eprintln!("{}", what); } else { /* Drives the currently pending instructions to * completion. 
*/ let mut saturate = || loop{ let status = c.execute_next_instruction(); use birl::vm::ExecutionStatus as Es; match status{ Ok(Es::Quit) => break Ok(()), Err(what) => break Err(what), _ => {} } }; if let Err(what) = saturate(){ eprintln!("{}", what); } } } /* Make sure the output is flushed */ c.set_stdout(None).unwrap().flush() .expect("Could not flush io::stdout()."); }
26.411765
79
0.638307
671d2e3837c798d8bad9bc820be0e4c4edd602d8
4,775
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use middle::ty::{BuiltinBounds};
use middle::ty::{mod, Ty};
use middle::typeck::infer::combine::*;
use middle::typeck::infer::lattice::*;
use middle::typeck::infer::equate::Equate;
use middle::typeck::infer::higher_ranked::HigherRankedRelations;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::sub::Sub;
use middle::typeck::infer::{cres, InferCtxt};
use middle::typeck::infer::{TypeTrace, Subtype};
use syntax::ast::{Many, Once, MutImmutable, MutMutable};
use syntax::ast::{NormalFn, UnsafeFn};
use syntax::ast::{Onceness, FnStyle};
use util::ppaux::mt_to_string;
use util::ppaux::Repr;

/// "Greatest lower bound" (common subtype)
///
/// One of the lattice combiners used during type inference: each
/// `Combine` method below merges a pair of values into the most
/// general value that is still a subtype of both inputs.
pub struct Glb<'f, 'tcx: 'f> {
    // Shared state (inference context, trace, expected-side flag)
    // common to all the lattice combiners.
    fields: CombineFields<'f, 'tcx>
}

/// Constructor; capitalized to mirror the type name, hence the lint allow.
#[allow(non_snake_case)]
pub fn Glb<'f, 'tcx>(cf: CombineFields<'f, 'tcx>) -> Glb<'f, 'tcx> {
    Glb { fields: cf }
}

impl<'f, 'tcx> Combine<'tcx> for Glb<'f, 'tcx> {
    fn infcx<'a>(&'a self) -> &'a InferCtxt<'a, 'tcx> { self.fields.infcx }
    fn tag(&self) -> String { "glb".to_string() }
    fn a_is_expected(&self) -> bool { self.fields.a_is_expected }
    fn trace(&self) -> TypeTrace<'tcx> { self.fields.trace.clone() }

    // Sibling combiners built over the same shared fields, so a GLB
    // computation can delegate sub-problems to equality, subtyping,
    // or the dual (LUB) relation.
    fn equate<'a>(&'a self) -> Equate<'a, 'tcx> { Equate(self.fields.clone()) }
    fn sub<'a>(&'a self) -> Sub<'a, 'tcx> { Sub(self.fields.clone()) }
    fn lub<'a>(&'a self) -> Lub<'a, 'tcx> { Lub(self.fields.clone()) }
    fn glb<'a>(&'a self) -> Glb<'a, 'tcx> { Glb(self.fields.clone()) }

    /// GLB of two mutability-qualified types (`ty::mt`).
    fn mts(&self, a: &ty::mt<'tcx>, b: &ty::mt<'tcx>) -> cres<'tcx, ty::mt<'tcx>> {
        let tcx = self.fields.infcx.tcx;

        debug!("{}.mts({}, {})",
               self.tag(),
               mt_to_string(tcx, a),
               mt_to_string(tcx, b));

        match (a.mutbl, b.mutbl) {
            // If one side or both is mut, then the GLB must use
            // the precise type from the mut side.
            (MutMutable, MutMutable) => {
                let t = try!(self.equate().tys(a.ty, b.ty));
                Ok(ty::mt {ty: t, mutbl: MutMutable})
            }

            // If one side or both is immutable, we can use the GLB of
            // both sides but mutbl must be `MutImmutable`.
            (MutImmutable, MutImmutable) => {
                let t = try!(self.tys(a.ty, b.ty));
                Ok(ty::mt {ty: t, mutbl: MutImmutable})
            }

            // There is no mutual subtype of these combinations.
            (MutMutable, MutImmutable) |
            (MutImmutable, MutMutable) => {
                Err(ty::terr_mutability)
            }
        }
    }

    // Contravariant position: the GLB of contravariant inputs is the
    // LUB of the inputs, so delegate to the dual combiner.
    fn contratys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> cres<'tcx, Ty<'tcx>> {
        self.lub().tys(a, b)
    }

    // `NormalFn` is the more permissive style, so it wins whenever
    // either side is normal; only unsafe+unsafe stays unsafe.
    fn fn_styles(&self, a: FnStyle, b: FnStyle) -> cres<'tcx, FnStyle> {
        match (a, b) {
            (NormalFn, _) | (_, NormalFn) => Ok(NormalFn),
            (UnsafeFn, UnsafeFn) => Ok(UnsafeFn)
        }
    }

    // `Many` (callable many times) dominates; only once+once stays `Once`.
    fn oncenesses(&self, a: Onceness, b: Onceness) -> cres<'tcx, Onceness> {
        match (a, b) {
            (Many, _) | (_, Many) => Ok(Many),
            (Once, Once) => Ok(Once)
        }
    }

    fn builtin_bounds(&self,
                      a: ty::BuiltinBounds,
                      b: ty::BuiltinBounds)
                      -> cres<'tcx, ty::BuiltinBounds> {
        // More bounds is a subtype of fewer bounds, so
        // the GLB (mutual subtype) is the union.
        Ok(a.union(b))
    }

    /// GLB of two regions, resolved by the region-inference machinery.
    fn regions(&self, a: ty::Region, b: ty::Region) -> cres<'tcx, ty::Region> {
        debug!("{}.regions({}, {})",
               self.tag(),
               a.repr(self.fields.infcx.tcx),
               b.repr(self.fields.infcx.tcx));

        Ok(self.fields.infcx.region_vars.glb_regions(Subtype(self.trace()), a, b))
    }

    // Contravariant region position: dual of `regions`, so use the LUB.
    fn contraregions(&self, a: ty::Region, b: ty::Region)
                     -> cres<'tcx, ty::Region> {
        self.lub().regions(a, b)
    }

    // Generic lattice walk over two types, shared with the LUB combiner.
    fn tys(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> cres<'tcx, Ty<'tcx>> {
        super_lattice_tys(self, a, b)
    }

    // Higher-ranked (bound-lifetime) signatures and trait refs go
    // through the dedicated higher-ranked GLB machinery.
    fn fn_sigs(&self, a: &ty::FnSig<'tcx>, b: &ty::FnSig<'tcx>)
               -> cres<'tcx, ty::FnSig<'tcx>> {
        self.higher_ranked_glb(a, b)
    }

    fn trait_refs(&self, a: &ty::TraitRef<'tcx>, b: &ty::TraitRef<'tcx>)
                  -> cres<'tcx, ty::TraitRef<'tcx>> {
        self.higher_ranked_glb(a, b)
    }
}
35.37037
83
0.54911
08b6c3141a1e3acb8730359768b0c1f616f8f582
227
/// Returns the byte index of the first ASCII space in `s`, or `s.len()`
/// when the string contains no space (i.e. the length of the first word).
///
/// Fixes: the parameter was `&String`, which forces callers to own a
/// `String`; `&str` accepts both (existing `&String` call sites still
/// compile via deref coercion). The manual byte loop is replaced by
/// `str::find`, which returns the same byte offset for an ASCII-space
/// pattern (a space is a single byte in UTF-8).
pub fn slice_1(s: &str) -> usize {
    s.find(' ').unwrap_or(s.len())
}
20.636364
48
0.440529
90c27ca74fcc3edb114c59fd88b2c4295d2edd90
3,206
<?xml version="1.0" encoding="UTF-8"?> <WebElementEntity> <description></description> <name>i_menu</name> <tag></tag> <elementGuidId>3075b26f-8515-4202-9dc2-329f156e4fb3</elementGuidId> <selectorCollection> <entry> <key>CSS</key> <value>#main-view > div > div.navbar > div > div.left > a</value> </entry> <entry> <key>XPATH</key> <value>//div[@id='main-view']/div/div/div/div/a/i</value> </entry> </selectorCollection> <selectorMethod>CSS</selectorMethod> <useRalativeImagePath>false</useRalativeImagePath> <webElementProperties> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>tag</name> <type>Main</type> <value>i</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>class</name> <type>Main</type> <value>icon material-icons</value> </webElementProperties> <webElementProperties> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>text</name> <type>Main</type> <value>menu</value> </webElementProperties> <webElementProperties> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath</name> <type>Main</type> <value>id(&quot;main-view&quot;)/div[@class=&quot;page page-current&quot;]/div[@class=&quot;navbar&quot;]/div[@class=&quot;navbar-inner sliding&quot;]/div[@class=&quot;left&quot;]/a[@class=&quot;link icon-only panel-open&quot;]/i[@class=&quot;icon material-icons&quot;]</value> </webElementProperties> <webElementXpaths> <isSelected>true</isSelected> <matchCondition>equals</matchCondition> <name>xpath:idRelative</name> <value>//div[@id='main-view']/div/div/div/div/a/i</value> </webElementXpaths> <webElementXpaths> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath:neighbor</name> <value>(.//*[normalize-space(text()) and normalize-space(.)='Form'])[2]/following::i[1]</value> </webElementXpaths> <webElementXpaths> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> 
<name>xpath:neighbor</name> <value>(.//*[normalize-space(text()) and normalize-space(.)='About'])[2]/following::i[1]</value> </webElementXpaths> <webElementXpaths> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath:neighbor</name> <value>(.//*[normalize-space(text()) and normalize-space(.)='Personal Projects'])[2]/preceding::i[1]</value> </webElementXpaths> <webElementXpaths> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath:neighbor</name> <value>(.//*[normalize-space(text()) and normalize-space(.)='search'])[1]/preceding::i[1]</value> </webElementXpaths> <webElementXpaths> <isSelected>false</isSelected> <matchCondition>equals</matchCondition> <name>xpath:position</name> <value>//i</value> </webElementXpaths> </WebElementEntity>
38.166667
283
0.658141
76a4cde02b60d1a7d92e541b1d604136023876c4
66,959
use flo_draw::*; use flo_draw::canvas::*; use flo_curves::*; use flo_curves::bezier::*; use flo_curves::bezier::path::*; use futures::prelude::*; use futures::executor; use futures::stream; use std::f64; use std::thread; use std::time::{Duration, Instant}; /// /// Demonstrates capturing the paths for a complicated rendering and distorting them with a ripple pattern /// pub fn main() { with_2d_graphics(|| { // Decode the mascot rendering let mascot = decode_drawing(MASCOT.chars()).collect::<Result<Vec<Draw>, _>>().unwrap(); // Create a window let canvas = create_drawing_window("Wibbling mascot"); // Convert the mascot to a set of paths let render_mascot = stream::iter(mascot.into_iter()); let mascot_paths = drawing_to_attributed_paths::<SimpleBezierPath, _>(render_mascot); let mascot_paths = executor::block_on(async move { mascot_paths.collect::<Vec<_>>().await }); // Draw the mascot with a moving distortion let start_time = Instant::now(); loop { // Get the current time where we're rendering this let since_start = Instant::now().duration_since(start_time); let since_start = since_start.as_nanos() as f64; let amplitude = 12.0; // Distort each of the paths in turn let distorted_mascot = mascot_paths.iter() .map(|(attributes, path_set)| (attributes, path_set.iter() .map(move |path: &SimpleBezierPath| distort_path::<_, _, SimpleBezierPath>(path, |point: Coord2, _curve, _t| { let distance = point.magnitude(); let ripple = (since_start / (f64::consts::PI * 500_000_000.0)) * 10.0; let offset_x = (distance / (f64::consts::PI*5.0) + ripple).sin() * amplitude * 0.5; let offset_y = (distance / (f64::consts::PI*4.0) + ripple).cos() * amplitude * 0.5; Coord2(point.x() + offset_x, point.y() + offset_y) }, 2.0, 1.0).unwrap()) .collect::<Vec<_>>())); // Render the current frame canvas.draw(|gc| { // Clear the canvas gc.clear_canvas(Color::Rgba(0.7, 0.9, 0.9, 1.0)); gc.canvas_height(768.0); gc.transform(Transform2D::scale(1.0, -1.0)); gc.center_region(0.0, 0.0, 1024.0, 768.0); 
gc.fill_color(Color::Rgba(1.0, 1.0, 1.0, 1.0)); gc.stroke_color(Color::Rgba(0.0, 0.0, 0.0, 1.0)); gc.line_width(6.0); // Render the distorted paths for (attributes, path_set) in distorted_mascot { gc.new_path(); for path in path_set { gc.bezier_path(&path); } for attribute in attributes { match attribute { PathAttribute::Fill(color) => { gc.fill_color(*color); gc.fill(); } PathAttribute::FillGradient(gradient, (x1, y1), (x2, y2), maybe_transform) => { gc.fill_gradient(*gradient, *x1, *y1, *x2, *y2); maybe_transform.map(|transform| gc.fill_transform(transform)); gc.fill(); } PathAttribute::FillTexture(texture, (x1, y1), (x2, y2), maybe_transform) => { gc.fill_texture(*texture, *x1, *y1, *x2, *y2); maybe_transform.map(|transform| gc.fill_transform(transform)); gc.fill(); } PathAttribute::Stroke(width, color) => { gc.line_width(*width); gc.stroke_color(*color); gc.stroke(); } PathAttribute::StrokePixels(width, color) => { gc.line_width_pixels(*width); gc.stroke_color(*color); gc.stroke(); } } } } }); // Wait for the next frame thread::sleep(Duration::from_nanos(1_000_000_000 / 60)); } }); } /// Mascot in canvas encoding form const MASCOT: &'static str = " NARdyJn+A+2bP/AHaoB/AAAAg/A ThAAAQEB TmAAAg/AAAAAAAAAAAAAAAAAAAAAAg/CAAAAAAAAAAAAAAAAAAAAAg/A TcAAAAAAAAAAAAAAAgEBAAAQEB P Tmm8dDBBAAAAAA9oAsEDAAAAAAm8dDBBPyEmFDAAAAAAAAAAAAAAAg/A P P P TmAAAQBBAAAAAAf/oQDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np my2579AAAAAAC ly2579AwKH6+C lKc9I9Anvf6+C c2Of/8AoFZ7+CWO0C9Anvf6+C2Of/8AfU46+C l2Of/8AhAr8+C ly2579AhAr8+C ly2579Ac9oA/C c46RB+AZ78F/Cy2579Ax1jC/CZ7899AzhWE/C cxgAL+AwKHK/CEYlD+A/UjH/C++pG+AKc9I/C c67na+AzMzM/C/pGP+AW5QL/CFueU+AXPKM/C cktzn+A9owN/C3kYg+AGEYN/Cpw1j+A9owN/C cAAAw+AiWkN/C67nq+A9owN/C99Tt+A0isN/C ctIb3+A99TN/Cx1jy+AGEYN/ChrH1+A99TN/C lDXP6+A99TN/C lDXP6+AAAAAAC ljX62+AAAAAAC ljX62+AXPKM/C cJbnv+ApbSM/CqGv0+AXPKM/CMdTy+AgVOM/C ccSMo+AFueM/CGZ7s+A8naM/Cnvfq+AFueM/C c0isd+ANepL/CqGvk+AFueM/CdTih+AXPKM/C cSMIQ+A46RJ/CueUY+A6mEL/CXktT+AMdTK/C 
cShrH+AZQgF/CpbSM+AcSMI/CUNeJ+Atd+G/C cY6mE+Ac9oA/CrcoF+AFDCE/CY6mE+AepbC/C lY6mE+AhAr8+C lrcol+AhAr8+C lrcol+AwKH6+C l8S3E+AwKH6+C l8S3E+AAAAAAC ly2579AAAAAAC . CfRJjMS/Au3eb/AdzNX/AAAAg/A F p P TmAAAQBBAAAAAAdTSSDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np myLdj+AJGEA/C ckCXv+AHv09+C357n+AJGEA/Cy25r+AkCX/+C ccSM4+AIwK3+CWO0y+ApbS8+C9ow1+Ae++5+C ciWk9+AGZ7s+Cnvf6+AzhW0+CpbS8+ABW5w+C c2Of/+AR2Of+CaR2++AKc9o+C2Of/+AFuek+C ciWk9+AzhWE+C2Of/+A03PV+CaR2++ApbSM+C ccSM4+AlYQg9CpbS8+A4P149Cnvf6+A7Rhr9C ckCXv+AmuJR8C9ow1+Ae++J9CWO0y+AmuJx8C cyLdj+AmuJx7Ay25r+AAAAAAA357n+AmuJx7A caR2O+AmuJR8CZ78d+AmuJx7A9owV+AAAAAAA cfU469AlYQg9C357H+AmuJx8CnEDC+Ae++J9C c8S3k9AzhWE+CmuJx9A7Rhr9Ce++p9A4P149C coFZb9AR2Of+C2Off9ApbSM+CoFZb9A03PV+C c8S3k9AGZ7s+CoFZb9AFuek+C2Off9AKc9o+C cfU469AIwK3+Ce++p9ABW5w+CmuJx9AzhW0+C caR2O+AHv09+CnEDC+Ae++5+C357H+ApbS8+C cyLdj+AJGEA/C9owV+AkCX/+CZ78d+AJGEA/C . myLdj+APKcd8C c+TNu+AvJxA9C/Ujn+APKcd8CEtIr+AxgAr8C cQ141+ABrco9C46Rx+AFDCM9Cpw1z+AoFZb9C cnvf6+AjX6G+C3573+ADCsy9CUNe5+Atd++9C cFDC8+AR2Of+C7Rh7+AsHFO+CFDC8+AHaRW+C cnvf6+Agqxr+CFDC8+AOJGk+C7Rh7+AJGEo+C cQ141+A03P1+CUNe5+A2Ofv+C3573+Ax1jy+C c+TNu+AW5Q7+Cpw1z+Aktz3+C46Rx+ALy25+C cyLdj+APKc9+CEtIr+AhAr8+C/Ujn+APKc9+C cLHaR+AW5Q7+CIFue+APKc9+C/UjX+AhAr8+C cCsyB+A03P1+CW5QL+ALy25+CQ14F+Aktz3+C cmuJx9Agqxr+CoFZ79Ax1jy+C03P19A2Ofv+C cxgAr9AR2Of+CYlDt9AJGEo+CxgAr9AOJGk+C cmuJx9AjX6G+CxgAr9AHaRW+CYlDt9AsHFO+C cCsyB+ABrco9C03P19Atd++9CoFZ79ADCsy9C cLHaR+AvJxA9CQ14F+AoFZb9CW5QL+AFDCM9C cyLdj+APKcd8C/UjX+AxgAr8CIFue+APKcd8C . 
CfRJjMS/Au3eb/AdzNX/AAAAg/A F p P TmAAAQBBAAAAAA3k8TDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A p P TmAAAQBBAAAAAAUNSVDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np mb8S3+Aepby+C c14lu+AxgA7+CY6m0+AamZ2+CCsyx+ACBW5+C cEYlj+AiWk9+CoFZr+AhAr8+CShrn+AiWk9+C cLHaR+AW5Q7+CIFue+AiWk9+C/UjX+AzMz8+C cnEDC+APf/0+CW5QL+A5lu5+C1NJG+AShr3+C cwfqx9AoFZr+Cy2579AMdTy+C9ow19A/pGv+C c7Rhr9Atd+e+CiWkt9AShrn+C7Rhr9AXktj+C cTiBB+A8S3k9C7Rhr9AMIwK+CNzMz9Aktz39C cmuJh+APKcd8ClDtI+AWO0C9CXktT+APKcd8C cPKct+AYlDN9CZQgl+APKcd8CmZmp+ANzMz8C cb8S3+Atd++9CmuJx+AlYQg9CFue0+AiWkt9C lb8S3+Aepby+C . mMIw6+AHaRO/C lMIw6+AAAAAAC lv0N5+AAAAAAC cktz3+AKc9I8CueU4+AAAAAACktz3+AmuJR7C ltIb3+A5QLy9C c99Tt+AYlDt8CY6m0+ACsyh9C46Rx+AKc9I9C c3kYg+AmuJx7AUNep+AmuJR7ChrHl+AmuJx7A cktz39ACBWZ9Cc9oQ+AmuJx7AzhWE+APKcd8C coFZb9Atd+e+CamZm9Awfqx9CoFZb9Av0NJ+C c8S3k9ApbSs+CoFZb9AOJGk+ClYQg9AueUo+C cLy259A++p2+CUNep9AlYQw+Cc9ow9AXktz+C csHFO+AiWk9+CdTiB+AmZm5+CIwKH+Ay257+C c6mEj+AJGEA/CPf/U+AR2O/+CGZ7c+AJGEA/C cIFuu+AiWk9+C/Ujn+AJGEA/CoFZr+AR2O/+C cb8S3+A9ow1+CnEDy+Agqx7+C8S30+Av0N5+C lb8S3+AHaRO/C lMIw6+AHaRO/C . CfRJjMS/Au3eb/AdzNX/AAAAg/A F p P TmAAAQBBAAAAAAtI7WDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np mVjX69A+TNu+C cpw1D+AQ141+CZ7899ATiBx+C46RB+AEYlz+C cO0iM+A7Rh7+CamZG+AJGE4+Cv0NJ+ALy25+C cJGEY+Atd+++Cuz3P+AGZ78+CXktT+A+TN++C cf/Uj+AJGEA/C8nac+Abnv/+Cc9og+AJGEA/C c2jCn+Auz3/+CFuek+AJGEA/C9owl+AAAAA/C c67nq+AIFu++CueUo+AJbn/+CmZmp+AR2O/+C lDXPq+AhAr8+C cCBWp+AFDC8+Ce++p+AXPK8+C5lup+Ay257+C cIwKn+A8na8+C4P1o+Ay257+CJGEo+AXPK8+C coa8i+AGZ78+C1NJm+AzMz8+CqGvk+AGZ78+C c/UjX+ANep7+ClYQg+AGZ78+CNepb+AO0i8+C cO0iM+AJGE4+CyLdT+AfU46+Cuz3P+AmZm5+C cOJGE+AU46x+CUNeJ+AamZ2+C++pG+AzhW0+C cfU469AUNep+CdTiB+A2Ofv+Ctd++9AhArs+C lfU469AAAAAAC lYlDt9AAAAAAC lYlDt9A/pG/+C lNzMz9A/pG/+C c4P149A0is9+CRLy29A/pG/+CueU49A14l++C lVjX69A+TNu+C . 
CfRJjMS/Au3eb/AdzNX/AAAAg/A F p P TmAAAQBBAAAAAAep7XDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np mR2Ov+AR2Of+C chrHF+A9owV+CMdTi+Ajsde+CLHaR+AEtIb+C cGEYl9A8na89Cwfqx9ASMIQ+CGEYl9AcSMI+C ce++p9AzhWk9CGEYl9ANzMz9CjX6m9AxgAr9C cRLy29AQ14F9CYlDt9AVjXa9Cwfqx9A/pGP9C czhWE+A8S3k8Cy2579ACBW58CTiBB+AYlDt8C c14lO+AmuJR8CShrH+ACBWZ8CxgAL+AmuJR8C cPKcd+AjX6m8C789T+AmuJR8C4P1Y+ACBWZ8C cPf/k+A357H9CTiBh+AmuJx8C6mEj+APKc98C cVjXq+AWO0i9CjX6m+ASMIQ9ClDto+AVjXa9C cR2Ov+A03P19CFDCs+ABrco9C0ist+A14lu9C lR2Ov+AR2Of+C . mQ14l9A1NJ2+C c++pG+AiWk9+C5QLy9ACBW5+C2Of/9Ay257+C cjsde+AJGEA/CiWkN+AR2O/+CZQgV+AJGEA/C cktzn+A14l++Cepbi+AJGEA/CGEYl+AJbn/+C cHv0t+AwKH6+CDXPq+AiWk9+CXPKs+AFDC8+C cLHax+Aoa8y+CkCXv+AJGE4+Cc9ow+A9ow1+C cx1jy+AUNep+C5QLy+ASMIw+Cx1jy+AGZ7s+C lx1jy+AAAAAAC lLHax+AAAAAAC cuz3v+AYlDN8CKxgw+AAAAAACAAAw+AvJxg7C lR2Ov+AxgAr9C ce++p+A/pGP9CPKct+Apw1j9Cgqxr+AVjXa9C cFuek+AWO0i8CcSMo+AWO0C9CamZm+ANzMz8C cXPKc+AvJxA7Aepbi+A8S3E8C3kYg+AmuJR7C c99TN+AvJxA8A/UjX+AmuJx7ADCsS+AvJxA8A c46RB+AvJxg6Av0NJ+AvJxA8AhrHF+AmuJx7A ciWkt9AxgAr8CfU469AvJxg7CXktz9AYlDN8C c8nac9APKcd9CjX6m9A2Of/8CCsyh9AYlDN9C cmuJR9Ay2579CgVOU9AjX6m9CmuJR9Ac9ow9C cCsyh9AYlDN+CmuJR9AEYlD+Cb8SX9ABrcI+C cAAAA+A4P1Y+CBrco9AwfqR+C5QLy9AZQgV+C cmZmZ+AlYQg+C++pG+AXPKc+C/pGP+AIFue+C cR2Ov+AnEDi+CU46h+A46Rh+CJGEo+ACsyh+C lR2Ov+AUNep+C cEtIr+AcSM4+CR2Ov+Abnvv+CHv0t+AqGv0+C cZ78d+APKc9+CueUo+ANep7+CzhWk+APKc9+C c99TN+AFDC8+C/UjX+APKc9+CU46R+AGZ78+C cCsyB+AKc94+ClDtI+AEtI7+C8S3E+AwKH6+C cqGv09AQ141+CPKc99Aktz3+CueU49ARLy2+C cO0is9AY6m0+CmuJx9APf/0+C14lu9AY6m0+C c357n9A03P1+CxgAr9AY6m0+CUNep9AqGv0+C lQ14l9A1NJ2+C . 
CfRJjMS/Au3eb/AdzNX/AAAAg/A F p P TmAAAQBBAAAAAA2OXZDBAAAAAAAAAQBBSslGEBAAAAAAAAAAAAAAAg/A Np mamZO/A/pG/+C lXktD/AAAAAAC lx1jC/AAAAAAC cnEDC/ACBW57CMdTC/AAAAAAC5QLC/AvJxA7C lmuJx+AlDt4+C c3kYw+AW5Q7+CvJxw+AUNe5+CKxgw+AVjX6+C cJbnv+AlDt4+CSMIw+AVjX6+Cuz3v+AUNe5+C lT3kY+ACBW57C cRLyW+AAAAAACJGEY+AvJxA7C/UjX+AAAAAAC lepbS+AAAAAAC lPKc98A/pG/+C lmuJR9A/pG/+C cVjXa9AIFu++C03PV9A/pG/+CueUY9Atd+++C cjsde9AZ789+C8nac9Ajsd++CPKcd9A+TN++C lyLdT+AoFZb9C cFueU+AxgAL9C789T+A03PV9CgVOU+ASMIQ9C cZQgV+ACBW58CqGvU+AQ14F9C03PV+ACsyB9C csyhW+AxgAL9C9owV+ACsyB9CiBBW+AQ14F9C cktzX+AoFZb9CRLyW+ASMIQ9Cb8SX+A03PV9C lIFuu+A14l++C cbnvv+A2Of/+CaR2u+A/pG/+CR2Ov+A2Of/+C lTiBx+A2Of/+C c5QLy+A14l++CdTix+A2Of/+CU46x+A/pG/+C lVjXC/AoFZb9C cDCsC/AxgAL9CnvfC/A03PV9Cx1jC/ASMIQ9C coa8C/Ab8S38CMIwC/AQ14F9CfU4C/AvJxA9C cNzMD/AxgAL9CxgAD/AvJxA9C6mED/AQ14F9C cyLdD/AoFZb9CW5QD/ASMIQ9Cf/UD/A03PV9C lFueM/AZ789+C cqGvM/AaR2++CO0iM/AQgV++CY6mM/A14l++C c03PN/A/pG/+C8S3M/Atd+++CYlDN/A/pG/+C lamZO/A/pG/+C . CfRJjMS/Au3eb/AdzNX/AAAAg/A F p p p p P TmAAAg/AAAAAAA8pAyADAAAAAAAAAg/Abn/dCDAAAAAAAAAAAAAAAg/A Np mVDjBEB8yVAEB cVDjBEB8yVAEB8SaBEBOJVAEB++jBEB0XUAEB cLyoCEBepPAEBTXpBEB5QWAEBVDDCEBUNSAEB cH6eGEB7R4+DB0iIEEB/JJAEB6bOFEBamFAEB cnP9IEBCBk7DBolhHEB2jz9DB1t/HEBqxV9DB c67GJEBtIv1DBpbBKEB78l5DBW50JEBcSu3DB cQgjGEB9orzDBamdIEBKx8zDBolkHEBrcxzDB cVjpDEB8nczDBo6gFEBjslzDBjsjEEB9oV0DB ceJTCEB03NxDBkCLDEBEt+yDBjMyCEBZQxyDB cE4HCEBoFwuDBeJTCEB03NxDB9oSCEBlDLwDB cwqcBEBrchrDBc9+BEBhrktDB2uvBEBpblsDB cOU4FEBpwwpDBwqcBEBrchrDBP/SEEBam+pDB caR9KEBlDWsDBOpdHEBZ7ipDBT3RJEBNzmpDB cGkiNEB/U3wDByroMEBfUFvDBFjqMEBW5DvDB cQ1iLEBGEAoDBGkiNEB/U3wDB++zKEBvJspDB c2DzQEBwfPlDByrRMEB99TmDBG5UPEBJb4kDB cDi/VEBW5lmDBmORSEB2jmlDBZwkUEBpwrlDB csHcTEBFucfDBDi/VEBW5lmDB46dTEBqGthDB cVDJVEBBWDYDBvUaTEB2OfbDBwfbUEBMdlYDB cAgNQEBTixQDBVDJVEBBWDYDB/pLREB5QHWDB cBL4GEBTiFIDBAgNQEBTixQDBj3UHEBU4cIDB cFDtFEBPfnHDBeebGEBUNuHDBOJEGEBuzZHDB cMIECEBKc1PDBr8VFEBwK1HDBTCoCEB143ODB c5QR5DBxgkdDBFOgBEBe+yQDBUNA6DBzMZcDB 
cHag4DB35EgDBfUi4DBv0veDBpwm4DBVjjeDB cFuq3DBcStgDBHag4DB35EgDBwKH4DBU4TgDB cPfh3DBoFKhDBpbl3DBhAygDBKxg3DBe+8gDB c8nk3DBCsSjDB+Tj3DBaRrhDBShi3DBuzxiDB cam+3DBO0tjDBKxl3DBShkjDBQgu3DBtIrjDB cQgY7DB/pekDBZQh4DB6mzjDB5QV6DBhAMkDB cwKd6DB5lwkDBQgY7DB/pekDBwfg7DBYlgkDB cf/U5DBMdAlDBgq45DBaR5kDBVjp5DBkt7kDB cCBZ3DBf/WlDBEYw4DBW5IlDBFDQ4DBDCNlDB cpwN3DBFucfDBCBZ3DBf/WlDBsHl3DBkCqhDB cx1v1DBjshWDBmZ22DBDXlbDBKcm2DBaRgZDB cPf30DBJbLUDBgqd1DBZQiVDBZ7O1DBHv+UDB cMdZ0DB99LQDBPf30DBJbLUDBkt+0DBfUQSDB camozDB/U5ODBCsQ0DBR2sPDBrc+zDBDCSPDB crcHxDB2jCNDB99+yDBaRKODBRLOyDB5QTNDB cNefwDBMdnNDBVj1wDBgV+MDBMIrwDB5lWNDB ciB0vDBrc2NDBNefwDBMdnNDBwKGwDBAAsNDB coaKvDBY6GPDBe+gvDBJbBODBIFfvDBv0bODB cdoWtDB99LQDBoaKvDBY6GPDBMIGuDBzMLPDB czMxrDBQ1APDBdoWtDB99LQDB7RxsDBIFWPDB c5QesDBLHmRDBzMxrDBQ1APDB5lisDBZQSQDB c5ltqDBjsLSDB5QesDBLHmRDBHvgrDBSMIRDB cueLsDB8SPTDB5ltqDBjsLSDBb8AsDB99rRDB cTieqDBQgHVDBueLsDB8SPTDBPfRrDBpbeTDB c670rDBe+wUDBTieqDBQgHVDBsHCrDBzMXUDB ciBrqDBtdcYDB670rDBe+wUDBGZCrDBPK4WDB cwfMqDBFDsbDB4PVqDB355ZDBFuNqDBbn/aDB c+TpqDBO0gdDBHvKqDBLHscDBU4YqDBSMYdDB c67/tDB7RdeDBQ12rDBShJeDBSMHtDBZ7MeDB cyLHwDBKxZgDBCs4uDBbnteDBU4mvDBNeNfDB cdTDxDBT3RjDBuenwDBuzMhDBTi+wDBCB4hDB cTiwwDB1NVmDBnEIxDBFurkDB7RJxDBjX5kDB cJbDwDB7RNpDBsyXwDBGExnDBZQWwDBy26nDB cpwswDBTiftDBamwvDBktfqDBaRFwDBO0ZsDB cnv3yDBVj5vDBZQUxDBZQluDBDC+xDBIwKvDB clDSzDBb8AxDBnv3yDBVj5vDBuzRzDBhAUwDB cdTOzDByLIzDB+TSzDB03txDBuzNzDBwfjyDB cFDDzDBFDI0DBNzOzDB03szDBcSTzDBWOA0DB cvJEyDBwfD0DBsywyDBR2Q0DBaRayDBamL0DB cU48wDBb8T0DB67pxDBW55zDBKcLxDBe+/zDB c6mTwDBXkU1DB14wwDBlYk0DBGEpwDBT3r0DB cgqfwDBaRF2DB1NGwDB99t1DBW5BwDB8nF2DB cdTDxDBsHh2DBkC5wDBe+E2DBam7wDB0iQ2DB chrKyDBBWt2DBdTDxDBsHh2DBep0xDB5Qr2DB c7RZzDBsHh2DBv05yDBsyx2DBZQIzDBJbi2DB c78g0DBqGx2DBFDwzDBEYf2DBlD8zDBZ712DB c9oC2DBcSP2DBx1F1DBaRs2DBwff1DB/pm2DB cZQV3DBlDi2DB9oC2DBcSP2DBZQg2DBnEi2DB ccS67DBoFx2DB5QK4DBFDi2DBDXY6DB1Ny2DB cPfu/DBqGx2DBUNc9DBc9v2DBqGU+DBvJu2DB cmO1AEBjX82DB6bkAEBFD02DBzBiAEB3kR3DB cRrcBEBFDT2DBpbIBEBwKn2DBljEBEBMdN2DB 
ccSgCEBDX82DBsy0BEB9oY2DBvUFCEB+T92DB cwqlEEBYlp2DBZQ7CEBmZ72DBdz9DEBlDc2DB ciBrGEBue43DBDiNFEBsH32DBffJGEBoaT3DB c8HnHEBWOH6DBGEAHEBHvP4DB5lpHEBNeN5DB ckCtFEBCBl8DBPqkHEB99A7DBtocGEByLG8DB cWOJDEB8nC+DBKc9EEBR2D9DBmZBEEBHvw9DB ckCXBEBvJw9DBFDRCEBxgU+DBaGGCEBaRK+DB ckiRAEBY6o7DBkCXBEBvJw9DB8y3AEBc9V8DB cc9M9DBYl/6DBXkW/DBy276DB/UT+DB67x6DB c+zcAEB2j78DBc9M9DBYl/6DB4aMAEBDC77DB cSMJ9DBY6o7DB+zcAEB2j78DBmur+DBEYW7DB cEtF7DB5Qh8DB9oS8DBwKz7DBbnr7DB/pN8DB ce+m/DBYlK9DBEtF7DB5Qh8DB++o+DB/pp8DB cJGc8DBamZ9DBe+m/DBYlK9DBQ1m9DBsHu8DB cnvY5DBlDO/DBDXR7DBIFF+DBKxm6DBamy+DB clDo3DBJGd/DBJb54DBIFZ/DBJbd4DBKxZ/DB cQ125DBMIs/DBlDo3DBJGd/DBamS4DBQgs/DB clD+7DBjXo/DBlDb7DBnvr/DBGEf6DBHv0/DB c/U+5DB5QhAEBlD+7DBjXo/DB5ly7DBf/JAEB cJGc8DBgKyAEB/U+5DB5QhAEBjXa6DBMd8AEB cU45+DBx1IAEBv0d+DBj3nAEBQgG+DBaRaAEB cAAg7DB9dMBEBU45+DBx1IAEBx1q9DBzhPBEB cRLQ+DB8HOBEBAAg7DB9dMBEBIF98DBm5aBEB cRLXAEBTisAEBuem/DBi2ABEBsyx/DB+z6AEB cZQy/DBp7SBEBRLXAEBTisAEBkCQAEBnP5AEB cW55+DBT3yBEBmue/DBU4dBEBgVAAEBO0NBEB cyLXAEBg1jBEBW55+DBT3yBEB3kw/DBto6BEB ctIGBEBZw2AEBSMpAEBFjWBEBXE1AEBjXJBEB cVDjBEB8yVAEBmudBEBzBdAEBDCVBEBDipAEB . mrcN2DB/UmxDB coaw3DB14NyDBrcN2DB/UmxDBcS+2DBLywxDB c6mx4DBamRzDBdTE4DBKcZyDB++o4DB5QzyDB cGEc5DBueP0DBzh64DB99wzDBGEc5DBueP0DB cjsY3DBc9N0DBGEc5DBueP0DBlY63DBmZc0DB cbnQ2DB5lr0DBVjD3DBW5E0DBT3a2DBQ1c0DB cW5L2DBO00zDBGZL2DBLHz0DBHaL2DBepC0DB cMdN2DB4PTyDB78M2DB78WzDBRLM2DBaRAzDB crcN2DB/UmxDBmuO2DB1NmxDBrcN2DB/UmxDB . 
LwAAAQBB CsRZiJG/A0O7M/Aoe6J/AAAAA/A S Np mVDjBEB8yVAEB cVDjBEB8yVAEB8SaBEBOJVAEB++jBEB0XUAEB cLyoCEBepPAEBTXpBEB5QWAEBVDDCEBUNSAEB cH6eGEB7R4+DB0iIEEB/JJAEB6bOFEBamFAEB cnP9IEBCBk7DBolhHEB2jz9DB1t/HEBqxV9DB c67GJEBtIv1DBpbBKEB78l5DBW50JEBcSu3DB cQgjGEB9orzDBamdIEBKx8zDBolkHEBrcxzDB cVjpDEB8nczDBo6gFEBjslzDBjsjEEB9oV0DB ceJTCEB03NxDBkCLDEBEt+yDBjMyCEBZQxyDB cE4HCEBoFwuDBeJTCEB03NxDB9oSCEBlDLwDB cwqcBEBrchrDBc9+BEBhrktDB2uvBEBpblsDB cOU4FEBpwwpDBwqcBEBrchrDBP/SEEBam+pDB caR9KEBlDWsDBOpdHEBZ7ipDBT3RJEBNzmpDB cGkiNEB/U3wDByroMEBfUFvDBFjqMEBW5DvDB cQ1iLEBGEAoDBGkiNEB/U3wDB++zKEBvJspDB c2DzQEBwfPlDByrRMEB99TmDBG5UPEBJb4kDB cDi/VEBW5lmDBmORSEB2jmlDBZwkUEBpwrlDB csHcTEBFucfDBDi/VEBW5lmDB46dTEBqGthDB cVDJVEBBWDYDBvUaTEB2OfbDBwfbUEBMdlYDB cAgNQEBTixQDBVDJVEBBWDYDB/pLREB5QHWDB cBL4GEBTiFIDBAgNQEBTixQDBj3UHEBU4cIDB cFDtFEBPfnHDBeebGEBUNuHDBOJEGEBuzZHDB cMIECEBKc1PDBr8VFEBwK1HDBTCoCEB143ODB c5QR5DBxgkdDBFOgBEBe+yQDBUNA6DBzMZcDB cHag4DB35EgDBfUi4DBv0veDBpwm4DBVjjeDB cFuq3DBcStgDBHag4DB35EgDBwKH4DBU4TgDB cPfh3DBoFKhDBpbl3DBhAygDBKxg3DBe+8gDB c8nk3DBCsSjDB+Tj3DBaRrhDBShi3DBuzxiDB cam+3DBO0tjDBKxl3DBShkjDBQgu3DBtIrjDB cQgY7DB/pekDBZQh4DB6mzjDB5QV6DBhAMkDB cwKd6DB5lwkDBQgY7DB/pekDBwfg7DBYlgkDB cf/U5DBMdAlDBgq45DBaR5kDBVjp5DBkt7kDB cCBZ3DBf/WlDBEYw4DBW5IlDBFDQ4DBDCNlDB cpwN3DBFucfDBCBZ3DBf/WlDBsHl3DBkCqhDB cx1v1DBjshWDBmZ22DBDXlbDBKcm2DBaRgZDB cPf30DBJbLUDBgqd1DBZQiVDBZ7O1DBHv+UDB cMdZ0DB99LQDBPf30DBJbLUDBkt+0DBfUQSDB camozDB/U5ODBCsQ0DBR2sPDBrc+zDBDCSPDB crcHxDB2jCNDB99+yDBaRKODBRLOyDB5QTNDB cNefwDBMdnNDBVj1wDBgV+MDBMIrwDB5lWNDB ciB0vDBrc2NDBNefwDBMdnNDBwKGwDBAAsNDB coaKvDBY6GPDBe+gvDBJbBODBIFfvDBv0bODB cdoWtDB99LQDBoaKvDBY6GPDBMIGuDBzMLPDB czMxrDBQ1APDBdoWtDB99LQDB7RxsDBIFWPDB c5QesDBLHmRDBzMxrDBQ1APDB5lisDBZQSQDB c5ltqDBjsLSDB5QesDBLHmRDBHvgrDBSMIRDB cueLsDB8SPTDB5ltqDBjsLSDBb8AsDB99rRDB cTieqDBQgHVDBueLsDB8SPTDBPfRrDBpbeTDB c670rDBe+wUDBTieqDBQgHVDBsHCrDBzMXUDB ciBrqDBtdcYDB670rDBe+wUDBGZCrDBPK4WDB cwfMqDBFDsbDB4PVqDB355ZDBFuNqDBbn/aDB c+TpqDBO0gdDBHvKqDBLHscDBU4YqDBSMYdDB 
c67/tDB7RdeDBQ12rDBShJeDBSMHtDBZ7MeDB cyLHwDBKxZgDBCs4uDBbnteDBU4mvDBNeNfDB cdTDxDBT3RjDBuenwDBuzMhDBTi+wDBCB4hDB cTiwwDB1NVmDBnEIxDBFurkDB7RJxDBjX5kDB cJbDwDB7RNpDBsyXwDBGExnDBZQWwDBy26nDB cpwswDBTiftDBamwvDBktfqDBaRFwDBO0ZsDB cnv3yDBVj5vDBZQUxDBZQluDBDC+xDBIwKvDB clDSzDBb8AxDBnv3yDBVj5vDBuzRzDBhAUwDB cdTOzDByLIzDB+TSzDB03txDBuzNzDBwfjyDB cFDDzDBFDI0DBNzOzDB03szDBcSTzDBWOA0DB cvJEyDBwfD0DBsywyDBR2Q0DBaRayDBamL0DB cU48wDBb8T0DB67pxDBW55zDBKcLxDBe+/zDB c6mTwDBXkU1DB14wwDBlYk0DBGEpwDBT3r0DB cgqfwDBaRF2DB1NGwDB99t1DBW5BwDB8nF2DB cdTDxDBsHh2DBkC5wDBe+E2DBam7wDB0iQ2DB chrKyDBBWt2DBdTDxDBsHh2DBep0xDB5Qr2DB c7RZzDBsHh2DBv05yDBsyx2DBZQIzDBJbi2DB c78g0DBqGx2DBFDwzDBEYf2DBlD8zDBZ712DB c9oC2DBcSP2DBx1F1DBaRs2DBwff1DB/pm2DB cZQV3DBlDi2DB9oC2DBcSP2DBZQg2DBnEi2DB ccS67DBoFx2DB5QK4DBFDi2DBDXY6DB1Ny2DB cPfu/DBqGx2DBUNc9DBc9v2DBqGU+DBvJu2DB cmO1AEBjX82DB6bkAEBFD02DBzBiAEB3kR3DB cRrcBEBFDT2DBpbIBEBwKn2DBljEBEBMdN2DB ccSgCEBDX82DBsy0BEB9oY2DBvUFCEB+T92DB cwqlEEBYlp2DBZQ7CEBmZ72DBdz9DEBlDc2DB ciBrGEBue43DBDiNFEBsH32DBffJGEBoaT3DB c8HnHEBWOH6DBGEAHEBHvP4DB5lpHEBNeN5DB ckCtFEBCBl8DBPqkHEB99A7DBtocGEByLG8DB cWOJDEB8nC+DBKc9EEBR2D9DBmZBEEBHvw9DB ckCXBEBvJw9DBFDRCEBxgU+DBaGGCEBaRK+DB ckiRAEBY6o7DBkCXBEBvJw9DB8y3AEBc9V8DB cc9M9DBYl/6DBXkW/DBy276DB/UT+DB67x6DB c+zcAEB2j78DBc9M9DBYl/6DB4aMAEBDC77DB cSMJ9DBY6o7DB+zcAEB2j78DBmur+DBEYW7DB cEtF7DB5Qh8DB9oS8DBwKz7DBbnr7DB/pN8DB ce+m/DBYlK9DBEtF7DB5Qh8DB++o+DB/pp8DB cJGc8DBamZ9DBe+m/DBYlK9DBQ1m9DBsHu8DB cnvY5DBlDO/DBDXR7DBIFF+DBKxm6DBamy+DB clDo3DBJGd/DBJb54DBIFZ/DBJbd4DBKxZ/DB cQ125DBMIs/DBlDo3DBJGd/DBamS4DBQgs/DB clD+7DBjXo/DBlDb7DBnvr/DBGEf6DBHv0/DB c/U+5DB5QhAEBlD+7DBjXo/DB5ly7DBf/JAEB cJGc8DBgKyAEB/U+5DB5QhAEBjXa6DBMd8AEB cU45+DBx1IAEBv0d+DBj3nAEBQgG+DBaRaAEB cAAg7DB9dMBEBU45+DBx1IAEBx1q9DBzhPBEB cRLQ+DB8HOBEBAAg7DB9dMBEBIF98DBm5aBEB cRLXAEBTisAEBuem/DBi2ABEBsyx/DB+z6AEB cZQy/DBp7SBEBRLXAEBTisAEBkCQAEBnP5AEB cW55+DBT3yBEBmue/DBU4dBEBgVAAEBO0NBEB cyLXAEBg1jBEBW55+DBT3yBEB3kw/DBto6BEB ctIGBEBZw2AEBSMpAEBFjWBEBXE1AEBjXJBEB 
cVDjBEB8yVAEBmudBEBzBdAEBDCVBEBDipAEB . mrcN2DB/UmxDB coaw3DB14NyDBrcN2DB/UmxDBcS+2DBLywxDB c6mx4DBamRzDBdTE4DBKcZyDB++o4DB5QzyDB cGEc5DBueP0DBzh64DB99wzDBGEc5DBueP0DB cjsY3DBc9N0DBGEc5DBueP0DBlY63DBmZc0DB cbnQ2DB5lr0DBVjD3DBW5E0DBT3a2DBQ1c0DB cW5L2DBO00zDBGZL2DBLHz0DBHaL2DBepC0DB cMdN2DB4PTyDB78M2DB78WzDBRLM2DBaRAzDB crcN2DB/UmxDBmuO2DB1NmxDBrcN2DB/UmxDB . LwAAACBB CsRiHeY/A1TPd/Asv+a/AamZG/A S Np mVDjBEB8yVAEB cVDjBEB8yVAEB8SaBEBOJVAEB++jBEB0XUAEB cLyoCEBepPAEBTXpBEB5QWAEBVDDCEBUNSAEB cH6eGEB7R4+DB0iIEEB/JJAEB6bOFEBamFAEB cnP9IEBCBk7DBolhHEB2jz9DB1t/HEBqxV9DB c67GJEBtIv1DBpbBKEB78l5DBW50JEBcSu3DB cQgjGEB9orzDBamdIEBKx8zDBolkHEBrcxzDB cVjpDEB8nczDBo6gFEBjslzDBjsjEEB9oV0DB ceJTCEB03NxDBkCLDEBEt+yDBjMyCEBZQxyDB cE4HCEBoFwuDBeJTCEB03NxDB9oSCEBlDLwDB cwqcBEBrchrDBc9+BEBhrktDB2uvBEBpblsDB cOU4FEBpwwpDBwqcBEBrchrDBP/SEEBam+pDB caR9KEBlDWsDBOpdHEBZ7ipDBT3RJEBNzmpDB cGkiNEB/U3wDByroMEBfUFvDBFjqMEBW5DvDB cQ1iLEBGEAoDBGkiNEB/U3wDB++zKEBvJspDB c2DzQEBwfPlDByrRMEB99TmDBG5UPEBJb4kDB cDi/VEBW5lmDBmORSEB2jmlDBZwkUEBpwrlDB csHcTEBFucfDBDi/VEBW5lmDB46dTEBqGthDB cVDJVEBBWDYDBvUaTEB2OfbDBwfbUEBMdlYDB cAgNQEBTixQDBVDJVEBBWDYDB/pLREB5QHWDB cBL4GEBTiFIDBAgNQEBTixQDBj3UHEBU4cIDB cFDtFEBPfnHDBeebGEBUNuHDBOJEGEBuzZHDB cMIECEBKc1PDBr8VFEBwK1HDBTCoCEB143ODB c5QR5DBxgkdDBFOgBEBe+yQDBUNA6DBzMZcDB cHag4DB35EgDBfUi4DBv0veDBpwm4DBVjjeDB cFuq3DBcStgDBHag4DB35EgDBwKH4DBU4TgDB cPfh3DBoFKhDBpbl3DBhAygDBKxg3DBe+8gDB c8nk3DBCsSjDB+Tj3DBaRrhDBShi3DBuzxiDB cam+3DBO0tjDBKxl3DBShkjDBQgu3DBtIrjDB cQgY7DB/pekDBZQh4DB6mzjDB5QV6DBhAMkDB cwKd6DB5lwkDBQgY7DB/pekDBwfg7DBYlgkDB cf/U5DBMdAlDBgq45DBaR5kDBVjp5DBkt7kDB cCBZ3DBf/WlDBEYw4DBW5IlDBFDQ4DBDCNlDB cpwN3DBFucfDBCBZ3DBf/WlDBsHl3DBkCqhDB cx1v1DBjshWDBmZ22DBDXlbDBKcm2DBaRgZDB cPf30DBJbLUDBgqd1DBZQiVDBZ7O1DBHv+UDB cMdZ0DB99LQDBPf30DBJbLUDBkt+0DBfUQSDB camozDB/U5ODBCsQ0DBR2sPDBrc+zDBDCSPDB crcHxDB2jCNDB99+yDBaRKODBRLOyDB5QTNDB cNefwDBMdnNDBVj1wDBgV+MDBMIrwDB5lWNDB ciB0vDBrc2NDBNefwDBMdnNDBwKGwDBAAsNDB coaKvDBY6GPDBe+gvDBJbBODBIFfvDBv0bODB 
cdoWtDB99LQDBoaKvDBY6GPDBMIGuDBzMLPDB czMxrDBQ1APDBdoWtDB99LQDB7RxsDBIFWPDB c5QesDBLHmRDBzMxrDBQ1APDB5lisDBZQSQDB c5ltqDBjsLSDB5QesDBLHmRDBHvgrDBSMIRDB cueLsDB8SPTDB5ltqDBjsLSDBb8AsDB99rRDB cTieqDBQgHVDBueLsDB8SPTDBPfRrDBpbeTDB c670rDBe+wUDBTieqDBQgHVDBsHCrDBzMXUDB ciBrqDBtdcYDB670rDBe+wUDBGZCrDBPK4WDB cwfMqDBFDsbDB4PVqDB355ZDBFuNqDBbn/aDB c+TpqDBO0gdDBHvKqDBLHscDBU4YqDBSMYdDB c67/tDB7RdeDBQ12rDBShJeDBSMHtDBZ7MeDB cyLHwDBKxZgDBCs4uDBbnteDBU4mvDBNeNfDB cdTDxDBT3RjDBuenwDBuzMhDBTi+wDBCB4hDB cTiwwDB1NVmDBnEIxDBFurkDB7RJxDBjX5kDB cJbDwDB7RNpDBsyXwDBGExnDBZQWwDBy26nDB cpwswDBTiftDBamwvDBktfqDBaRFwDBO0ZsDB cnv3yDBVj5vDBZQUxDBZQluDBDC+xDBIwKvDB clDSzDBb8AxDBnv3yDBVj5vDBuzRzDBhAUwDB cdTOzDByLIzDB+TSzDB03txDBuzNzDBwfjyDB cFDDzDBFDI0DBNzOzDB03szDBcSTzDBWOA0DB cvJEyDBwfD0DBsywyDBR2Q0DBaRayDBamL0DB cU48wDBb8T0DB67pxDBW55zDBKcLxDBe+/zDB c6mTwDBXkU1DB14wwDBlYk0DBGEpwDBT3r0DB cgqfwDBaRF2DB1NGwDB99t1DBW5BwDB8nF2DB cdTDxDBsHh2DBkC5wDBe+E2DBam7wDB0iQ2DB chrKyDBBWt2DBdTDxDBsHh2DBep0xDB5Qr2DB c7RZzDBsHh2DBv05yDBsyx2DBZQIzDBJbi2DB c78g0DBqGx2DBFDwzDBEYf2DBlD8zDBZ712DB c9oC2DBcSP2DBx1F1DBaRs2DBwff1DB/pm2DB cZQV3DBlDi2DB9oC2DBcSP2DBZQg2DBnEi2DB ccS67DBoFx2DB5QK4DBFDi2DBDXY6DB1Ny2DB cPfu/DBqGx2DBUNc9DBc9v2DBqGU+DBvJu2DB cmO1AEBjX82DB6bkAEBFD02DBzBiAEB3kR3DB cRrcBEBFDT2DBpbIBEBwKn2DBljEBEBMdN2DB ccSgCEBDX82DBsy0BEB9oY2DBvUFCEB+T92DB cwqlEEBYlp2DBZQ7CEBmZ72DBdz9DEBlDc2DB ciBrGEBue43DBDiNFEBsH32DBffJGEBoaT3DB c8HnHEBWOH6DBGEAHEBHvP4DB5lpHEBNeN5DB ckCtFEBCBl8DBPqkHEB99A7DBtocGEByLG8DB cWOJDEB8nC+DBKc9EEBR2D9DBmZBEEBHvw9DB ckCXBEBvJw9DBFDRCEBxgU+DBaGGCEBaRK+DB ckiRAEBY6o7DBkCXBEBvJw9DB8y3AEBc9V8DB cc9M9DBYl/6DBXkW/DBy276DB/UT+DB67x6DB c+zcAEB2j78DBc9M9DBYl/6DB4aMAEBDC77DB cSMJ9DBY6o7DB+zcAEB2j78DBmur+DBEYW7DB cEtF7DB5Qh8DB9oS8DBwKz7DBbnr7DB/pN8DB ce+m/DBYlK9DBEtF7DB5Qh8DB++o+DB/pp8DB cJGc8DBamZ9DBe+m/DBYlK9DBQ1m9DBsHu8DB cnvY5DBlDO/DBDXR7DBIFF+DBKxm6DBamy+DB clDo3DBJGd/DBJb54DBIFZ/DBJbd4DBKxZ/DB cQ125DBMIs/DBlDo3DBJGd/DBamS4DBQgs/DB clD+7DBjXo/DBlDb7DBnvr/DBGEf6DBHv0/DB 
c/U+5DB5QhAEBlD+7DBjXo/DB5ly7DBf/JAEB cJGc8DBgKyAEB/U+5DB5QhAEBjXa6DBMd8AEB cU45+DBx1IAEBv0d+DBj3nAEBQgG+DBaRaAEB cAAg7DB9dMBEBU45+DBx1IAEBx1q9DBzhPBEB cRLQ+DB8HOBEBAAg7DB9dMBEBIF98DBm5aBEB cRLXAEBTisAEBuem/DBi2ABEBsyx/DB+z6AEB cZQy/DBp7SBEBRLXAEBTisAEBkCQAEBnP5AEB cW55+DBT3yBEBmue/DBU4dBEBgVAAEBO0NBEB cyLXAEBg1jBEBW55+DBT3yBEB3kw/DBto6BEB ctIGBEBZw2AEBSMpAEBFjWBEBXE1AEBjXJBEB cVDjBEB8yVAEBmudBEBzBdAEBDCVBEBDipAEB . mrcN2DB/UmxDB coaw3DB14NyDBrcN2DB/UmxDBcS+2DBLywxDB c6mx4DBamRzDBdTE4DBKcZyDB++o4DB5QzyDB cGEc5DBueP0DBzh64DB99wzDBGEc5DBueP0DB cjsY3DBc9N0DBGEc5DBueP0DBlY63DBmZc0DB cbnQ2DB5lr0DBVjD3DBW5E0DBT3a2DBQ1c0DB cW5L2DBO00zDBGZL2DBLHz0DBHaL2DBepC0DB cMdN2DB4PTyDB78M2DB78WzDBRLM2DBaRAzDB crcN2DB/UmxDBmuO2DB1NmxDBrcN2DB/UmxDB . CfRVTNV/AjLuY/AdzNX/AAAAg/A F Np mVDjBEB8yVAEB cVDjBEB8yVAEB8SaBEBOJVAEB++jBEB0XUAEB cLyoCEBepPAEBTXpBEB5QWAEBVDDCEBUNSAEB cH6eGEB7R4+DB0iIEEB/JJAEB6bOFEBamFAEB cnP9IEBCBk7DBolhHEB2jz9DB1t/HEBqxV9DB c67GJEBtIv1DBpbBKEB78l5DBW50JEBcSu3DB cQgjGEB9orzDBamdIEBKx8zDBolkHEBrcxzDB cVjpDEB8nczDBo6gFEBjslzDBjsjEEB9oV0DB ceJTCEB03NxDBkCLDEBEt+yDBjMyCEBZQxyDB cE4HCEBoFwuDBeJTCEB03NxDB9oSCEBlDLwDB cwqcBEBrchrDBc9+BEBhrktDB2uvBEBpblsDB cOU4FEBpwwpDBwqcBEBrchrDBP/SEEBam+pDB caR9KEBlDWsDBOpdHEBZ7ipDBT3RJEBNzmpDB cGkiNEB/U3wDByroMEBfUFvDBFjqMEBW5DvDB cQ1iLEBGEAoDBGkiNEB/U3wDB++zKEBvJspDB c2DzQEBwfPlDByrRMEB99TmDBG5UPEBJb4kDB cDi/VEBW5lmDBmORSEB2jmlDBZwkUEBpwrlDB csHcTEBFucfDBDi/VEBW5lmDB46dTEBqGthDB cVDJVEBBWDYDBvUaTEB2OfbDBwfbUEBMdlYDB cAgNQEBTixQDBVDJVEBBWDYDB/pLREB5QHWDB cBL4GEBTiFIDBAgNQEBTixQDBj3UHEBU4cIDB cFDtFEBPfnHDBeebGEBUNuHDBOJEGEBuzZHDB cMIECEBKc1PDBr8VFEBwK1HDBTCoCEB143ODB c5QR5DBxgkdDBFOgBEBe+yQDBUNA6DBzMZcDB cHag4DB35EgDBfUi4DBv0veDBpwm4DBVjjeDB cFuq3DBcStgDBHag4DB35EgDBwKH4DBU4TgDB cPfh3DBoFKhDBpbl3DBhAygDBKxg3DBe+8gDB c8nk3DBCsSjDB+Tj3DBaRrhDBShi3DBuzxiDB cam+3DBO0tjDBKxl3DBShkjDBQgu3DBtIrjDB cQgY7DB/pekDBZQh4DB6mzjDB5QV6DBhAMkDB cwKd6DB5lwkDBQgY7DB/pekDBwfg7DBYlgkDB cf/U5DBMdAlDBgq45DBaR5kDBVjp5DBkt7kDB 
cCBZ3DBf/WlDBEYw4DBW5IlDBFDQ4DBDCNlDB cpwN3DBFucfDBCBZ3DBf/WlDBsHl3DBkCqhDB cx1v1DBjshWDBmZ22DBDXlbDBKcm2DBaRgZDB cPf30DBJbLUDBgqd1DBZQiVDBZ7O1DBHv+UDB cMdZ0DB99LQDBPf30DBJbLUDBkt+0DBfUQSDB camozDB/U5ODBCsQ0DBR2sPDBrc+zDBDCSPDB crcHxDB2jCNDB99+yDBaRKODBRLOyDB5QTNDB cNefwDBMdnNDBVj1wDBgV+MDBMIrwDB5lWNDB ciB0vDBrc2NDBNefwDBMdnNDBwKGwDBAAsNDB coaKvDBY6GPDBe+gvDBJbBODBIFfvDBv0bODB cdoWtDB99LQDBoaKvDBY6GPDBMIGuDBzMLPDB czMxrDBQ1APDBdoWtDB99LQDB7RxsDBIFWPDB c5QesDBLHmRDBzMxrDBQ1APDB5lisDBZQSQDB c5ltqDBjsLSDB5QesDBLHmRDBHvgrDBSMIRDB cueLsDB8SPTDB5ltqDBjsLSDBb8AsDB99rRDB cTieqDBQgHVDBueLsDB8SPTDBPfRrDBpbeTDB c670rDBe+wUDBTieqDBQgHVDBsHCrDBzMXUDB ciBrqDBtdcYDB670rDBe+wUDBGZCrDBPK4WDB cwfMqDBFDsbDB4PVqDB355ZDBFuNqDBbn/aDB c+TpqDBO0gdDBHvKqDBLHscDBU4YqDBSMYdDB c67/tDB7RdeDBQ12rDBShJeDBSMHtDBZ7MeDB cyLHwDBKxZgDBCs4uDBbnteDBU4mvDBNeNfDB cdTDxDBT3RjDBuenwDBuzMhDBTi+wDBCB4hDB cTiwwDB1NVmDBnEIxDBFurkDB7RJxDBjX5kDB cJbDwDB7RNpDBsyXwDBGExnDBZQWwDBy26nDB cpwswDBTiftDBamwvDBktfqDBaRFwDBO0ZsDB cnv3yDBVj5vDBZQUxDBZQluDBDC+xDBIwKvDB clDSzDBb8AxDBnv3yDBVj5vDBuzRzDBhAUwDB cdTOzDByLIzDB+TSzDB03txDBuzNzDBwfjyDB cFDDzDBFDI0DBNzOzDB03szDBcSTzDBWOA0DB cvJEyDBwfD0DBsywyDBR2Q0DBaRayDBamL0DB cU48wDBb8T0DB67pxDBW55zDBKcLxDBe+/zDB c6mTwDBXkU1DB14wwDBlYk0DBGEpwDBT3r0DB cgqfwDBaRF2DB1NGwDB99t1DBW5BwDB8nF2DB cdTDxDBsHh2DBkC5wDBe+E2DBam7wDB0iQ2DB chrKyDBBWt2DBdTDxDBsHh2DBep0xDB5Qr2DB c7RZzDBsHh2DBv05yDBsyx2DBZQIzDBJbi2DB c78g0DBqGx2DBFDwzDBEYf2DBlD8zDBZ712DB c9oC2DBcSP2DBx1F1DBaRs2DBwff1DB/pm2DB cZQV3DBlDi2DB9oC2DBcSP2DBZQg2DBnEi2DB ccS67DBoFx2DB5QK4DBFDi2DBDXY6DB1Ny2DB cPfu/DBqGx2DBUNc9DBc9v2DBqGU+DBvJu2DB cmO1AEBjX82DB6bkAEBFD02DBzBiAEB3kR3DB cRrcBEBFDT2DBpbIBEBwKn2DBljEBEBMdN2DB ccSgCEBDX82DBsy0BEB9oY2DBvUFCEB+T92DB cwqlEEBYlp2DBZQ7CEBmZ72DBdz9DEBlDc2DB ciBrGEBue43DBDiNFEBsH32DBffJGEBoaT3DB c8HnHEBWOH6DBGEAHEBHvP4DB5lpHEBNeN5DB ckCtFEBCBl8DBPqkHEB99A7DBtocGEByLG8DB cWOJDEB8nC+DBKc9EEBR2D9DBmZBEEBHvw9DB ckCXBEBvJw9DBFDRCEBxgU+DBaGGCEBaRK+DB ckiRAEBY6o7DBkCXBEBvJw9DB8y3AEBc9V8DB 
cc9M9DBYl/6DBXkW/DBy276DB/UT+DB67x6DB c+zcAEB2j78DBc9M9DBYl/6DB4aMAEBDC77DB cSMJ9DBY6o7DB+zcAEB2j78DBmur+DBEYW7DB cEtF7DB5Qh8DB9oS8DBwKz7DBbnr7DB/pN8DB ce+m/DBYlK9DBEtF7DB5Qh8DB++o+DB/pp8DB cJGc8DBamZ9DBe+m/DBYlK9DBQ1m9DBsHu8DB cnvY5DBlDO/DBDXR7DBIFF+DBKxm6DBamy+DB clDo3DBJGd/DBJb54DBIFZ/DBJbd4DBKxZ/DB cQ125DBMIs/DBlDo3DBJGd/DBamS4DBQgs/DB clD+7DBjXo/DBlDb7DBnvr/DBGEf6DBHv0/DB c/U+5DB5QhAEBlD+7DBjXo/DB5ly7DBf/JAEB cJGc8DBgKyAEB/U+5DB5QhAEBjXa6DBMd8AEB cU45+DBx1IAEBv0d+DBj3nAEBQgG+DBaRaAEB cAAg7DB9dMBEBU45+DBx1IAEBx1q9DBzhPBEB cRLQ+DB8HOBEBAAg7DB9dMBEBIF98DBm5aBEB cRLXAEBTisAEBuem/DBi2ABEBsyx/DB+z6AEB cZQy/DBp7SBEBRLXAEBTisAEBkCQAEBnP5AEB cW55+DBT3yBEBmue/DBU4dBEBgVAAEBO0NBEB cyLXAEBg1jBEBW55+DBT3yBEB3kw/DBto6BEB ctIGBEBZw2AEBSMpAEBFjWBEBXE1AEBjXJBEB cVDjBEB8yVAEBmudBEBzBdAEBDCVBEBDipAEB . mrcN2DB/UmxDB coaw3DB14NyDBrcN2DB/UmxDBcS+2DBLywxDB c6mx4DBamRzDBdTE4DBKcZyDB++o4DB5QzyDB cGEc5DBueP0DBzh64DB99wzDBGEc5DBueP0DB cjsY3DBc9N0DBGEc5DBueP0DBlY63DBmZc0DB cbnQ2DB5lr0DBVjD3DBW5E0DBT3a2DBQ1c0DB cW5L2DBO00zDBGZL2DBLHz0DBHaL2DBepC0DB cMdN2DB4PTyDB78M2DB78WzDBRLM2DBaRAzDB crcN2DB/UmxDBmuO2DB1NmxDBrcN2DB/UmxDB . LwAAAg/A CsRg+5H/ABDMQ/Aw+6L/AAAAg/A S Np mOJasDBwfQZDB cGENsDBf/OaDBZQYsDBEYlZDBueRsDBdo7ZDB caREsDBueMbDBMdIsDByLjaDB/UGsDBe+2aDB cktAsDB+TnbDBoaDsDBMdVbDBPKCsDBBrebDB cc9/rDBY6wbDBxgAsDBueobDBHa/rDBuzvbDB cWOIsDB6mmbDBepAsDBcSybDB0iHsDBHanbDB cGZZsDBLyWbDB35NsDBv0fbDBy2SsDBnvZbDB ctI0sDBDXRbDBsHisDBR2SbDByLrsDBDXRbDB cNe8sDBPfRbDB+T1sDBDXRbDBR27sDB99TbDB cJG1sDB5QHbDB2j8sDBtIRbDBue1sDB03HbDB c14msDBy2raDBuzvsDB6m+aDBnErsDBKx2aDB c+TcsDBmuhZDBHaesDBpwVaDBOJdsDBpb8ZDB cOJasDBwfQZDBIFcsDBxgaZDBWOVsDB9oSZDB . 
CfRpjOa/AxDPc/AxDPc/AAAAg/A F Np me+asDB+TNZDB cuzasDByLDaDBc9ZsDBmZgZDB46YsDBjXwZDB cgqEuDBZQObDBnvjsDBGEcbDBUNhtDBxgibDB cNz+uDBe+caDBDXXuDBXkDbDBsHvuDBhAzaDB cMd+vDBktRYDBR2avDBTi1ZDBsHsvDB78JZDB c2OUwDBKxKXDBYlGwDBNz4XDBjsNwDBpwlXDB ckCowDBPKWVDBtIdwDBCBmWDBpwjwDB469VDB ccSpwDBQgfUDB9opwDB8SHVDBEttwDBWOuUDB cLycwDBtdSUDB14mwDBQgXUDBpbgwDBnEXUDB c78/uDBcSCUDB03NwDBam/TDBZ7MvDBKx+TDB cWOMtDBe++VDBNeUuDBLHOUDBfUmtDBqx1UDB cKcnsDBR2OYDBFu8sDBtdqWDBsHxsDB1NdXDB cFDdsDBOJAZDBwKksDBepfYDBMIgsDB8SvYDB chrasDBnvRZDB99bsDBFDGZDBY6bsDBCBMZDB ce+asDB+TNZDBdTasDBPfTZDBe+asDBOJOZDB . LwAAAg/A CfRMv8S/ACGYA/ApjO69AAAAg/A F CsRBCIA+AxCLM+AxCLM+AAAAg/A S Np mEYbtDBLywVDB c9oVtDBiWSXDBKcVtDBShFWDBOJRtDBDXnWDB cwfhtDBGEiYDBNeYtDBDXtXDBv0btDBiBJYDB cAADuDBjX4ZDB++ptDBVjHZDB+TytDBHveZDB cKchuDBkCVaDBBrMuDB2OHaDBepVuDB99PaDB cXPsuDBYlVaDBGZjuDB14VaDBVjquDBb8YaDB cMdquDBgqTaDBktsuDB8nUaDBCBruDBoaUaDB ckCnuDBEYPaDBBWpuDBPKSaDByLouDBKxQaDB c67cuDBepBaDB0ijuDBwKLaDBLHguDBNzGaDB c78CuDBcSKZDB5lTuDBrcyZDBLyJuDBMIeZDB cpbftDB3k0WDB2OztDBCscYDBOJktDBc9qXDB cSMdtDBdTCWDBdTetDBXknWDBSMdtDBdTCWDB cEYbtDBLywVDBSMdtDBdTCWDBnEctDB147VDB . CfRTLt0+AJiIi+AZiJm9AAAAg/A F Np mv03uDBpbeUDB c1NSvDB99rWDBqxEvDBShJVDBjXNvDBW56VDB cx1VvDB351XDB5lUvDBIFEXDBLyWvDB1NdXDB cFuPvDBIwCZDBQ1UvDBcSQYDBU4SvDBnEpYDB cktLvDBy2bZDBgqOvDB7RLZDBLHNvDBVjTZDB cUNKvDBZQkZDBXPLvDBCseZDBHvKvDBNehZDB ctdJvDBAAoZDB99JvDBQglZDB++IvDBb8oZDB cFuRvDBfUSZDB6mMvDBjshZDBO0OvDBtIZZDB cDX1vDBTi/WDBlDkvDB7RnYDB+TvvDB9o2XDB cqx2vDBFueVDB464vDBqGfWDBEt6vDBJb/VDB cnEwvDBHvwUDBY60vDBzMPVDBb8xvDBZQAVDB cIwtvDBJbfUDBpwvvDBPKuUDBe+uvDBlDhUDB czMlvDBzhcUDB3krvDBzhcUDB/pnvDBnvdUDB cpwEvDByLZUDBJbavDBzMXUDBNzPvDB46XUDB cv03uDBpbeUDBzMBvDBYlZUDBv03uDBGZdUDB . 
CfR3bvd/A8u7O/AlTOZ+AAAAg/A F Np m14hsDBlY8YDB cQgvsDBJGQYDBjslsDBb8sYDBuzqsDBueeYDB c+++sDBLykXDBdT0sDBGZBYDB464sDBdoxXDB cZQytDBU4aWDB67MtDB1NHXDBcSgtDBYltWDB cvJguDBbn9VDBRL/tDBJbNWDBR2QuDB3k6VDB cueFvDBmuNWDBmZsuDBGEAWDBDC6uDBwfEWDB ctIVvDBFugWDBRLLvDBfUSWDBAAQvDBUNaWDB cQg2vDBBrSXDBpwgvDBKcvWDB35svDBO0+WDB czh9vDByL/XDBHa9vDBc9gXDB78+vDB46rXDB cpb7vDB03PYDBLH9vDBHvEYDBbn8vDBIwKYDB c8S5vDBPfXYDBO06vDBxgSYDB8S5vDBPfXYDB cy27vDBJbRYDB8S5vDBPfXYDBhA7vDBMdTYDB cmZAwDBv0DYDB8n9vDByLNYDB7R/vDBY6IYDB cuzLwDB14VXDB03DwDBLH0XDBvJIwDBGZlXDB cTifwDB1N/VDBktSwDB8n4WDBKcawDBUNeWDB cnvowDBAAEVDBQgiwDByLtVDBKclwDBcSYVDB ciBqwDB46rUDBAAqwDBdT8UDBJGpwDB03zUDB cBWlwDB/UdUDBJbqwDBtdoUDBxgmwDBQ1gUDB cwfhwDB2OdUDBU4kwDB46bUDBWOiwDBBWdUDB c0iWwDBPfZUDBO0dwDBBrcUDBwKawDBT3aUDB cYlovDBdTQUDBZ7GwDBYlTUDBVj4vDBdTQUDB cYl9uDB3kSUDBLyavDBdTQUDBiWLvDBEtQUDB c+THuDBQ1oUDBIFruDBqGVUDB/UYuDBZQYUDB caRhtDBR2gVDByL5tDB6m2UDBjXttDB5QNVDB cx1BtDB2OlWDBR2UtDBe+0VDBsyJtDB8SJWDB cvJ3sDBjsTXDB0i9sDBfU0WDBb86sDBdTEXDB c2jnsDBfUUYDB03xsDBtIpXDBRLssDBZQ+XDB ce+isDBx1tYDBO0lsDBBrcYDBy2ksDBhrlYDB cBWisDBjs3YDBJbisDBWOwYDBkChsDBXk1YDB CfR9yLv+A7rv++A/7v/+AAAAA/A F P Tm+6df/A5A9A+AFu2zBD5A9A+C+6df/AGz1RCBAAAAAAAAAAAAAAAg/A Np mdottDBPK8UDB cKxWuDBEtmYDB4P3tDBqGPWDBO09tDBiWeXDB c46tuDBIwaZDBlYduDB355YDBy2kuDBMIMZDB cO01uDBoFpZDBhruuDB99bZDBLy1uDB4PpZDB cSh4uDBx15XDB038uDBT3MZDBuz5uDBv0XYDB cRLguDBPKOUDBDC1uDBlDpWDBHayuDBqGXVDB cKcWuDB5lSUDBMdfuDB5QLUDBQgXuDBNeRUDB cO05tDBxguUDBCBNuDB3kcUDBBWDuDBe+kUDB czhxtDBdo3UDB/p2tDBgqxUDBiW0tDBY6yUDB cQgutDBzM/UDBcSwtDBkt5UDBDXttDBPfBVDB CfRBCIA+AxCLM+AxCLM+AmZmZ/A F p Np mmuSCEBQgOxDB c2uPCEBYlfyDBj3UCEBXPfxDBIlQCEBmuOyDB cki/BEB2jn0DBpQNCEBKxQzDBMoGCEB/p4zDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np my2yvDB8nUPDB cNefwDBMdnNDBy2yvDB8nUPDBBrZwDBc94NDB crcsvDBue2NDBNefwDBMdnNDBQg2vDBwfqNDB cKxSvDB2O/ODBGZivDBrcCODBNeZvDB03rODB ly2yvDB8nUPDB . 
CfRUO5E/A3arN/A5iLO/APKcN/A F Np mVjxwDBXP4ODB cJbPxDBnEXPDBVjxwDBXP4ODBEtFxDBnEzODB cx1wxDBmZOUDBpwixDBCseQDBU4wxDBPKoSDB cvJixDBzhwaDBLywxDBHa/WDBzhtxDBO0aYDB c6mKxDB+TQgDBMdWxDBfUKdDB8nVxDBnvFfDB cnvMxDB3kOiDB5l/wDBIw9gDBgVLxDBIFlhDB c7RzxDBsy/iDBPKOxDBnE4iDBQ1kxDBx1AjDB cFuGzDBep9iDBWOMyDBf/9iDBJbjyDBDXJjDB c1NL0DBpbMiDBtdmzDBLHziDBrc+zDBxgmiDB cDCl0DBlYjgDBGZa0DBJbthDBGZg0DB14KhDB cpb80DB1NfZDBsHp0DBXkAgDBwK70DBQ1ObDB c8S60DBDXRUDBjs90DBamvXDBFu10DBGEoVDB ce+u1DBjXiWDB8S60DBDXRUDB78f1DBkCtVDB c46a1DBqGXgDBe+u1DBjXiWDBAAi1DBc9OfDB cShe0DBaR+iDB46T1DBRLGhDBJbE1DBSMliDB cv0FzDB5lcjDBNz7zDBZQVjDBZQrzDBBWXjDB cCB8xDBO0gjDBGZgyDBx1hjDBKceyDBIFkjDB cgVDxDBf/ZiDBepqxDBwKfjDBJbNxDBZQ4iDB ccS6wDBEtPgDBXP5wDBHv7hDBfU0wDBlDzgDB ciWUxDB/puaDBZQAxDBEtYfDBJGKxDBdT2bDB cqGkxDBW58TDB6mexDBf/mZDB2jkxDBFuEVDB c8nOxDBfUYQDB9ojxDBnE1SDBDXcxDBNeNRDB cVjxwDBXP4ODBtdAxDBVjhPDBVjxwDBXP4ODB . CfRVSJF/A1SLN/A4e7N/AAAAQ/A F Np mep90DBY6WUDB l3kA1DBU42ZDB c67T2DBgqFhDB3kA1DBU42ZDBCBJ2DBv0PfDB cKxl2DB6mulDBO0e2DBMIjiDBT3s2DBXk7kDB cDCc3DBOJXlDBKxl2DB6mulDBrcD3DBy2TlDB c9oX3DBDCpgDBDCc3DBOJXlDBsyf3DBGZ5hDB c35f2DB7RlZDBPfP3DBBWxeDBwK42DBJb3bDB cep90DBY6WUDBdoH2DBtITXDB6mj1DBcSmVDB . 
CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mzMewDBcSwNDB cbnAwDBMImODBZQVwDBMICODBY6IwDBqGRODB cQgtvDBBrUPDBOJ9vDBY6uODBSMvvDBDXTPDB LwAAAg/A CsRVTN1+A3bv9+A7rv++AAAAg/A S Np mlD0zDBaRIPDB cnE+zDBR22PDB466zDBRLSPDBf/7zDBwKnPDB cY6R0DBgqFSDB99D0DBwKjQDBnvQ0DBNeXRDB cO0c0DB/pmZDBtIW0DBQ1sUDBO0c0DB4P/WDB c99/zDB5Q2gDBO0c0DBLyMcDBDCL0DB/UHfDB c4PyzDBcSKiDBUN8zDBBWShDBKx1zDBcSuhDB c03pzDBAAliDBiBxzDBCBUiDBqGtzDB67biDB cIFnzDBDCuiDBuzozDB99niDBoFozDBiBriDB ciWlzDBBWyiDBYlmzDBQgviDBiWlzDBW5ziDB cUNvzDBe+liDBiWlzDB6mviDBxgtzDBZQoiDB caRI0DBGZKiDBCB2zDBv0ciDBamB0DBnvTiDB cEtd0DBYlBhDBvJX0DB0i1hDBoFX0DBAAahDB cGEn0DBDXJfDB99l0DBkCjgDBzMi0DBhrDgDB c6m10DB+TTaDBUNv0DBgqhdDBCsx0DBTi7bDB cqx60DBbnjXDBuz30DBNzYZDBvJ80DBwfeYDB c4620DB0iYVDBjs50DB4P1WDBBr30DBZ7GWDB cUNp0DB67rRDBbn10DB35HUDBZQ40DBBr4SDB cf/g0DB1N1QDBIwl0DBgVaRDBe+k0DBSMGRDB c6mN0DBuzxPDBcSb0DBb8cQDBamS0DBfUKQDB cY69zDB14RPDB2OK0DB1NhPDBXPC0DBU4ePDB cIw3zDBJbJPDBue9zDB6mQPDBsH0zDBIFKPDB CfRRDNU/AhDOY/AiHeY/AmZmJ/A F Np mLy30DBBW5TDB cxg50DBR2sXDBXkz0DBPKKVDBfU60DBDCcWDB cGEn0DBtdFgDBBr30DBTijaDBnE00DB67VdDB c6mN0DBsyGiDBDCh0DBJbvgDBcSg0DBbnfhDB c++pyDB35AjDBxg6zDBR2uiDBYlTzDBtd9iDB cpw9xDB03/iDBktbyDBoFCjDBf/LyDBwKBjDB cShsxDBY68iDBgV5xDBNe/iDBdosxDBVj8iDB LwAAAg/A CsRVTN1+A3bv9+A7rv++AAAAg/A S Np mmZBxDBpwzODB cgqvxDBhrpSDB7RixDBaRoPDBlDsxDBJGaRDB cmuexDBEtOcDBLH5xDBmu5VDBiBuxDBwKFZDB cEtRxDB143eDBpbaxDBEYHdDB5QUxDBZ7+dDB LwAAAg/A CsRVTN1+A3bv9+A7rv++AAAAg/A S Np mO0LsDB2jOTDB csyMsDBx1NUDBamOsDBmZiTDBMIQsDBuz5TDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mJGgsDB99pRDB cKxbsDBMdzSDB0igsDBx17RDBxgisDB99lSDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m5lLtDBKchRDB cU44tDBuz1RDBqxftDBsHPRDBZ7ntDBAAmRDB cKcQuDB++9RDBSMAuDB9o8RDBMdIuDB++9RDB c78nuDBSh5RDBoaYuDB++9RDBwKguDBGZ9RDB cZQvuDBQ1yRDBUNouDBGZ5RDBiBwuDBfU0RDB c46puDBhA7RDBSMuuDBLywRDBgVquDBIF6RDB cdoYuDBiWYSDBFukuDBueGSDBjXfuDBwfQSDB coFetDBZ7mSDBfUHuDB0isSDBTiwtDBIw8SDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m4PNtDBtIPVDB coFPvDBEtkTDBDXxtDBOJQUDBTieuDBfUsTDB 
cgVJwDBU4qTDBpbivDBgqhTDBXP2vDB35lTDB cSMwwDBXPiTDBCsXwDB8nuTDBCsjwDBAAuTDB czM7wDBPfXTDBNzzwDBR2eTDBuz3wDBepbTDB cKxDxDBXkPTDBQ19wDBaRUTDBktDxDBBWPTDB cmuswDBKxSWDBNeFxDBlYWTDBBW6wDBOJoVDB cDCXwDB4PPXDBXkmwDBFDmWDB5QewDBFu8WDB cUNSwDBSMaXDBzhVwDBoFTXDBuzTwDBzhWXDB cT3PwDBy2fXDBf/RwDBCsaXDBb8PwDBCBgXDB cGZSwDB8nYXDBamPwDB/UfXDBPKSwDB2OZXDB cDCZwDBzh4WDBGZVwDBCsQXDBueWwDBnEBXDB cShlwDBhA1VDBHvewDBGZlWDBJGkwDBXkNWDB cCsrwDBKczUDBQ1mwDBjXeVDBRLswDBIFKVDB csHrwDBpwbUDBTirwDBBrsUDBhrswDB++hUDB cJbkwDB8SXUDBXkqwDBTiZUDB0ilwDBAAYUDB cIw3vDB7RHUDBwfdwDBZ7SUDBpw9vDBIFIUDB cEY1uDB99LUDBrcgvDBSMEUDBNeMvDBWOGUDB cIFmtDBMdFVDBShauDB/pSUDBktAuDBwKjUDB cLHbtDBrcWVDBgqitDBy2JVDBFudtDBdTQVDB cUNHtDBnvHWDBzMUtDBHvmVDBb8MtDBMd1VDB cAA8sDBzhuWDBPfDtDBbnTWDBKc/sDBPKiWDB cwKtsDBDCkXDBLH3sDBMIAXDB8nysDBLyQXDB cKxosDBAAiXDBe+ssDBHvkXDBzMpsDBRLiXDB cnEcsDBpwfXDB46ksDBtdgXDBf/fsDBW5eXDB cnvNsDBShpXDBaRXsDBQ1gXDBQ1RsDBcSkXDB cxgHsDBkCxXDBjsLsDBMIsXDBT3JsDBCsuXDB CfRhCKo9ABDMQ+AJjMS+AAAAg/A F Np mQ1+rDBFuubDB cHvDsDB14NbDBU4BsDBQ1mbDBBWCsDBYlXbDB crcPsDBiB9ZDB6mHsDBT3yaDBYlKsDBNeXaDB c2jYtDBBreVDBsHisDBMdXYDBzhzsDB14vWDB cFD1vDBCsMUDB3k/tDBsHJUDBe+/uDBSMGUDB c99ewDBMdXUDBjXCwDBfUOUDBsHSwDBnvPUDB cSMrwDBdobUDBGEiwDB+TZUDBlDpwDBDXXUDB cMIswDBEt0UDBoFtwDBKcfUDB2OswDBDCwUDB clDiwDBqx9VDBYlrwDBQgNVDBlYmwDB0imVDB ce+SwDBuzRXDBx1cwDBqxZWDBiBZwDBfU2WDB cqG9vDB4PXYDBHaNwDBsHrXDBmuEwDBxgAYDB cLyZvDB46zZDBlYyvDB1N3YDB0invDBIwYZDB cqxctDBLyebDBuz3uDBf/2aDB8SMuDBTibbDB cLyusDB67PbDBpbNtDBx1fbDB359sDBqxNbDB cRLAsDBVjxbDBKcdsDBmZSbDBamPsDBx1lbDB LwAAAg/A CsRhCKo9ABDMQ+AJjMS+AAAAg/A S Np mU4fxDBoFd2DB ce+SxDBtIv1DBnEDxDB4Pp2DBgVPxDBPK51DB csydyDBkC10DBJbexDB/pP1DB8n7xDBkC10DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m67VyDBgVsrDB cPKxzDBCsFwDB3kOzDBnEWtDBmuozDBrcjuDB cf/3zDBv04xDBwf0zDBQgswDBf/3zDBHvRxDB c468zDBGE9zDBf/3zDBkClyDB468zDBDCRzDB cx14zDBy2o0DB468zDBiBM0DBfU6zDBwKa0DB cYl0zDB8n90DBxg4zDBvJs0DBAA1zDBNe+0DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np muz7zDBGZR0DB 
cb8/xDBgql0DBiWgzDB1N80DBzhiyDBBrM0DB cNe9wDBaR41DBv0lxDBTi40DBZQGxDB8SY1DB cv0ZxDBdoh2DB782wDBiBQ2DBFu4wDBzMt2DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mXkm4DBoa30DB caR13DBZQy0DBHah4DBnv70DBv0J4DB3ku0DB cFD72DBepq1DBtdc3DBCs20DB/pG3DBdoT1DB cuzw2DBJGG2DB/p22DBDXz1DBEYz2DBmu81DB c99r2DBPKT2DB3kv2DB3kK2DB99r2DBPKT2DB cLy02DBHvZ2DB99r2DBPKT2DBdTx2DB3kY2DB cR2S3DBZQh2DBDX62DB5lb2DBShO3DBamj2DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np my2W7DBXPnpDB cfUo6DB+TJqDBaRJ7DBzh0pDBx126DBT38pDB csyG5DBsyyrDBBrD6DBIwoqDB/Uk5DBsyMrDB c9oj4DBUNssDBzM54DBcSEsDBBrs4DB03XsDB cBWZ4DBMIBtDBueg4DBfUzsDBrcc4DBhA6sDB cCsV4DBnELtDB67X4DBjXEtDBEtU4DBNeOtDB c2Or4DB+TxsDBf/X4DBJGDtDBpwl4DBQg3sDB cpwl5DBoFEsDB++74DBgVesDBXPR5DBoaSsDB cFuP6DBShmrDBsyz5DB8S6rDBhrA6DBv0urDB cwKw6DBf/XrDBIwZ6DB++grDBb8k6DBamarDB cgqC7DBOJVrDBCBx6DBsyXrDBgqC7DBW5VrDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mDCT5DB0iXsDB c/pL4DB14fuDBoaw4DBbnCtDBO0c4DBLHrtDB coaE4DBqxRvDBpbG4DBc9vuDB67F4DBFDBvDB cjXD4DBDCivDB46D4DBwKXvDBVjD4DB8ncvDB cjXD4DBR2qvDB5QD4DB99kvDBFuE4DBMdtvDB cU4O4DB1NCvDB4P+3DB++gvDBSML4DBKcKvDB cxgN5DBkCotDBSMe4DBJGguDBLyy4DBSMCuDB cqxr5DBlYRtDBmZV5DBfUgtDB1Ng5DBlYRtDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mrc24DB+TIuDB cEYh4DBW59uDBAAq4DBMIYuDBPKm4DBdoquDB cjXS4DB5lYwDBuzZ4DBjXcvDBjXS4DBR24vDB ctdU4DB/UGxDBjXS4DBQ1nwDBtIT4DBtI3wDB cY6Y4DBf/axDBIFV4DBBWNxDBY6Y4DBf/axDB cBWZ4DBmuFxDBY6Y4DBf/axDBqxY4DBsyMxDB cW5h4DB/pcwDBPfa4DBgq3wDBgqd4DBIFqwDB c+TF5DBW5UvDBZ7p4DBSMDwDBiB14DBWOqvDB cXkd5DBnE8uDBxgM5DBtdLvDB7RW5DBY6EvDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mHvp4DBBr6vDB cJbC8DBPfnzDBgV74DBqGoxDBGEc6DBR2CzDB cQgu9DBZ71zDBgqg8DBfUyzDBYlO9DB+T6zDB cmZT+DBU4vzDBsy69DBXP0zDBjXH+DBv0yzDB c++l+DB2OuzDB/UW+DBwKvzDB/Um+DBO0tzDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mWOU8DBdTwzDB cjXA7DBuzG0DBPK97DBDC6zDBGEf7DBzM+zDB cNzj4DB1NR0DB99T6DBZQT0DBTiw5DB0iS0DB cYlR4DB35Q0DBe+d4DBLHR0DBY6X4DBhAR0DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mqGL2DB5QG0DB cqGI2DBPK00DB4PK2DB5lA0DBgqM2DBCsu0DB LwAAAg/A 
CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mhAl2DB3kW0DB c8SX2DB99j0DBEYh2DBv0b0DBqGb2DBwfe0DB cKc11DBf/p1DBOJK2DBW520DBam31DB4PS1DB czM21DBzM+1DB6701DBShv1DBShy1DBcS51DB cdoW2DB35Q2DBx191DBlYI2DB8nK2DBW5N2DB ctIw2DBoaV2DBFud2DBBrS2DB67o2DBtdX2DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np moFm3DBW5M1DB cgqL3DBe+p1DBlYZ3DBe+Q1DBzhR3DBQ1e1DB cjXz2DB03+2DBkt92DBPfE2DBaR22DB1Nh2DB cZQx2DBcSk3DBO0y2DBPfE3DBFDt2DBWOg3DB cjXz2DBxge3DBHvy2DBEtl3DBsyy2DBueg3DB c7R22DB3kS3DBSh02DB3ka3DBzh12DBbnW3DB cO0/2DBpbw2DBue42DBsyG3DB6m72DBgq72DB ccSS3DBnEV2DBSMD3DBpbn2DBZQJ3DBKxZ2DB c2jj3DB14Q2DBFuV3DB7RT2DBxgh3DBb8S2DB czMs3DBAA51DBtIo3DB+TM2DB++q3DBXP/1DB cjsr3DBXPc1DBc9t3DBe+v1DBAAt3DB+Tl1DB cNem3DBCsS1DBaRr3DB+TZ1DB7Rl3DBZ7N1DB LwAAAg/A CfRhCKo9ABDMQ+AJjMS+AAAAg/A F CsRhCKo9ABDMQ+AJjMS+AAAAg/A S Np mFux2DB2jrqDB camL3DBwKorDBGE/2DBIw8qDB++F3DB1NTrDB c3kq3DBYl4sDBbnS3DBjXCsDBktd3DB2jgsDB c9oo3DBlYFtDB14r3DBDC7sDBzMp3DBU4CtDB cJGV3DB2jntDBLyk3DBdoWtDBY6e3DBe+YtDB c/pi2DB4PZvDBlY92DBqxKuDBnEy2DBUNyuDB cv0N2DB2j3wDBcSY2DBQgzvDBv0N2DB/UbwDB c/UL2DBJGYxDBv0N2DBtdCxDB/UL2DB/UNxDB c6mL2DBgVjxDB/UL2DBy2bxDB6mL2DB8nfxDB c6mL2DBO0nxDB6mL2DBpwkxDBAAM2DBR2oxDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mR224DBKxR0DB c4PN4DBSMX0DBR2t4DBPKQ0DB67Y4DBGZP0DB cVjb3DBkCN0DBO0/3DBOJg0DBW5o3DB4PO0DB cO0T2DB3kt0DB/pB3DBEtK0DBRLj2DB7RZ0DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mHat2DBO0t0DB cxg01DB9oy0DB4Pb2DBhre0DB78F2DBsHo0DB ciW20DBFDu2DBgqK1DB03L1DBPK+0DB2jA2DB cQ1w0DBlDW3DBIF00DBcS72DB5lx0DBepI3DB ctIx0DB5lj3DBFuw0DBsHY3DBaRv0DBmuh3DB cnvz0DBzhb3DBUNx0DBgqj3DBtdz0DBmZc3DB cwK60DBO0I3DBjs10DB+TV3DBxg30DBLyO3DB cQga1DBKxU2DBHaC1DBfU22DB78M1DB14j2DB cpwB2DB99+1DBOJl1DBW5I2DBVjx1DB+TA2DB cdoT2DB2jA2DB1NF2DBCs+1DBf/Q2DB/UC2DB cb8t2DBGEt0DBMdw2DBiWt1DBhAs2DBMIE1DB LwAAAg/A CfRhCKo9ABDMQ+AJjMS+AAAAg/A F CsRhCKo9ABDMQ+AJjMS+AAAAg/A S Np m3kcyDBoa2rDB cCBTyDB8SpoDBHvwxDBzh4qDBAAzxDB2jspDB ccSfzDB1NinDBAAgyDBAAOoDBjX+yDBhrnnDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np maRJ3DBdoirDB 
cy214DBVjVpDB8Sh3DBxgrqDBnEN4DBrcAqDB c4Pk5DBEtmoDBhAF5DBbnFpDBxgS5DBhA0oDB cMdk6DBNz5nDBc945DBwKXoDBXPN6DBShFoDB c8S/6DBLHwnDBiBt6DBue1nDBR216DBe+xnDB cEtK7DBwKunDB5QB7DBFuvnDBWON7DBIwtnDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mgV55DBO0aoDB cOJo6DBhAOoDBfU65DBpbQoDBrcf6DBGZPoDB cIw98DB786nDBXPZ7DBUNGoDBzMM8DBzM+nDB coahAEBam8mDBEYY+DBhA1nDBqGu/DBZ7anDB chAYCEBFuPlDBu+KBEBx1emDBXk0BEBsyGmDB caRHDEB67mjDBmOpCEBc90kDBK8EDEB4PVkDB cy2KDEB5QLjDBRrHDEB46ejDBgKMDEB46SjDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m5Qm4DB/pGgDB cDXa4DB/p2gDBIFp4DBGEWgDBGZe4DBkCogDB cOJV4DBdojiDBCsQ4DBEtZhDB2jH4DBx1AiDB cf/45DBYlSjDBT3i4DBKxGjDBvJc5DB35EjDB cwKT7DB4PkjDBtIT6DBf/ejDBc926DBwKejDB cRLv/DBpbIkDBkCx8DBue4jDBLyP+DBVj8jDB chrhBEBepdkDBdTaAEBIFRkDB+e+AEBsHikDB co6FCEB46UkDBm5tBEBoFckDBKx5BEBb8WkDB cFORCEBcSQkDB46KCEBoFUkDBE4MCEBCBVkDB c7xVCEBzMMkDBpbSCEB99OkDBW5XCEB5QLkDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mHa92DBU45mDB cZQa3DBKxXmDBShI3DBwKwmDBEtP3DBXPimDB c5QZ5DBO0FlDBPf93DBKx0lDBFun4DBy2RlDB clD26DB2jDlDBuz05DBsH/kDBYlZ6DB2jDlDB cGZ/7DBIwOlDByLP7DB2jDlDBEtn7DB03FlDB cgVV8DBhAYlDBXkB8DBXkPlDBgVV8DB35UlDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mTin5DBPKFlDB cDXP9DBIFJkDBEYy6DBpwokDB0iA8DBy2VkDB cMdF+DBpbBkDB03h9DBIFGkDBVjy9DBjXCkDB coaW+DBpbBkDBLyG+DBDXBkDB8nV+DBnE/jDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mgqb5DBMdzhDB chAC6DBjsihDBTim5DBGErhDBKc35DBtIthDB cIwb6DB8S3gDBueO6DBjXWhDBYlP6DBBWDhDB c7RJ7DBktKgDB1Nq6DBe+ogDBb896DBShbgDB c8SX7DBAAEfDBoaS7DBgV6fDBzMV7DB2OlfDB cwKY7DBNereDBTiX7DBXPAfDBIwZ7DBoaueDB cR2N7DBWO2eDBoaX7DBFDqeDBR2O7DB2O1eDB c9ov6DBqxnfDBwfB7DBmZCfDBqx66DBqGZfDB cIw05DBgqkgDB+Ta6DB67BgDBnvK6DB8nXgDB cQgi5DBSMpgDB8Sv5DBW5ngDBQgo5DBy2ngDB cjsZ5DBdTpgDB9of5DBQ1pgDBKxW5DBdTpgDB cjsZ5DBdTpgDBfUb5DBdTpgDB9ob5DBb8ogDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mBW55DBmZegDB chAH6DB6mGfDBfUH6DByLSgDBhAH6DB+TpfDB cgqC6DBepXeDBhAH6DBep5eDB2OB6DB78TeDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mdT95DB5lnhDB cXPu5DBdoOiDBLH95DBTi2hDBjX45DB6mDiDB 
cpwg5DBe+YiDBLHs5DB78QiDBDXY5DBmufiDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mgV55DBGZ5hDB ciB87DB78yhDBqGj6DBhrshDBOJR7DB78yhDB cCB6/DB990hDBfUP9DB78yhDBhrm+DBFD1hDB c8HzBEBwfBiDBDCnAEB140hDBdoLBEBkt3hDB cJ7UCEBZ7MiDBLH7BEBueDiDBWOOCEBvJHiDB cwfKDEBR2HjDBuenCEBZ7ciDBRWHDEBR2HjDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m6754DBEYbpDB cKxK6DBLyLpDBktO5DB35GpDBqxv5DB78MpDB cmup9DBGEqqDBLHO7DBT3IpDBnEF9DBe+spDB ctI0+DBEtFsDBQg89DBDXJrDB5QS+DBU4yrDB c9oqAEBPfLsDBBrv/DBqxmsDBEtOAEBBWfsDB cMoaBEBAAgrDBHv4AEBNeBsDBXPaBEB5lhrDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np muee5DBLHIfDB cGEP9DBmZAbDBJGw6DBgq3dDBLH97DBXkRcDB cwKe+DBMIiZDBc9a9DB8n0aDBPfc+DBdo5ZDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mdIkEEB0ikSDB csHCHEBwfwODBPKaFEB78zRDBmZLGEBpwPPDB cvplHEBJGGPDBcyMHEBlYqODBjsaHEBoFDPDB cpbrIEB67hPDBAA9HEBwfMPDBPKUIEBBWXPDB c78KJEBCs0PDBo6vIEB99jPDBnERJEBbnlPDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mIwWIEBjsvZDB ci29KEBlDnbDB7xDJEByLBZDBpwbKEBCseZDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mGE+GEBfUygDB c67XIEBktchDBFjTHEBBr2gDBiBHIEB146gDB cZbtIEBvJliDBuegIEBuzthDBcdmIEBHaQiDB cmOKJEBNz1jDB9o2IEB3kAjDB1tBJEBiWZjDB c3ZWJEBTihkDBVjMJEBXk9jDB3ZWJEBDXkkDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mlD6HEBY6whDB cCMKIEBU4HiDBzB8HEB1N8hDBY6DIEBU4HiDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mNzfJEBdTuaDB c0X4JEBCsGbDB+znJEBKxCbDBH6uJEBkCJbDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np msyrGEBnvxQDB colQHEB3kEQDBbcwGEBQg7PDBsSEHEB3kEQDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mZ7aFEBpwRJDB cZQQGEBLyuIDBgVrFEBzhyIDBFO+FEBtdaIDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mVDJQEBXPsUDB c4PoREBNe/VDBgKlQEBY6cVDB6GKREBNzkVDB cxA8UEBy2xYDBJmsSEBAA8WDBb89TEBgqLYDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mzMsREBGEUjDB c5QvUEBb8tlDBXPuSEBBr/jDBfUtTEBsHAlDB cqmyVEBkClmDBShBVEB9o6lDBWZhVEBkClmDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mUNVLEBU4uqDB cMdpMEBwf0uDBx1lLEBhr4rDBShRMEB7RltDB cB2JNEBAAGwDBzhyMEBShSvDBrcANEBjXovDB cKcUNEBc9kwDBFONNEB/pQwDBqxQNEBjsawDB 
cwKaNEBMdzwDBcdVNEBO0nwDBwKaNEBhrzwDB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np myLeHEBU4s6DB c14qHEByL/5DBdIkHEBT3j6DBsynHEBamM6DB cf03HEBv0B4DBD30HEBiBU5DBf03HEBfUx4DB cENyHEBepN3DBf03HEBXkv3DBUY0HEBf/e3DB cLytHEBdT52DBpbxHEBxgH3DB0CtHEB1N22DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mx1YBEBvJz9DB cKcwAEB5Qy9DBLSMBEBkCu9DBXP9AEBfUx9DB clDjAEBv0y9DBCBsAEB5ly9DBeenAEBtdz9DB c/UdAEBsyv9DBjsiAEBKxy9DBc9ZAEBBWv9DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np m2OlBEBPfVAEB cLHrAEBxASAEBkCSBEBfUUAEBqR+AEB++TAEB cpbPAEBN+OAEBbclAEBZbRAEBy2OAEBYFPAEB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mx15+DBzMMAEB camL/DBqG8/DB5QB/DBEYIAEBcSG/DBRrCAEB cQgR/DBfUy/DBQgN/DBLy4/DBmZP/DBSh1/DB cMdU/DBb8t/DBtdS/DBR2w/DBjsV/DBjss/DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mWuVAEBEYxAEB cr8aAEBXvlAEB8nYAEBTCuAEB1YZAEBNzpAEB cKRdAEBmufAEB1tbAEBHvjAEBcyeAEBUNeAEB cbHQAEBlY2AEBJbXAEBnklAEBBWXAEB8HxAEB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mLyXAEBzM18DB cNThAEBepQ9DBZ7aAEBgV+8DBpwdAEBIFI9DB cRWmAEBDXe9DB6GjAEBiBV9DByLpAEBDXe9DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mhAf/DBY6L9DB cPfHAEBSMs9DBBWy/DBzhP9DBBLAAEBueg9DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mIw57DB1Nz/DB caRJ9DB4PG/DBOJO8DBsyc/DBzhr8DBW5L/DB LwAAAg/A CsRNyID/Av6qL/AxCLM/AAAAg/A S Np mCBDzDBgVGwDB clDkzDB78cwDBnvOzDBLyLwDBKxXzDB7RYwDB cSMzzDBzhiwDBktmzDBc9dwDB8SyzDBHvgwDB cep2zDBv0TxDBkt5zDBXkvwDBep2zDBO0FxDB cc96zDBoFEzDBep2zDBbn5xDBc96zDB8SeyDB caRxzDBAAd0DBc96zDBZQUzDB/pB0DBktQ0DB ciBPzDB+Tk0DB99mzDBmuk0DBkCbzDB35l0DB cb83xDBf/m0DB8S1yDBY6g0DBHaOyDBShV0DB cShExDBY6i1DBsHjxDBSM30DBKcTxDBjXN1DB cqxqwDB3kF2DBLH3wDB8S21DBtdAxDBsyP2DB c5QbwDBvJF2DBWOnwDB35D2DBQ1ewDBmuG2DB ctINwDBvJ/1DBmZYwDB14D2DB++OwDBAAB2DB c99TwDBpbW1DBDCEwDBFD21DB99PwDBZ7e1DB c+TbxDBCBA0DBqxfwDBDX90DBoa1wDBwKQ0DB cEYJzDBaRE0DBnv0xDBRL1zDBJb+yDBpbc0DB cjsPzDBaRiyDBFuWzDBR2mzDBDCPzDBlYBzDB cyLSzDB4P2wDBueQzDBpb9xDBQgXzDBuebxDB cCBDzDBgVGwDBf/PzDBZ7mwDBW5JzDBmZWwDB . 
CfR9yLv+A7rv++A/7v/+AAAAQ/A F Np mb8h4DBDCIgDB csyu3DBoFugDBvJQ4DBdTOgDBCs83DBIwegDB coal3DB46ehDBLyf3DB/p+gDB++k3DBbnRhDB csHn3DBDCGjDBTim3DBNeBiDBIwl3DBIFkiDB cShu3DBQ1jjDBTin3DBmZQjDB9om3DBhAcjDB c6mE5DBEY5jDB9o43DB35tjDBEts4DBW5yjDB cjs66DBAAZkDBjsr5DB99DkDBNeY6DB99HkDB cPKc7DBO0dkDBwKC7DBFuckDBe+U7DB6mfkDB cGZp8DBBrNkDB8n17DBrcXkDBjsP8DBCsTkDB cMda9DBQgGkDBe+58DBLyJkDB+TJ9DBQgGkDB cf/39DBwfAkDBsyd9DBQgGkDBjsy9DBhrCkDB c4PR9DBtd5jDBiB59DBnEAkDBGEb9DBcS6jDB cueQ8DB67wjDBwf48DBHa3jDBx1o8DBsy1jDB cGE35DBR2TjDBsHk7DBIFojDBQ1c6DBU4fjDB cAAx4DB35AjDBJbn5DBT3OjDBx1+4DBnEGjDB csHW4DB0iqiDBe+n4DBTi9iDBb8b4DBYlyiDB cKxU4DBe+BhDBpwE4DBYlSiDBb8M4DBepbhDB c03h4DB/pbgDB78Y4DB4P0gDBVje4DBCspgDB cb8h4DBDCIgDB/pi4DBBWYgDBXkk4DBRLCgDB . CfR9yLv+A7rv++A/7v/+AAAAQ/A F Np mWOq4DB678vDB cLHH5DBsHtxDBgVo4DBbnbwDBBW44DBpwUxDB ctIY6DBy2FzDBjsb5DBIFPyDBkt95DBFuryDB crc46DBbnwzDBAAj6DBamQzDBtI06DBTihzDB c6m46DB5QC0DBR256DBQg1zDBAA76DBTi9zDB c03t6DBxgK0DBMI36DByLF0DB03t6DBxgK0DB cGZQ7DBY6B0DB03t6DBxgK0DBb8J7DBZ7D0DB cbnC8DBjX1zDBsyl7DB2O7zDBPfv7DBnE8zDB c35X8DBcSuzDBkCI8DBNezzDBamZ8DB35uzDB c8nE8DBRLrzDBMdV8DBmZtzDBgqL8DBmZtzDB c8SU7DBzhUzDBZ7z7DBY6lzDBQ1j7DB9oczDB cRLk5DBwK7xDB67p6DBmZ+yDBsHF6DBTidyDB cIFH5DBAAOxDBwfY5DBf/uxDBwfQ5DBhAexDB cWOq4DB678vDBSh74DBiW6wDBLHp4DBOJEwDB . 
CfRNyID/Av6qL/AxCLM/AAAAA/A F Np mAAWyDBwKqrDB c5Q+yDBQgTuDB2OyyDBVjdsDBgq0yDBepZtDB c++/yDB9oBvDBKxAzDB6miuDBCsBzDBueyuDB c3k9yDBXPQvDB0i/yDBXkFvDBzhAzDBcSNvDB cNetyDB++BvDBqG9yDBktQvDBGZuyDBNzCvDB cyL4xDBkCPuDBepbyDB/UyuDBe+HyDBc9guDB ckCTxDBHagtDB8nqxDB/p/tDBGZfxDBPKwtDB cFudwDB7RHsDBtd+wDByLGtDBCBrwDBv0lsDB cNeOwDBzMkrDBhrYwDBEt7rDBQgSwDBwKwrDB c2jBwDBDCwqDB3kLwDB3kbrDB++BwDBDCwqDB cTiDwDB67HrDBShAwDBDCwqDBKcDwDBlYHrDB cueTwDBpbasDBNzHwDBFugrDBXPLwDB14CsDB cW5nwDB/UStDBBWawDBlDusDBHafwDBAA/sDB cKxMxDBkCPuDBbnvwDB14jtDB67BxDB5Q/tDB cTiGyDB67NvDBepfxDBTiquDBY6vxDB8S1uDB chrxyDBPfwvDBBWRyDBEtZvDBWOlyDBdomvDB ccSUzDBBrUwDBue5yDBgq2vDBnvGzDB5lKwDB cmZxzDBiBhwDBktlzDB8nhwDBjsuzDBzhfwDB chAwzDB0iOwDBnvzzDB+TiwDBjXwzDBR2QwDB cUNrzDB1NivDB14tzDBzMBwDBbntzDBxgvvDB cLyBzDB5QCtDB35gzDBwfpuDBxgUzDBx15tDB cAAWyDBwKqrDBdo7yDBAAwsDBMdVyDBFunrDB . CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mgVn4DBtd8yDB chr42DBWO5wDB+TK4DBVjOyDBKxM3DB/pxxDB c67/2DBmu0vDB46w2DBoajwDBXk72DBqGKwDB c6mB4DBKcGtDBNeM3DBBW3uDBXkv3DBPKCuDB cDCV4DB/p3rDB5QK4DBuzpsDB7R+3DB++OsDB cHaD6DB++hqDBNz84DB14OrDBSMR5DBMdDrDB cMda7DBYlbpDB8nT6DBwKXqDBCBT7DBwKzpDB cYld6DB++NpDBf/d7DBEYQpDBLHm6DBGZPpDB csHT4DBXk1pDBShw5DB2jGpDBnvu4DB++JpDB cGEM3DBWOgrDBKx93DB4PXqDB46P3DBsH2qDB cDCt3DBjX+sDBlYI3DBKcIsDB0it3DBShrsDB cdTL3DB3kAuDBShs3DBqxQtDBxgT3DB8SwtDB cqGa2DBbn/vDBqx32DBfUnuDBgqi2DBlDVvDB cHvO2DB5lJxDBXPV2DBsyXwDBhrO2DBGExwDB c0iS2DBzMnxDBpwO2DB5lUxDBuzM2DBCBlxDB cMdY3DB99AyDBIwz2DB03zxDBDX52DBBWxxDB cgVn4DBtd8yDB03q3DBWOKyDBJbu4DB++yyDB . 
CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np m99y2DBNexqDB ciBB3DBpbBtDBT3B3DBW5XrDBmZT3DB35YsDB cXk81DBwf5vDB3kj2DBGZCuDBBWH2DBCsyuDB cEtv1DBepbzDB35x1DBdo/wDBEtv1DBOJVyDB cUNl1DBHvm0DBEtv1DBJGxzDBc9u1DBWOT0DB caR70DBueQ1DBShf1DBLHy0DB5QG1DB5lF1DB cW5O0DBqxX2DB67k0DB5lm1DBtIr0DB/pJ2DB c6mgzDBXPN2DBRLE0DBtId2DB78tzDB67P2DB c35UyDBhAY2DBzhMzDByLJ2DBcSxyDB6mN2DB cMd8xDBLyg2DBtdMyDBqGb2DB35EyDBDXe2DB cv0rxDBO0l2DBEt1xDBmui2DBrcnxDBjXk2DB cHvwyDBy2q2DBPfEyDB67t2DBOJbyDBBrv2DB cwKfzDBpwg2DByLAzDBoan2DB0iOzDB8nc2DB cyLK0DBZ7v2DBNzqzDB/pj2DBPK5zDB99r2DB cwf00DBdos2DBnvb0DBCB02DBv0y0DBktw2DB c6750DBZ7U2DBrc30DBtdl2DBWO40DB7Rh2DB cAAO1DB4Pa1DB6m80DB/pB2DBKcC1DB++j1DB cgqN2DBv0f0DBgqe1DB1NM1DBQ1y1DBWOp0DB cMdM2DBJbL0DB++X2DB1Nc0DB0iN2DBEYV0DB cb8I2DB2jbyDBZ7I2DBe+qzDBHaL2DBpb7yDB cMdt2DB785uDBiWD2DB5QTxDBsyS2DBsy8vDB c2jp3DB678sDBiB+2DBpbQuDBuze3DBnvbtDB cBWa3DBIFSsDBx1s3DBzhzsDBIFh3DBO0fsDB c35B3DB7ROrDBHvP3DBKc8rDBdTN3DBRLjrDB c99y2DBNexqDBhr92DBTiGrDBJbx2DBx1rqDB . CfRNyID/Av6qL/AxCLM/AmZm5+A F Np moFL3DB03drDB cZQt4DB8nOqDB7Rg3DBMIFrDBWOb4DB/UdqDB cDXj6DBpbZpDBx1I5DBsH4pDBy2+5DB5lepDB cAA0/DBR2ypDBCsT8DB0iJpDBIwD+DByLupDB c35EDEBrcJnDBFOBBEBKx4pDBjMSCEBbnpoDB c9IaEEBnv2iDB8yqDEB5lBmDB2OIEEBx1YkDB ct9/EEBPKgfDBXkmEEBgqyhDBxVtEEB9owgDB cXEMJEB4PLVDBAA5FEBKxUZDBVjgHEB67hWDB cqxPPEB8n0UDBuTKLEBMdlTDBeJRNEB0isTDB cnviQEBampVDBUYrPEBfUEVDBChHQEB14RVDB cAAuREBShXWDBU4qQEBBrwVDBW5nREB1NTWDB c4adTEB4PxXDBVjVSEBNezWDB2D2SEBoFRXDB c2jmUEB9oeYDB5wwTEBlDBYDBiWTUEBShLYDB cE4IVEBktFYDBKRpUEBBWhYDBaxJVEBY6MYDB coavUEBnv1XDBIQHVEBIw4XDBaRyUEBLy2XDB cFj9TEBY6WXDBHPcUEBNzuXDBmuPUEBgVuXDB cMIQSEBW5mVDBqGZTEB99nWDBGkzSEBjsjWDB cOUSREBy2DUDBDi+REBEtIVDBKRjREBY6mUDB c2jzQEB99/SDBLSIREBqGvTDB358QEBgVaTDB cg1WQEBx1pRDBgqiQEBaRQSDBlDcQEB8nsRDB camoOEBIwMQDB4P+PEBFucRDB/0APEBlYiQDB csnyNEBb8cPDBWZYOEBcS+PDBQVDOEB67nPDB cXPBKEBFDuLDBP/kMEBy2pODB8SSLEBZQyMDB cO0zHEBpbmJDB2jQJEBoFFLDBBWmIEBhAXKDB c5QUGEBc9SIDBT3THEBktHJDBRW0GEBHatIDB c6bzFEBYl/IDBHaIGEByLJIDBW5/FEBPftIDB cc9zEEBSMYLDB5FfFEBqGdJDBG5GFEBBWtKDB 
cQg/CEB0igPDBe+IEEBSh5MDB4PpDEBXPyNDB cZ7/AEBEtMUDB5lZCEBEtERDBeemBEBlYwSDB chAo/DB3kQXDBoakAEB6mOVDBW5OAEBaRKWDB cEtG6DBqGdeDBVjI/DBjs5XDB78H6DBrcOeDB csy+5DBDXdgDB/pC6DBQ1MfDBJbJ6DBjXQgDB cyLV6DBf/IgDBqx35DB67lgDBcSc6DB8SCgDB cepb7DBRLkeDByLC6DB14agDBtIj7DBEYHeDB cOJN7DBHvDgDBktT7DBEtCfDBGZV7DBJGkfDB cMdX6DBaR+gDBiWA7DBwKfgDBO0m6DBEYhgDB czh+5DB2jrhDBxgN6DB++QhDBJbE6DBlDZhDB coaB6DBoF6hDB2j75DBU40hDBKc85DBNz7hDB ctdb6DBbn1hDBTiM6DB5Q2hDBT3P6DB5l3hDB cjsq8DBzhxhDBy2P7DBCsshDB2j37DBLyvhDB cZQL+DB4PzhDBnvK9DB8nyhDB6mq9DBKxxhDB ciWkBEBEY8hDBCs1/DBnE4hDB7RvAEBGExhDB cRWpCEBktfiDB7c6BEBoFBiDB+zTCEBepEiDB cdo7CEBsHziDBEYwCEB0ioiDBM90CEBkCqiDB cDiLDEBaRQjDB3kFDEBepAjDBAAMDEBpbJjDB cTC4CEBCBhkDBhLIDEBSMBkDBY6+CEBf/UkDB cT35BEBSMzlDBdoiCEBrcGlDB/JSCEBYlVlDB cjsz+DBU4pnDB5wFBEBjsymDBihVAEB3kFnDB cx1E8DBQ1DoDBZ789DBTi6nDBy2/8DBFu2nDB cXkA6DBAAXoDBpwj7DBSMLoDBmua6DB8SJoDB coFL3DB03drDBjs84DB2j6oDB78g3DB/pdqDB . CfR9yLv+A7rv++A/7v/+AAAAQ/A F Np mKRQBEBO0O2DB csyUBEB++R2DBt9QBEBmZR2DB0iTBEB4PS2DB cDCsBEBCBa2DB/0aBEBnvQ2DBIQmBEBGEV2DB cjsNCEBTi12DBc93BEBXPk2DBv0ACEB2ju2DB cJbvCEBhA62DB8yXCEBhA72DBFDlCEBam72DB cdzCDEBQ1y2DB++1CEBhA52DBNT8CEBQ1y2DB cJm6DEBR2m2DBl4VDEBQ1y2DB5lnDEBqGq2DB c0iLEEB46s2DBp7AEEBpwl2DBcyFEEB/pq2DB cb8fEEBtIq2DBWuREEBDXv2DBbcZEEBsHp2DB cY6LGEBcSc3DBJ7EFEBY6v2DBNToFEBTiK3DB cpbLHEBgVs4DBtokGEBbno3DBPK+GEBktO4DB ctdWHEBIF84DBP/NHEBkCy4DBmOUHEBXk14DB lIFXHEB2j+4DB ctdWHEBIF84DBK8WHEBCs94DB2uWHEBT384DB lhAUHEBXPy4DB cHaLHEB78V4DBW5THEBEYp4DBJ7NHEBIFd4DB c9drGEBc9P3DBsSCHEBLH83DBaR3GEBnEl3DB cbcSEEB14v1DBwfCGEB/pG2DBeJIFEBXkx1DB cWOWDEB1Nw1DBoa+DEBZQv1DBENqDEBEty1DB cmZlCEBgVi1DBmOJDEBamu1DBqxxCEB7Rr1DB c7xNCEBCs60DBt9fCEBHae1DBKxVCEBfUP1DB cBWFCEBnvE0DBMdHCEBDXq0DBy2FCEBCBZ0DB cZwMCEB6mZzDBRLFCEB++9zDB0iMCEB/UgzDB cwKMCEBcSgvDBy2MCEBMdWzDByrWCEBoaWxDB c0i2BEBiBLtDBolHCEBx1suDBG5DCEB7R5tDB c14cBEBoakrDBD3uBEBHawsDBB2jBEBXP8rDB cXkDBEBRL2rDBhgbBEBjsfrDBIwDBEBLH5rDB c3kdBEBXPUuDBlj/AEBmZasDBuzYBEBgVutDB c9IpBEBCsszDBiBqBEB2O3vDBZb4BEBLyGyDB 
ccyXBEB3530DB2DlBEBY6H0DBaxeBEBZQg0DB cQVzAEB/pb2DBFjPBEBuzT1DBK8BBEBlDN2DB cNz4/DBuzX2DB8nlAEBjXp2DB+eKAEBKcd2DB cU4+5DBb8L2DBiBP+DB/pC2DB46n7DBHaF2DB c+T43DBtdf2DBamK5DBzMP2DB2jC4DBFDa2DB cNe54DBsHl2DBtIo3DBhAo2DB35s4DB8nk2DB cc9U7DBFDs2DBIF55DB8nn2DBnvU6DBFDs2DB cdoi/DB03v2DBGEs8DBFDs2DB3kM+DBqGo2DB cO0YAEBfU52DB6m6/DBkCy2DBGZLAEBPf02DB c1t3AEBbn42DBLHkAEBGZ92DBZ7uAEB3kF3DB cKRQBEBO0O2DB1N+AEBDCv2DBUNHBEBQ1f2DB . CfRNyID/Av6qL/AxCLM/AAAAA/A F Np mqGMIEBLHS0DB cCMHIEB2jL0DBN+IIEBxgO0DBJ7GIEBY6L0DB cqGMIEBLHS0DBb8HIEBZ7L0DBWuJIEBgVO0DB cdzjIEBHvr0DBsSTIEBmZa0DBSMgIEBf/n0DB cOJOJEBHaE2DBe+0IEBVj90DB/pEJEBXPi1DB cRWqJEBFDU5DBYFgJEBf/E3DBxAyJEB9oH4DB c5FdJEB2jR6DBvUoJEBXPo5DBWZhJEBCB/5DB cBrFJEBKcQ7DBFjXJEBrcp6DBQ1NJEBsy76DB cUt0HEB1NY9DBep/IEBBrf7DB9IFIEBFDC9DB cP/iGEBGZw+DB/UbHEBKc69DBmZ+GEBYlU+DB c/0pCEBvpPAEBGkKFEBlDFAEBM9/DEBP/HAEB c7clBEB46TAEBtIiCEBgVQAEB039BEBxATAEB cKxPBEBe+qAEBTCkBEBepaAEBRrSBEB8ynAEB ciB9/DBg1vBEB6m4AEBsSEBEBB2mAEBzBbBEB c3k++DB7RxBEB9or/DBfU0BEBnv5+DBamxBEB c5FGAEBR2gBEBQgR/DBDCyBEBDX+/DBaxmBEB crc6AEB5FnAEBChZAEBjMQBEB5wnAEB/0CBEB cnk7AEBoahAEB8H7AEBJGmAEBHa7AEBc9jAEB cuzWAEBUYtAEBYluAEBQAjAEBXvhAEB0XlAEB l4PWAEB2jsAEB cAgWAEBCspAEB5QWAEB2DsAEBfUWAEBWOqAEB cdoWAEB/JpAEBljWAEBShpAEBamWAEBxVpAEB cf/SAEBZwsAEB8yUAEB5FrAEBvJTAEB9osAEB cepX/DB6mABEB2OHAEB/J1AEBJGy/DB/06AEB c1438DBb8TBEBoF1+DB/JIBEB14y9DBtISBEB cOJj7DBDXMBEBXPg8DBwqUBEB4P27DBRWSBEB cxg68DBmOMBEB5li7DB1tLBEBe+s8DB+zNBEB cjsR+DBHv5AEB2jg9DBqxHBEBktu9DBcdCBEB cAAh/DBhgjAEBYlr+DBLSzAEB0iJ/DBr8rAEB cLHDAEBkNYAEBFus/DBcSfAEBHv5/DBv0bAEB cMoJAEBvUMAEBcyIAEBZ7UAEBgqKAEBkCPAEB cN+MBEBhAJAEBDiIAEBrcJAEB1YFBEBUYJAEB c4PMCEBbnFAEBuehBEBQAIAEBp7vBEBB2GAEB cdTTDEBsSAAEBwfiCEBepEAEBUY9CEB9IEAEB cUtfEEBZQo/DB2OeDEBIw8/DBCsREEBoat/DB cMdMGEBxgf+DBjM8EEBsyd/DBuTBGEB03n+DB c3ZTHEB/UU9DBpwhGEBzhP+DB1NAHEBb8s9DB cYlKJEBpwE4DBwfOIEBQgI8DBktMJEB1NV6DB cFDpIEBsyO1DBR2IJEBktO2DBEt5IEBnEM2DB cqGMIEBLHS0DBpbkIEBO090DBChUIEBtdf0DB . m4PWAEB2jsAEB cnPWAEBVjsAEBnPWAEBFusAEBnPWAEBnvsAEB l4PWAEB2jsAEB . 
CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mwfB6DBRLhAEB cuzF7DB7crAEBb8Q6DBdIlAEBf/06DBlDqAEB cb8o9DBKcYAEBHvB8DBZbwAEBlYp8DBQ1jAEB cQ1z+DBIQEAEBoFB+DBLHUAEBuee+DBOULAEB cMdS/DBShy/DB036+DB46BAEBMdS/DBShy/DB cNzA/DBBWJAEBMdS/DBShy/DBpbF/DBf0GAEB coaS+DBQ1bAEBRLz+DB5wQAEBmui+DBR2VAEB cmuO8DBvJyAEB+Ts9DBNzpAEBFu78DBsSwAEB cR2s6DBw/wAEBoFv7DBtdzAEBY6L7DBU40AEB c7895DBNejAEBwfn6DB/UwAEBmu95DBeeiAEB CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mKxf9DBoa86DB c14IAEB/U57DBaRA+DBb8B7DB03c/DBHae7DB cW5uAEBlY88DBTiXAEBXPI8DBXPkAEBY6j8DB cSBGBEBnvu9DBIQ3AEBYlP9DBO0ABEBBWq9DB cnEWBEBnEv9DBVDIBEBtdw9DBKcUBEBgVy9DB csH5AEBb8q8DB5lWBEBDCu9DBb8/AEBLH78DB czM4/DBGEW7DBeeoAEBShD8DBnkTAEBKxq7DB cktv+DBf//6DB1Nh/DB67L7DB46J/DBf//6DB cKxf9DBoa86DBb8Z+DBf//6DBamf9DBPf76DB . CfRNyID/Av6qL/AxCLM/Ac9oQ/A F Np m67o9DB6mn7DB czBCAEB03y8DBxgi+DB3kz7DBpbj/DBxgU8DB cEtRAEBcSP9DBsSHAEBgq88DBGENAEBqGE9DB cIFeAEBx1x9DB4PWAEBXPa9DBkCZAEBpwn9DB cepoAEBPfv9DB2DfAEBNzz9DBFDoAEB14x9DB cOUbAEBVjE9DBUNpAEB4Pt9DBo6eAEBEYP9DB cDCc/DB35B8DB9IRAEBf/l8DBDC4/DB6mP8DB c67o9DB6mn7DBmuL/DBZ757DBrcn9DBZ7h7DB . CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mSM1/DB0ihsDB c6GfAEBO0isDBVjGAEBuehsDBBLTAEBwKesDB csnjBEB14qtDBnk4AEBHvssDBJmPBEB0iLtDB cMI9BEB2jnuDBlYtBEBwK6tDByL2BEBtITuDB cmOFCEBKx9uDBUt/BEBvJvuDBZwCCEB671uDB cIlICEBjXJvDB4aGCEB0iBvDBAAHCEBzhMvDB c7xGCEBnE9uDBoFKCEBiWGvDBqGHCEB8SBvDB cUYCCEB0iSuDBgqFCEBgquuDBktECEBJbguDB cMI4BEBzMTtDBc9+BEBSM+tDB2u8BEB9omtDB cNepBEBwKdsDBUtzBEBTiAtDBKctBEBBWwsDB cfUcBEBnEirDB03lBEBpwLsDBIQjBEBCBvrDB cwKRBEBHaqrDBx1aBEB+TfrDBzsSBEBpworDB cxAzAEBMdHsDBpbHBEBQ10rDB++8AEBkC+rDB cSM1/DB0ihsDBBrlAEBkCUsDBNe//DBv0bsDB . 
CfRNyID/Av6qL/AxCLM/AAAAQ/A F Np mnvxCEBoayjDB c2O8CEBaRSjDBR23CEBT3njDBaG7CEBU4fjDB cgKkCEBiWdiDBW58CEBdTKjDBgKkCEBiWdiDB cXPJDEBXPLjDBgKkCEBiWdiDB46HDEBMdCjDB cShCDEBlY7jDBChKDEBktTjDBYFFDEBWOwjDB cNT/BEBpbvlDBpb2CEBiBwkDBkiUCEBMdRlDB cwKgAEBmZ8mDB67qBEBwKMmDB+epAEBcS2mDB cFDR9DBgV1nDB4Px/DBBWWnDBwKo+DBY6unDB czMV6DBgqLoDBMdR8DBiB6nDBVjX7DBT3DoDB cXk84DBNzKpDBQgA6DBJGOoDBDCV5DB6myoDB cpwy3DBsHWqDBrcj4DB9ojpDB2jL4DBzh9pDB c67R3DBaRGrDB5Qm3DBShiqDBfUW3DBHv0qDB cdTI3DBYlgrDBBrP3DBgVPrDBdTI3DBYlgrDB cgqE3DBoaGrDBdTI3DBYlgrDBpwH3DBgqOrDB c9oz2DBlDuqDBpwB3DBBr+qDB5Qz2DBfU2qDB cgqH4DBbnDpDBuzL3DBnvPqDBJbr3DBsHjpDB cjXx5DBdTlnDBiWm4DBlYhoDB3kJ5DBrc8nDB cU4g9DBQg5mDB0i06DBXP+mDBxgV8DBnE9mDB cH67BEBGELlDBsHr/DB99ymDBkC8AEB3kEmDB cqmMBEBb80kDBBrvBEBe+GlDBYFdBEBjX7kDB c99O+DBW5CkDBbcrAEBiBokDBSMS/DBhrKkDB cGZy9DBMI5jDBuev+DBRLFkDBCsW9DB/U2jDB cAAN7DBMIhjDB99k9DBnv2jDBGEq8DBOJxjDB cmuB5DBx1/iDBSMp6DB99ajDBShB6DBLyXjDB c0ig4DBqxyiDBLy14DBsy8iDB3kq4DB6m4iDB c78S7DBMIOjDBcSs4DBam3iDB5Qo6DBTiGjDB c46V+DBSMojDBlDZ8DBamajDBPKY9DBc9bjDB cCsIBEBjsBkDBUNh/DBqG3jDBRrmAEBb8AkDB cdIOCEBR26jDBKRYBEBkCCkDBBr6BEBShAkDB cnvxCEBoayjDBNeWCEBoa4jDBO0pCEBsHAkDB . CfRNyID/Av6qL/AxCLM/AAAAA/A F Np mepGtDBDXjXDB cb84sDBueEYDBDXAtDB78nXDBjs6sDBmZ8XDB c5QEtDBtIDZDB9o0sDBgVYYDBHvysDBU4EZDB cjsQtDBRLyYDB1NJtDB/pCZDBCsNtDB1N5YDB crcZtDBMd5XDBnvWtDBDCkYDBJGdtDBxgMYDB cepGtDBDXjXDBUNVtDB8SjXDBVjStDB/paXDB . CfR3bvd/AxDPc/Aof+Z/AAAAg/A F p";
34.373203
130
0.792425
61b7c11d07156ba70b27e2cb9b382bac8413d68b
23,985
//! Support code for rustc's built in unit-test and micro-benchmarking //! framework. //! //! Almost all user code will only be interested in `Bencher` and //! `black_box`. All other interactions (such as writing tests and //! benchmarks themselves) should be done via the `#[test]` and //! `#[bench]` attributes. //! //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details. // Currently, not much of this is meant for users. It is intended to // support the simplest interface possible for representing and // running tests while providing a base that other test frameworks may // build off of. // N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by // cargo) to detect this crate. #![crate_name = "test"] #![unstable(feature = "test", issue = "50297")] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))] #![cfg_attr(unix, feature(libc))] #![feature(rustc_private)] #![feature(nll)] #![feature(available_concurrency)] #![feature(internal_output_capture)] #![feature(option_unwrap_none)] #![feature(panic_unwind)] #![feature(staged_api)] #![feature(termination_trait_lib)] #![feature(test)] #![feature(total_cmp)] // Public reexports pub use self::bench::{black_box, Bencher}; pub use self::console::run_tests_console; pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic}; pub use self::types::TestName::*; pub use self::types::*; pub use self::ColorConfig::*; pub use cli::TestOpts; // Module to be used by rustc to compile tests in libtest pub mod test { pub use crate::{ assert_test_result, bench::Bencher, cli::{parse_opts, TestOpts}, filter_tests, helpers::metrics::{Metric, MetricMap}, options::{Options, RunIgnored, RunStrategy, ShouldPanic}, run_test, test_main, test_main_static, test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, 
TrOk}, time::{TestExecTime, TestTimeOptions}, types::{ DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestType, }, }; } use std::{ collections::VecDeque, env, io, io::prelude::Write, panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}, process::{self, Command, Termination}, sync::mpsc::{channel, Sender}, sync::{Arc, Mutex}, thread, time::{Duration, Instant}, }; pub mod bench; mod cli; mod console; mod event; mod formatters; mod helpers; mod options; pub mod stats; mod test_result; mod time; mod types; #[cfg(test)] mod tests; use event::{CompletedTest, TestEvent}; use helpers::concurrency::get_concurrency; use helpers::exit_code::get_exit_code; use options::{Concurrent, RunStrategy}; use test_result::*; use time::TestExecTime; // Process exit code to be used to indicate test failures. const ERROR_EXIT_CODE: i32 = 101; const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE"; // The default console test runner. It accepts the command line // arguments and a vector of test_descs. pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) { let mut opts = match cli::parse_opts(args) { Some(Ok(o)) => o, Some(Err(msg)) => { eprintln!("error: {}", msg); process::exit(ERROR_EXIT_CODE); } None => return, }; if let Some(options) = options { opts.options = options; } if opts.list { if let Err(e) = console::list_tests_console(&opts, tests) { eprintln!("error: io error when listing tests: {:?}", e); process::exit(ERROR_EXIT_CODE); } } else { match console::run_tests_console(&opts, tests) { Ok(true) => {} Ok(false) => process::exit(ERROR_EXIT_CODE), Err(e) => { eprintln!("error: io error when listing tests: {:?}", e); process::exit(ERROR_EXIT_CODE); } } } } /// A variant optimized for invocation with a static test vector. /// This will panic (intentionally) when fed any dynamic tests. 
/// /// This is the entry point for the main function generated by `rustc --test` /// when panic=unwind. pub fn test_main_static(tests: &[&TestDescAndFn]) { #[cfg(not(target_arch = "bpf"))] let args = env::args().collect::<Vec<_>>(); #[cfg(target_arch = "bpf")] let args: [String; 0] = []; let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); test_main(&args, owned_tests, None) } /// A variant optimized for invocation with a static test vector. /// This will panic (intentionally) when fed any dynamic tests. /// /// Runs tests in panic=abort mode, which involves spawning subprocesses for /// tests. /// /// This is the entry point for the main function generated by `rustc --test` /// when panic=abort. pub fn test_main_static_abort(tests: &[&TestDescAndFn]) { // If we're being run in SpawnedSecondary mode, run the test here. run_test // will then exit the process. if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) { env::remove_var(SECONDARY_TEST_INVOKER_VAR); let test = tests .iter() .filter(|test| test.desc.name.as_slice() == name) .map(make_owned_test) .next() .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name)); let TestDescAndFn { desc, testfn } = test; let testfn = match testfn { StaticTestFn(f) => f, _ => panic!("only static tests are supported"), }; run_test_in_spawned_subprocess(desc, Box::new(testfn)); } let args = env::args().collect::<Vec<_>>(); let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); test_main(&args, owned_tests, Some(Options::new().panic_abort(true))) } /// Clones static values for putting into a dynamic vector, which test_main() /// needs to hand out ownership of tests to parallel test runners. /// /// This will panic when fed any dynamic tests, because they cannot be cloned. 
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn { match test.testfn { StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() }, StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() }, _ => panic!("non-static tests passed to test::test_main_static"), } } /// Invoked when unit tests terminate. Should panic if the unit /// Tests is considered a failure. By default, invokes `report()` /// and checks for a `0` result. pub fn assert_test_result<T: Termination>(result: T) { let code = result.report(); assert_eq!( code, 0, "the test returned a termination value with a non-zero status code ({}) \ which indicates a failure", code ); } pub fn run_tests<F>( opts: &TestOpts, tests: Vec<TestDescAndFn>, mut notify_about_test_event: F, ) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()>, { use std::collections::{self, HashMap}; use std::hash::BuildHasherDefault; use std::sync::mpsc::RecvTimeoutError; struct RunningTest { join_handle: Option<thread::JoinHandle<()>>, } // Use a deterministic hasher type TestMap = HashMap<TestDesc, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>; struct TimeoutEntry { desc: TestDesc, timeout: Instant, } let tests_len = tests.len(); let mut filtered_tests = filter_tests(opts, tests); if !opts.bench_benchmarks { filtered_tests = convert_benchmarks_to_tests(filtered_tests); } let filtered_tests = { let mut filtered_tests = filtered_tests; for test in filtered_tests.iter_mut() { test.desc.name = test.desc.name.with_padding(test.testfn.padding()); } filtered_tests }; let filtered_out = tests_len - filtered_tests.len(); let event = TestEvent::TeFilteredOut(filtered_out); notify_about_test_event(event)?; let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect(); let event = TestEvent::TeFiltered(filtered_descs); notify_about_test_event(event)?; let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests .into_iter() 
.partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_))); let concurrency = opts.test_threads.unwrap_or_else(get_concurrency); let mut remaining = filtered_tests; remaining.reverse(); let mut pending = 0; let (tx, rx) = channel::<CompletedTest>(); let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process { RunStrategy::SpawnPrimary } else { RunStrategy::InProcess }; let mut running_tests: TestMap = HashMap::default(); let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new(); fn get_timed_out_tests( running_tests: &TestMap, timeout_queue: &mut VecDeque<TimeoutEntry>, ) -> Vec<TestDesc> { let now = Instant::now(); let mut timed_out = Vec::new(); while let Some(timeout_entry) = timeout_queue.front() { if now < timeout_entry.timeout { break; } let timeout_entry = timeout_queue.pop_front().unwrap(); if running_tests.contains_key(&timeout_entry.desc) { timed_out.push(timeout_entry.desc); } } timed_out } fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> { timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. 
}| { let now = Instant::now(); if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) } }) } if concurrency == 1 { while !remaining.is_empty() { let test = remaining.pop().unwrap(); let event = TestEvent::TeWait(test.desc.clone()); notify_about_test_event(event)?; run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No) .unwrap_none(); let completed_test = rx.recv().unwrap(); let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; } } else { while pending > 0 || !remaining.is_empty() { while pending < concurrency && !remaining.is_empty() { let test = remaining.pop().unwrap(); let timeout = time::get_default_test_timeout(); let desc = test.desc.clone(); let event = TestEvent::TeWait(desc.clone()); notify_about_test_event(event)?; //here no pad let join_handle = run_test( opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes, ); running_tests.insert(desc.clone(), RunningTest { join_handle }); timeout_queue.push_back(TimeoutEntry { desc, timeout }); pending += 1; } let mut res; loop { if let Some(timeout) = calc_timeout(&timeout_queue) { res = rx.recv_timeout(timeout); for test in get_timed_out_tests(&running_tests, &mut timeout_queue) { let event = TestEvent::TeTimeout(test); notify_about_test_event(event)?; } match res { Err(RecvTimeoutError::Timeout) => { // Result is not yet ready, continue waiting. } _ => { // We've got a result, stop the loop. 
break; } } } else { res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected); break; } } let mut completed_test = res.unwrap(); if let Some(running_test) = running_tests.remove(&completed_test.desc) { if let Some(join_handle) = running_test.join_handle { if let Err(_) = join_handle.join() { if let TrOk = completed_test.result { completed_test.result = TrFailedMsg("panicked after reporting success".to_string()); } } } } let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; pending -= 1; } } if opts.bench_benchmarks { // All benchmarks run at the end, in serial. for b in filtered_benchs { let event = TestEvent::TeWait(b.desc.clone()); notify_about_test_event(event)?; run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No); let completed_test = rx.recv().unwrap(); let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; } } Ok(()) } pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> { let mut filtered = tests; let matches_filter = |test: &TestDescAndFn, filter: &str| { let test_name = test.desc.name.as_slice(); match opts.filter_exact { true => test_name == filter, false => test_name.contains(filter), } }; // Remove tests that don't match the test filter if !opts.filters.is_empty() { filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter))); } // Skip tests that match any of the skip filters filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf))); // Excludes #[should_panic] tests if opts.exclude_should_panic { filtered.retain(|test| test.desc.should_panic == ShouldPanic::No); } // maybe unignore tests match opts.run_ignored { RunIgnored::Yes => { filtered.iter_mut().for_each(|test| test.desc.ignore = false); } RunIgnored::Only => { filtered.retain(|test| test.desc.ignore); filtered.iter_mut().for_each(|test| test.desc.ignore = false); } RunIgnored::No => {} } // Sort the tests alphabetically filtered.sort_by(|t1, t2| 
t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())); filtered } pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> { // convert benchmarks to tests, if we're not benchmarking them tests .into_iter() .map(|x| { let testfn = match x.testfn { DynBenchFn(bench) => DynTestFn(Box::new(move || { bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b))) })), StaticBenchFn(benchfn) => DynTestFn(Box::new(move || { bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b))) })), f => f, }; TestDescAndFn { desc: x.desc, testfn } }) .collect() } pub fn run_test( opts: &TestOpts, force_ignore: bool, test: TestDescAndFn, strategy: RunStrategy, monitor_ch: Sender<CompletedTest>, concurrency: Concurrent, ) -> Option<thread::JoinHandle<()>> { let TestDescAndFn { desc, testfn } = test; // Emscripten can catch panics but other wasm targets cannot let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No && cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten"); if force_ignore || desc.ignore || ignore_because_no_process_support { let message = CompletedTest::new(desc, TrIgnored, None, Vec::new()); monitor_ch.send(message).unwrap(); return None; } struct TestRunOpts { pub strategy: RunStrategy, pub nocapture: bool, pub concurrency: Concurrent, pub time: Option<time::TestTimeOptions>, } fn run_test_inner( desc: TestDesc, monitor_ch: Sender<CompletedTest>, testfn: Box<dyn FnOnce() + Send>, opts: TestRunOpts, ) -> Option<thread::JoinHandle<()>> { let concurrency = opts.concurrency; let name = desc.name.clone(); let runtest = move || match opts.strategy { RunStrategy::InProcess => run_test_in_process( desc, opts.nocapture, opts.time.is_some(), testfn, monitor_ch, opts.time, ), RunStrategy::SpawnPrimary => spawn_test_subprocess( desc, opts.nocapture, opts.time.is_some(), monitor_ch, opts.time, ), }; // If the platform is single-threaded we're just going to run // the test synchronously, regardless of the 
concurrency // level. let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32"); if concurrency == Concurrent::Yes && supports_threads { let cfg = thread::Builder::new().name(name.as_slice().to_owned()); let mut runtest = Arc::new(Mutex::new(Some(runtest))); let runtest2 = runtest.clone(); match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) { Ok(handle) => Some(handle), Err(e) if e.kind() == io::ErrorKind::WouldBlock => { // `ErrorKind::WouldBlock` means hitting the thread limit on some // platforms, so run the test synchronously here instead. Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()(); None } Err(e) => panic!("failed to spawn thread to run test: {}", e), } } else { runtest(); None } } let test_run_opts = TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options }; match testfn { DynBenchFn(bencher) => { // Benchmarks aren't expected to panic, so we run them all in-process. crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| { bencher.run(harness) }); None } StaticBenchFn(benchfn) => { // Benchmarks aren't expected to panic, so we run them all in-process. crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn); None } DynTestFn(f) => { match strategy { RunStrategy::InProcess => (), _ => panic!("Cannot run dynamic test fn out-of-process"), }; run_test_inner( desc, monitor_ch, Box::new(move || __rust_begin_short_backtrace(f)), test_run_opts, ) } StaticTestFn(f) => run_test_inner( desc, monitor_ch, Box::new(move || __rust_begin_short_backtrace(f)), test_run_opts, ), } } /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. 
#[inline(never)] fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) { f(); // prevent this frame from being tail-call optimised away black_box(()); } fn run_test_in_process( desc: TestDesc, nocapture: bool, report_time: bool, testfn: Box<dyn FnOnce() + Send>, monitor_ch: Sender<CompletedTest>, time_opts: Option<time::TestTimeOptions>, ) { // Buffer for capturing standard I/O let data = Arc::new(Mutex::new(Vec::new())); if !nocapture { io::set_output_capture(Some(data.clone())); } let start = report_time.then(Instant::now); let result = catch_unwind(AssertUnwindSafe(testfn)); let exec_time = start.map(|start| { let duration = start.elapsed(); TestExecTime(duration) }); io::set_output_capture(None); let test_result = match result { Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time), Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time), }; let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec(); let message = CompletedTest::new(desc, test_result, exec_time, stdout); monitor_ch.send(message).unwrap(); } fn spawn_test_subprocess( desc: TestDesc, nocapture: bool, report_time: bool, monitor_ch: Sender<CompletedTest>, time_opts: Option<time::TestTimeOptions>, ) { let (result, test_output, exec_time) = (|| { let args = env::args().collect::<Vec<_>>(); let current_exe = &args[0]; let mut command = Command::new(current_exe); command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice()); if nocapture { command.stdout(process::Stdio::inherit()); command.stderr(process::Stdio::inherit()); } let start = report_time.then(Instant::now); let output = match command.output() { Ok(out) => out, Err(e) => { let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e); return (TrFailed, err.into_bytes(), None); } }; let exec_time = start.map(|start| { let duration = start.elapsed(); TestExecTime(duration) }); let std::process::Output { stdout, stderr, status } = output; let mut test_output = stdout; 
formatters::write_stderr_delimiter(&mut test_output, &desc.name); test_output.extend_from_slice(&stderr); let result = match (|| -> Result<TestResult, String> { let exit_code = get_exit_code(status)?; Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time)) })() { Ok(r) => r, Err(e) => { write!(&mut test_output, "Unexpected error: {}", e).unwrap(); TrFailed } }; (result, test_output, exec_time) })(); let message = CompletedTest::new(desc, result, exec_time, test_output); monitor_ch.send(message).unwrap(); } fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! { let builtin_panic_hook = panic::take_hook(); let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| { let test_result = match panic_info { Some(info) => calc_result(&desc, Err(info.payload()), &None, &None), None => calc_result(&desc, Ok(()), &None, &None), }; // We don't support serializing TrFailedMsg, so just // print the message out to stderr. if let TrFailedMsg(msg) = &test_result { eprintln!("{}", msg); } if let Some(info) = panic_info { builtin_panic_hook(info); } if let TrOk = test_result { process::exit(test_result::TR_OK); } else { process::exit(test_result::TR_FAILED); } }); let record_result2 = record_result.clone(); panic::set_hook(Box::new(move |info| record_result2(Some(&info)))); testfn(); record_result(None); unreachable!("panic=abort callback should have exited the process") }
34.61039
103
0.5867
501af6a11dd4272e4c49c31fe498631a0da25293
1,145
use crate::internal_prelude::*; #[cfg(hdf5_1_8_15)] mod hdf5_1_8_15 { use super::*; #[repr(C)] #[derive(Copy, Clone, PartialEq, PartialOrd, Debug)] pub enum H5PL_type_t { H5PL_TYPE_ERROR = -1, H5PL_TYPE_FILTER = 0, H5PL_TYPE_NONE = 1, } pub use self::H5PL_type_t::*; pub const H5PL_FILTER_PLUGIN: c_uint = 0x0001; pub const H5PL_ALL_PLUGIN: c_uint = 0xffff; extern "C" { pub fn H5PLget_loading_state(plugin_flags: *mut c_int) -> herr_t; pub fn H5PLset_loading_state(plugin_flags: *mut c_int) -> herr_t; } } #[cfg(hdf5_1_8_15)] pub use self::hdf5_1_8_15::*; #[cfg(hdf5_1_10_1)] extern "C" { pub fn H5PLappend(search_path: *const c_char) -> herr_t; pub fn H5PLprepend(search_path: *const c_char) -> herr_t; pub fn H5PLreplace(search_path: *const c_char, index: c_uint) -> herr_t; pub fn H5PLinsert(search_path: *const c_char, index: c_uint) -> herr_t; pub fn H5PLremove(index: c_uint) -> herr_t; pub fn H5PLget(index: c_uint, path_buf: *mut c_char, buf_size: size_t) -> ssize_t; pub fn H5PLsize(num_paths: *mut c_uint) -> herr_t; }
29.358974
86
0.661135
fe8b82fcf5582c4bf95f937004b4feed70561032
5,995
//! Mappings between old and new `NodeId`s. Also has some support for `AttrId`s. use std::collections::hash_map::Entry; use std::collections::{BTreeSet, HashMap, HashSet}; use std::mem; use std::ops::Bound::Included; use std::ops::Deref; use syntax::ast::{AttrId, NodeId, DUMMY_NODE_ID}; use syntax::source_map::symbol::Symbol; pub const DUMMY_ATTR_ID: AttrId = AttrId(!0); #[derive(Clone, Debug)] pub struct NodeMap { /// Map from current NodeIds to old NodeIds. id_map: HashMap<NodeId, NodeId>, /// Edges from current IDs to new IDs. `commit_nodes()` will merge this into `self.id_map`, so /// that the new IDs become current IDs. pending_edges: BTreeSet<(NodeId, NodeId)>, } impl NodeMap { pub fn new() -> NodeMap { NodeMap { id_map: HashMap::new(), pending_edges: BTreeSet::new(), } } pub fn into_inner(self) -> HashMap<NodeId, NodeId> { self.id_map } pub fn commit(&mut self) { let mut new_id_map = HashMap::new(); trace!("committing edges"); for (id2, id3) in mem::replace(&mut self.pending_edges, BTreeSet::new()) { if id2 == DUMMY_NODE_ID || id3 == DUMMY_NODE_ID { continue; } if let Some(&id1) = self.id_map.get(&id2) { trace!(" {:?} -> {:?} -> {:?}", id3, id2, id1); match new_id_map.entry(id3) { Entry::Vacant(e) => { e.insert(id1); } Entry::Occupied(mut e) => { if *e.get() != id1 { // This is bad - we have two *different* old IDs for the same new ID. // Report a warning, and deterministically pick one as the winner. let winner = if *e.get() < id1 { *e.get() } else { id1 }; warn!( "new {:?} maps to both old {:?} and old {:?} - \ picking {:?} as the winner", id3, *e.get(), id1, winner ); *e.get_mut() = winner; } // Otherwise, both old IDs match - there's no conflict. } } } else { trace!(" {:?} -> {:?} -> NOT FOUND", id3, id2); } } self.id_map = new_id_map; } /// Initialize by mapping every `NodeId` in `nodes` to itself. 
pub fn init<I: Iterator<Item = NodeId>>(&mut self, nodes: I) { for id in nodes { if id == DUMMY_NODE_ID { continue; } self.id_map.insert(id, id); } } /// Update the NodeId mapping using a list of `(old_id, new_id)` pairs. pub fn add_edges(&mut self, matched_ids: &[(NodeId, NodeId)]) { self.pending_edges.extend(matched_ids.iter().cloned()); } pub fn add_edge(&mut self, id: NodeId, new_id: NodeId) { self.pending_edges.insert((id, new_id)); } /// Save what we know about the origin of node `id`. The origin can be tracked externally and /// restored later with `restore_id`. This is useful when a node will be removed from the AST, /// but could be reinserted later on. pub fn save_origin(&self, id: NodeId) -> Option<NodeId> { self.id_map.get(&id).cloned() } /// Restore saved information about a node's origin. pub fn restore_origin(&mut self, id: NodeId, origin: Option<NodeId>) { if let Some(origin) = origin { self.id_map.insert(id, origin); } } /// Update mark NodeIds to account for the pending (not committed) NodeId changes. pub fn transfer_marks(&self, marks: &mut HashSet<(NodeId, Symbol)>) { let mut new_marks = HashSet::new(); for &(old_id, label) in marks.iter() { let lo = (old_id, NodeId::from_u32(0)); let hi = (old_id, NodeId::MAX); let mut empty = true; for &(_, new_id) in self.pending_edges.range((Included(&lo), Included(&hi))) { trace!(" {:?}: {:?} -> {:?}", label, old_id, new_id); new_marks.insert((new_id, label)); empty = false; } if empty { trace!(" {:?}: {:?} -> DROPPED", label, old_id); } } *marks = new_marks; } /// Update keys of an arbitrary `HashMap` to account for the pending (not committed) NodeId /// changes. 
pub fn transfer_map<V: Clone>(&self, map: HashMap<NodeId, V>) -> HashMap<NodeId, V> { let mut new_map = HashMap::with_capacity(map.len()); for (old_id, v) in map { let lo = (old_id, NodeId::from_u32(0)); let hi = (old_id, NodeId::MAX); let mut new_ids = self .pending_edges .range((Included(&lo), Included(&hi))) .map(|&(_, new_id)| new_id) .peekable(); // Avoid a clone if there's zero or one new IDs. while let Some(new_id) = new_ids.next() { if new_ids.peek().is_none() { new_map.insert(new_id, v); break; } else { new_map.insert(new_id, v.clone()); } } } new_map } pub fn transfer<'a>(&'a self, id: NodeId) -> impl Iterator<Item = NodeId> + 'a { let lo = (id, NodeId::from_u32(0)); let hi = (id, NodeId::MAX); self.pending_edges .range((Included(&lo), Included(&hi))) .map(|&(_, new_id)| new_id) } } impl Deref for NodeMap { type Target = HashMap<NodeId, NodeId>; fn deref(&self) -> &Self::Target { &self.id_map } }
35.05848
99
0.493078
72e868fb95b9997ea542213248d710333b97391c
40,077
use std::{ path::Path, process::Command, sync::{ atomic::{AtomicBool, Ordering}, Arc, RwLock, }, thread, time::Duration, }; use glium::{ backend::glutin::Display, glutin::{ event::{ElementState, ModifiersState, MouseScrollDelta, VirtualKeyCode, WindowEvent}, event_loop::EventLoopProxy, window::{CursorIcon, Fullscreen}, }, }; use imgui::*; use imgui_glium_renderer::Renderer; use lazy_static::*; use crate::{ min, rect::Rect, util::{Image, UserEvent}, vec2::Vec2, }; pub mod image_view; use image_view::ImageView; pub mod image_list; use image_list::ImageList; pub mod arrows; use arrows::{Action, Arrows}; mod clipboard; pub mod crop; pub mod load_image; pub mod image_loader; use image_loader::ImageLoader; mod save_image; use crop::Crop; pub mod cursor; mod undo_stack; use undo_stack::{UndoFrame, UndoStack}; mod cache; use cache::Cache; mod resize; use resize::Resize; mod filters; const TOP_BAR_SIZE: f32 = 25.0; const BOTTOM_BAR_SIZE: f32 = 22.0; lazy_static! { pub static ref RESIZING: AtomicBool = AtomicBool::new(false); } pub struct App { exit: bool, delay: Option<Duration>, pub image_view: Option<Box<ImageView>>, pub size: Vec2<f32>, pub position: Vec2<i32>, fullscreen: bool, pub top_bar_size: f32, pub bottom_bar_size: f32, proxy: EventLoopProxy<UserEvent>, modifiers: ModifiersState, mouse_position: Vec2<f32>, current_filename: String, image_list: ImageList, arrows: Arrows, stack: UndoStack, pub crop: Box<Crop>, pub cache: Arc<Cache>, pub image_loader: Arc<RwLock<ImageLoader>>, resize: Resize, pub maximized: bool, pub save_size: Vec2<f32>, } impl App { pub fn update( &mut self, ui: &mut Ui<'_>, display: &glium::Display, _renderer: &mut Renderer, window_event: Option<&WindowEvent<'_>>, user_event: Option<&mut UserEvent>, ) -> (bool, Option<Duration>) { self.exit = false; self.delay = None; { let window_context = display.gl_window(); let window = window_context.window(); let (width, height) = display.get_framebuffer_dimensions(); self.size = Vec2::new(width as f32, 
height as f32); self.maximized = { let max = window.is_maximized(); // this is a cringe hack to save the non maximized size // currently it will never save the size of a window that is the maximum size of the mointor if let Some(monitor) = window.current_monitor() { let mon_size = monitor.size(); if !(width >= mon_size.width || height >= mon_size.height) { self.save_size = Vec2::new(width as f32, height as f32); } } max }; } if let Some(ref mut image) = self.image_view { update_delay(&mut self.delay, &image.animate(display)); } if let Some(event) = user_event { match event { UserEvent::ImageLoaded(images, path) => { let mut replace = true; { let mut guard = self.image_loader.write().unwrap(); match path { Some(path) => match guard.target_file { Some(ref mut target) => { if path != target { replace = false; } else { guard.target_file = None; } } None => replace = false, }, None => guard.target_file = None, } } if let Some(path) = path { self.image_loader.write().unwrap().loading.remove(path); } if replace { cursor::set_cursor_icon(CursorIcon::default(), display); let view = Box::new(ImageView::new(display, images.clone(), path.clone())); self.resize.size = Vec2::new(view.size.x() as i32, view.size.y() as i32); self.image_view = Some(view); self.current_filename = if let Some(path) = path { self.image_list.change_dir(&path); path.file_name().unwrap().to_str().unwrap().to_string() } else { String::new() }; let window_context = display.gl_window(); let window = window_context.window(); if self.current_filename.is_empty() { window.set_title("Simp"); } else { window.set_title(&self.current_filename.to_string()); } self.best_fit(); self.stack.reset(); } } UserEvent::Resize(images) => { if let Some(ref mut view) = self.image_view { view.swap_frames(images.as_mut().unwrap(), display); self.stack.push(UndoFrame::Resize(images.take().unwrap())); } self.best_fit(); RESIZING.store(false, Ordering::SeqCst); } UserEvent::Save(path) => { if let Some(ref view) = self.image_view { 
save_image::save( self.proxy.clone(), path.clone(), view.frames.clone(), view.rotation, view.horizontal_flip, view.vertical_flip, ); } } UserEvent::LoadError(error, path) => { self.image_loader.write().unwrap().loading.remove(path); cursor::set_cursor_icon(CursorIcon::default(), display); let error = error.clone(); thread::spawn(move || { msgbox::create("Error", &error, msgbox::IconType::Error).unwrap() }); } UserEvent::ErrorMessage(error) => { cursor::set_cursor_icon(CursorIcon::default(), display); let error = error.clone(); thread::spawn(move || { msgbox::create("Error", &error, msgbox::IconType::Error).unwrap() }); } UserEvent::SetCursor(icon) => cursor::set_cursor_icon(*icon, display), UserEvent::Exit => self.exit = true, }; } if let Some(event) = window_event { match event { WindowEvent::Moved(position) => { *self.position.mut_x() = position.x; *self.position.mut_x() = position.y; } WindowEvent::CursorMoved { position, .. } => { self.mouse_position.set_x(position.x as f32); self.mouse_position.set_y(position.y as f32); } WindowEvent::MouseWheel { delta, .. } => { let scroll = match delta { MouseScrollDelta::LineDelta(_, y) => *y, MouseScrollDelta::PixelDelta(pos) => pos.y as f32, }; if self.crop.inner.is_none() { self.zoom(scroll, self.mouse_position); } } WindowEvent::ModifiersChanged(state) => self.modifiers = *state, WindowEvent::DroppedFile(path) => load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ), WindowEvent::KeyboardInput { input, .. 
} if !self.resize.visible => { if let Some(key) = input.virtual_keycode { match input.state { ElementState::Pressed => match key { VirtualKeyCode::Delete => { if let Some(ref view) = self.image_view { if let Some(ref path) = view.path { delete(path, self.proxy.clone()); } } } VirtualKeyCode::O if self.modifiers.ctrl() => load_image::open( self.proxy.clone(), display, self.cache.clone(), self.image_loader.clone(), ), VirtualKeyCode::S if self.modifiers.ctrl() => save_image::open( self.current_filename.clone(), self.proxy.clone(), display, ), VirtualKeyCode::W if self.modifiers.ctrl() => self.exit = true, VirtualKeyCode::N if self.modifiers.ctrl() => new_window(), VirtualKeyCode::F => { self.largest_fit(); } VirtualKeyCode::B => { self.best_fit(); } VirtualKeyCode::Q => { if let Some(ref mut image) = self.image_view { self.stack.push(UndoFrame::Rotate(-1)); image.rotate(-1); } } VirtualKeyCode::E => { if let Some(ref mut image) = self.image_view { self.stack.push(UndoFrame::Rotate(1)); image.rotate(1); } } VirtualKeyCode::F5 => { if let Some(image) = self.image_view.as_ref() { if let Some(path) = &image.path { self.cache.clear(); load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } } VirtualKeyCode::C if self.modifiers.ctrl() => { if let Some(ref view) = self.image_view { clipboard::copy(view); } } VirtualKeyCode::V if self.modifiers.ctrl() => { clipboard::paste(&self.proxy); } VirtualKeyCode::X if self.modifiers.ctrl() => { self.crop.cropping = true; } VirtualKeyCode::Z if self.modifiers.ctrl() => { self.undo(display); } VirtualKeyCode::Y if self.modifiers.ctrl() => { self.redo(display); } VirtualKeyCode::R if self.modifiers.ctrl() => { self.resize.visible = true; } VirtualKeyCode::Left | VirtualKeyCode::D => { if let Some(path) = self.image_list.previous() { if self.crop.inner.is_none() { load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } } VirtualKeyCode::Right | 
VirtualKeyCode::A => { if let Some(path) = self.image_list.next() { if self.crop.inner.is_none() { load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } } VirtualKeyCode::F11 => { let window_context = display.gl_window(); let window = window_context.window(); let fullscreen = window.fullscreen(); if fullscreen.is_some() { window.set_fullscreen(None); self.fullscreen = false; self.top_bar_size = TOP_BAR_SIZE; self.bottom_bar_size = BOTTOM_BAR_SIZE; } else { window.set_fullscreen(Some(Fullscreen::Borderless(None))); self.fullscreen = true; self.top_bar_size = 0.0; self.bottom_bar_size = 0.0; } } VirtualKeyCode::Escape => { let window_context = display.gl_window(); let window = window_context.window(); let fullscreen = window.fullscreen(); if fullscreen.is_some() { window.set_fullscreen(None); } } _ => (), }, ElementState::Released => (), } } } WindowEvent::ReceivedCharacter(c) if !self.resize.visible => match c { '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' => { if let Some(ref mut view) = self.image_view { let zoom = c.to_digit(10).unwrap() as f32; view.scale = zoom; } } '+' => { if self.crop.inner.is_none() { self.zoom(1.0, self.size / 2.0); } } '-' => { if self.crop.inner.is_none() { self.zoom(-1.0, self.size / 2.0); } } _ => (), }, _ => (), }; } if let Some(ref mut image) = self.image_view { if ui.is_mouse_dragging(imgui::MouseButton::Left) { if self.crop.cropping { if let Some(ref mut inner) = self.crop.inner { let delta = Vec2::from(ui.mouse_drag_delta()); inner.current += delta; } else { let cursor_pos = self.mouse_position; let delta = Vec2::from(ui.mouse_drag_delta()); self.crop.inner = Some(crop::Inner { start: cursor_pos - delta, current: cursor_pos, }); } } else { let delta = Vec2::from(ui.mouse_drag_delta()); image.position += delta; } ui.reset_mouse_drag_delta(imgui::MouseButton::Left); } else if self.crop.cropping { if let Some(ref inner) = self.crop.inner { let mut size = inner.current - inner.start; 
*size.mut_x() = size.x().abs(); *size.mut_y() = size.y().abs(); let start = Vec2::new( min!(inner.start.x(), inner.current.x()), min!(inner.start.y(), inner.current.y()), ); let frames = image.crop(Rect::new(start, size), display); if let Some((frames, rotation)) = frames { self.stack.push(UndoFrame::Crop { frames, rotation }) } self.crop.inner = None; self.crop.cropping = false; } } } if let Some(ref mut image) = self.image_view { let image_size = image.real_size(); let mut window_size = self.size; window_size.set_y(window_size.y() - self.top_bar_size - self.bottom_bar_size); if image_size.x() < window_size.x() { image.position.set_x(self.size.x() / 2.0); } else { if image.position.x() - image_size.x() / 2.0 > 0.0 { image.position.set_x(image_size.x() / 2.0); } if image.position.x() + image_size.x() / 2.0 < window_size.x() { image.position.set_x(window_size.x() - image_size.x() / 2.0); } } if image_size.y() < window_size.y() { image.position.set_y(self.size.y() / 2.0); } else { if image.position.y() - image_size.y() / 2.0 > self.top_bar_size { image .position .set_y(image_size.y() / 2.0 + self.top_bar_size); } if image.position.y() + image_size.y() / 2.0 < window_size.y() + self.top_bar_size { image .position .set_y((window_size.y() - image_size.y() / 2.0) + self.top_bar_size); } } } #[allow(deprecated)] let styles = ui.push_style_vars(&[ StyleVar::WindowPadding([10.0, 10.0]), StyleVar::FramePadding([0.0, 6.0]), StyleVar::ItemSpacing([5.0, 10.0]), StyleVar::WindowBorderSize(0.0), ]); #[allow(deprecated)] let colors = ui.push_style_colors(&[ (StyleColor::MenuBarBg, [0.117, 0.117, 0.117, 1.0]), (StyleColor::ButtonHovered, [0.078, 0.078, 0.078, 1.0]), (StyleColor::ButtonActive, [0.078, 0.078, 0.078, 1.0]), ]); if !self.fullscreen { self.menu_bar(display, ui); } self.resize_ui(display, ui); #[allow(deprecated)] let s = ui.push_style_vars(&[ StyleVar::WindowPadding([10.0, 4.0]), StyleVar::FramePadding([0.0, 0.0]), StyleVar::ItemSpacing([0.0, 0.0]), 
StyleVar::ButtonTextAlign([0.0, 0.5]), ]); #[allow(deprecated)] let c = ui.push_style_colors(&[ (StyleColor::WindowBg, [0.117, 0.117, 0.117, 1.0]), (StyleColor::Button, [0.117, 0.117, 0.117, 1.0]), ]); if !self.fullscreen { self.bottom_bar(ui); } c.pop(ui); s.pop(ui); styles.pop(ui); colors.pop(ui); (self.exit, self.delay) } pub fn resize_ui(&mut self, _display: &Display, ui: &mut Ui<'_>) { #[allow(deprecated)] let s = ui.push_style_vars(&[ StyleVar::WindowPadding([8.0, 8.0]), StyleVar::FramePadding([8.0, 8.0]), StyleVar::ItemSpacing([10.0, 10.0]), StyleVar::ButtonTextAlign([0.0, 0.5]), ]); if self.resize.visible { let mut open = self.image_view.is_some(); let mut open_resize = true; Window::new("Resize") .opened(&mut open) .collapsed(false, Condition::Always) .resizable(false) .size([250.0, 200.0], Condition::Always) .build(ui, || { ui.input_int("Width", &mut self.resize.size.mut_x()).build(); ui.input_int("Height", &mut self.resize.size.mut_y()) .build(); if self.resize.size.x() < 1 { *self.resize.size.mut_x() = 0; } if self.resize.size.y() < 1 { *self.resize.size.mut_y() = 0; } const MAX_SIZE: i32 = 30000; if self.resize.size.x() >= MAX_SIZE { *self.resize.size.mut_x() = MAX_SIZE; } if self.resize.size.y() >= i16::MAX as i32 { *self.resize.size.mut_y() = MAX_SIZE; } ComboBox::new("Resample") .popup_align_left(true) .preview_mode(ComboBoxPreviewMode::Label) .preview_value(filters::FILTERS[self.resize.resample_select_index].1) .build(ui, || { for (index, (_, label)) in filters::FILTERS.iter().enumerate() { let mut select = Selectable::new(label); if index == self.resize.resample_select_index { select = select.selected(true); } if select.build(ui) { self.resize.resample_select_index = index; } } }); if ui.button("Resize") { self.resize(); open_resize = false; } }); self.resize.visible = open && open_resize; } s.pop(ui); } fn resize(&self) { if RESIZING .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) .is_err() { return; } let frames = 
self.image_view.as_ref().unwrap().frames.clone(); let resize = self.resize; let proxy = self.proxy.clone(); thread::spawn(move || { let guard = frames.read().unwrap(); let mut new = Vec::new(); for image in guard.iter() { let buffer = image.buffer().resize_exact( resize.size.x() as u32, resize.size.y() as u32, filters::FILTERS[resize.resample_select_index].0, ); new.push(Image::with_delay(buffer, image.delay)); } let _ = proxy.send_event(UserEvent::Resize(Some(new))); }); } pub fn menu_bar(&mut self, display: &Display, ui: &mut Ui<'_>) { ui.main_menu_bar(|| { ui.menu_with_enabled("File", true, || { if MenuItem::new("Open").shortcut("Ctrl + O").build(ui) { load_image::open( self.proxy.clone(), display, self.cache.clone(), self.image_loader.clone(), ); } if MenuItem::new("Save as") .shortcut("Ctrl + S") .enabled(self.image_view.is_some()) .build(ui) { save_image::open(self.current_filename.clone(), self.proxy.clone(), display); } ui.separator(); if MenuItem::new("New Window").shortcut("Ctrl + N").build(ui) { new_window(); } if MenuItem::new("Refresh") .shortcut("R") .enabled(self.image_view.is_some()) .build(ui) { if let Some(ref path) = self.image_view.as_ref().unwrap().path { load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } ui.separator(); if MenuItem::new("Exit").shortcut("Ctrl + W").build(ui) { self.exit = true; } }); ui.menu_with_enabled("Edit", true, || { if MenuItem::new("Undo") .shortcut("Ctrl + Z") .enabled(self.image_view.is_some()) .build(ui) { self.undo(display); } if MenuItem::new("Redo") .shortcut("Ctrl + Y") .enabled(self.image_view.is_some()) .build(ui) { self.redo(display); } ui.separator(); if MenuItem::new("Copy") .shortcut("Ctrl + C") .enabled(self.image_view.is_some()) .build(ui) { let image = self.image_view.as_ref().unwrap(); clipboard::copy(image); } if MenuItem::new("Paste").shortcut("Ctrl + V").build(ui) { clipboard::paste(&self.proxy); } }); ui.menu_with_enabled("Image", true, || { if 
MenuItem::new("Rotate Left") .shortcut("Q") .enabled(self.image_view.is_some()) .build(ui) { self.stack.push(UndoFrame::Rotate(-1)); self.image_view.as_mut().unwrap().rotate(-1); } if MenuItem::new("Rotate Right") .shortcut("E") .enabled(self.image_view.is_some()) .build(ui) { self.stack.push(UndoFrame::Rotate(1)); self.image_view.as_mut().unwrap().rotate(1); } ui.separator(); if MenuItem::new("Flip Horizontal") .enabled(self.image_view.is_some()) .build(ui) { self.stack.push(UndoFrame::FlipHorizontal); let image = self.image_view.as_mut().unwrap(); image.flip_horizontal(display); } if MenuItem::new("Flip Vertical") .enabled(self.image_view.is_some()) .build(ui) { self.stack.push(UndoFrame::FlipVertical); let image = self.image_view.as_mut().unwrap(); image.flip_vertical(display); } ui.separator(); if MenuItem::new("Zoom in") .shortcut("+") .enabled(self.image_view.is_some()) .build(ui) { self.zoom(1.0, self.size / 2.0); } if MenuItem::new("Zoom out") .shortcut("-") .enabled(self.image_view.is_some()) .build(ui) { self.zoom(-1.0, self.size / 2.0); } ui.separator(); if MenuItem::new("Best fit") .shortcut("B") .enabled(self.image_view.is_some()) .build(ui) { self.best_fit(); } if MenuItem::new("Largest fit") .shortcut("F") .enabled(self.image_view.is_some()) .build(ui) { self.best_fit(); } ui.separator(); if MenuItem::new("Crop") .shortcut("Ctrl + X") .enabled(self.image_view.is_some()) .build(ui) { self.crop.cropping = true; } if MenuItem::new("Resize") .shortcut("Ctrl + R") .enabled(self.image_view.is_some()) .build(ui) { self.resize.visible = true; } ui.separator(); if MenuItem::new("Delete") .shortcut("Delete") .enabled(self.image_view.is_some()) .build(ui) { if let Some(ref view) = self.image_view { if let Some(ref path) = view.path { delete(path, self.proxy.clone()); } } } }); ui.menu_with_enabled("Help", true, || { if MenuItem::new("Repository").build(ui) { webbrowser::open("https://github.com/Kl4rry/simp").unwrap(); } if MenuItem::new("Report Bug").build(ui) 
{ webbrowser::open("https://github.com/Kl4rry/simp/issues").unwrap(); } ui.separator(); if MenuItem::new("About").build(ui) { let about = format!( "{}\n{}\n{}\n{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_DESCRIPTION"), &format!("Version: {}", env!("CARGO_PKG_VERSION")), &format!("Commit: {}", env!("GIT_HASH")), ); thread::spawn(move || { msgbox::create("About", &about, msgbox::IconType::Info).unwrap() }); } }); }); } fn bottom_bar(&mut self, ui: &mut Ui<'_>) { Window::new("Bottom") .position([0.0, self.size.y() - BOTTOM_BAR_SIZE], Condition::Always) .size([self.size.x(), BOTTOM_BAR_SIZE], Condition::Always) .resizable(false) .bg_alpha(1.0) .movable(false) .no_decoration() .focus_on_appearing(false) .always_use_window_padding(true) .build(ui, || { if let Some(image) = self.image_view.as_mut() { let (action, new_delay) = self.arrows.build(ui); update_delay(&mut self.delay, &new_delay); match action { Action::Left => { if let Some(path) = self.image_list.previous() { load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } Action::Right => { if let Some(path) = self.image_list.next() { load_image::load( self.proxy.clone(), path, self.cache.clone(), self.image_loader.clone(), ); } } Action::None => (), } ui.same_line_with_spacing(0.0, 20.0); ui.text(&format!("{} x {}", image.size.x(), image.size.y())); ui.same_line_with_spacing(0.0, 20.0); ui.text(&format!("Zoom: {}%", (image.scale * 100.0).round())); } }); } pub fn undo(&mut self, display: &Display) { let frame = self.stack.undo(); if let Some(frame) = frame { match frame { UndoFrame::Rotate(rot) => { self.image_view.as_mut().unwrap().rotate(-*rot); } UndoFrame::FlipHorizontal => { self.image_view.as_mut().unwrap().flip_horizontal(display); } UndoFrame::FlipVertical => { self.image_view.as_mut().unwrap().flip_vertical(display); } UndoFrame::Crop { frames, rotation } => { let view = self.image_view.as_mut().unwrap(); view.swap_frames(frames, display); std::mem::swap(&mut 
view.rotation, rotation); } UndoFrame::Resize(frames) => { let view = self.image_view.as_mut().unwrap(); view.swap_frames(frames, display); } } } } pub fn redo(&mut self, display: &Display) { let frame = self.stack.redo(); if let Some(frame) = frame { match frame { UndoFrame::Rotate(rot) => { self.image_view.as_mut().unwrap().rotate(*rot); } UndoFrame::FlipHorizontal => { self.image_view.as_mut().unwrap().flip_horizontal(display); } UndoFrame::FlipVertical => { self.image_view.as_mut().unwrap().flip_vertical(display); } UndoFrame::Crop { frames, rotation } => { let view = self.image_view.as_mut().unwrap(); view.swap_frames(frames, display); std::mem::swap(&mut view.rotation, rotation); } UndoFrame::Resize(frames) => { let view = self.image_view.as_mut().unwrap(); view.swap_frames(frames, display); } } } } fn zoom(&mut self, zoom: f32, mouse_position: Vec2<f32>) { if let Some(ref mut image) = self.image_view { let old_scale = image.scale; image.scale += image.scale * zoom as f32 / 10.0; let new_size = image.scaled(); if (new_size.x() < 100.0 || new_size.y() < 100.0) && old_scale >= image.scale && image.scale < 1.0 { image.scale = min!(old_scale, 1.0); } else { let mouse_to_center = image.position - mouse_position; image.position -= mouse_to_center * (old_scale - image.scale) / old_scale; } } } pub fn best_fit(&mut self) { if let Some(ref mut view) = self.image_view { let scaling = min!( self.size.x() / view.size.x(), (self.size.y() - self.top_bar_size - self.bottom_bar_size) / view.size.y() ); view.scale = min!(scaling, 1.0); view.position = self.size / 2.0; } } pub fn largest_fit(&mut self) { if let Some(ref mut view) = self.image_view { let scaling = min!( self.size.x() / view.size.x(), (self.size.y() - self.top_bar_size - self.bottom_bar_size) / view.size.y() ); view.scale = scaling; view.position = self.size / 2.0; } } pub fn new( proxy: EventLoopProxy<UserEvent>, size: [f32; 2], position: [i32; 2], display: &Display, ) -> Self { const MAX_SIZE: usize = 
1_000_000_000; let cache = Arc::new(Cache::new(MAX_SIZE)); let image_loader = Arc::new(RwLock::new(ImageLoader::new())); App { exit: false, delay: None, image_view: None, size: Vec2::from(size), position: Vec2::from(position), fullscreen: false, top_bar_size: TOP_BAR_SIZE, bottom_bar_size: BOTTOM_BAR_SIZE, image_list: ImageList::new(cache.clone(), proxy.clone(), image_loader.clone()), proxy, modifiers: ModifiersState::empty(), mouse_position: Vec2::default(), current_filename: String::new(), arrows: Arrows::new(), stack: UndoStack::new(), crop: Box::new(Crop::new(display)), cache, image_loader, resize: Resize::default(), maximized: true, save_size: Vec2::from(size), } } } pub fn delete<P: AsRef<Path>>(path: P, proxy: EventLoopProxy<UserEvent>) { let path = path.as_ref().to_path_buf(); thread::spawn(move || { let dialog = rfd::MessageDialog::new() .set_level(rfd::MessageLevel::Warning) .set_title("Move to trash") .set_description("Are you sure u want to move this to trash") .set_buttons(rfd::MessageButtons::YesNo) .show(); if dialog { if let Err(error) = trash::delete(path) { let _ = proxy.send_event(UserEvent::ErrorMessage(error.to_string())); } } }); } fn new_window() { let _ = Command::new(std::env::current_exe().unwrap()).spawn(); } fn update_delay(old: &mut Option<Duration>, new: &Option<Duration>) { if let Some(ref mut old_time) = old { if let Some(ref new_time) = new { if *old_time > *new_time { *old_time = *new_time; } } } else { *old = *new; } }
38.132255
108
0.39761
48bcd93b1c2420501ade2b082e5e083155243d9d
4,083
// Copyright 2021, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use std::io; use crate::covenants::token::CovenantToken; pub struct CovenantTokenEncoder<'a> { tokens: &'a [CovenantToken], } impl<'a> CovenantTokenEncoder<'a> { pub fn new(tokens: &'a [CovenantToken]) -> Self { Self { tokens } } pub fn write_to<W: io::Write>(&self, writer: &mut W) -> Result<usize, io::Error> { let mut written = 0; for token in self.tokens { written += token.write_to(writer)?; } Ok(written) } } pub(super) trait CovenentWriteExt: io::Write { fn write_u8_fixed(&mut self, v: u8) -> Result<usize, io::Error>; } impl<W: io::Write> CovenentWriteExt for W { fn write_u8_fixed(&mut self, v: u8) -> Result<usize, io::Error> { self.write_all(&[v])?; Ok(1) } } #[cfg(test)] mod tests { use super::*; use crate::{ covenant, covenants::{ byte_codes::{ARG_HASH, ARG_OUTPUT_FIELD, FILTER_AND, FILTER_FIELD_EQ, FILTER_IDENTITY, FILTER_OR}, OutputField, }, }; #[test] fn it_encodes_empty_tokens() { let encoder = CovenantTokenEncoder::new(&[]); let mut buf = Vec::<u8>::new(); let written = encoder.write_to(&mut buf).unwrap(); assert_eq!(buf, [] as [u8; 0]); assert_eq!(written, 0); } #[test] fn it_encodes_tokens_correctly() { let covenant = covenant!(and(identity(), or(identity()))); let encoder = CovenantTokenEncoder::new(covenant.tokens()); let mut buf = Vec::<u8>::new(); let written = encoder.write_to(&mut buf).unwrap(); assert_eq!(buf, [FILTER_AND, FILTER_IDENTITY, FILTER_OR, FILTER_IDENTITY]); assert_eq!(written, 4); } #[test] fn it_encodes_args_correctly() { let dummy = [0u8; 32]; let covenant = covenant!(field_eq(@field::features, @hash(dummy))); let encoder = CovenantTokenEncoder::new(covenant.tokens()); let mut buf = Vec::<u8>::new(); let written = encoder.write_to(&mut buf).unwrap(); assert_eq!(buf[..4], [ FILTER_FIELD_EQ, ARG_OUTPUT_FIELD, OutputField::Features.as_byte(), ARG_HASH ]); assert_eq!(buf[4..], [0u8; 32]); assert_eq!(written, 36); } mod covenant_write_ext { use super::*; #[test] fn it_writes_a_single_byte() { let mut buf = Vec::new(); 
buf.write_u8_fixed(123u8).unwrap(); assert_eq!(buf, vec![123u8]); } } }
35.504348
119
0.643155
01a0a42c638cbb18d040dd69a4baf380a598be83
1,901
use libc::c_char; use scraper::{ElementRef, Html, Selector}; use std::ffi::{CStr, CString}; pub struct NokogiriRust { document: Html, } impl NokogiriRust { fn parse(html: &str) -> Self { NokogiriRust { document: Html::parse_document(html), } } fn at_css(&self, selector: &str) -> ElementRef { self.document .select(&Selector::parse(selector).unwrap()) .next() .unwrap() } } #[no_mangle] pub extern "C" fn nokogiri_rust_html_parse(html: *const c_char) -> *mut NokogiriRust { let html = unsafe { assert!(!html.is_null()); CStr::from_ptr(html).to_str().unwrap() }; Box::into_raw(Box::new(NokogiriRust::parse(&html))) } #[no_mangle] pub extern "C" fn nokogiri_rust_html_free(ptr: *mut NokogiriRust) { if ptr.is_null() { return; } unsafe { Box::from_raw(ptr); } } #[no_mangle] pub extern "C" fn nokogiri_rust_element_ref_free(ptr: *mut ElementRef<'static>) { if ptr.is_null() { return; } unsafe { Box::from_raw(ptr); } } #[no_mangle] pub extern "C" fn nokogiri_rust_html_at_css( ptr: *const NokogiriRust, selector: *const c_char, ) -> *mut ElementRef<'static> { let nokogiri_rust = unsafe { assert!(!ptr.is_null()); &*ptr }; let selector = unsafe { assert!(!selector.is_null()); CStr::from_ptr(selector) }; let selector_str = selector.to_str().unwrap(); let element_ref = nokogiri_rust.at_css(selector_str); Box::into_raw(Box::new(element_ref)) } #[no_mangle] pub extern "C" fn nokogiri_rust_element_ref_text( ptr: *const ElementRef, ) -> *const c_char { let element_ref = unsafe { assert!(!ptr.is_null()); &*ptr }; let text = element_ref.text().next().unwrap(); CString::new(text).unwrap().into_raw() }
21.602273
86
0.596528
4a61853be4ea6c70a4549dc453a9cf495e3ea7a7
674
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod role_cache_mgr; mod role_mgr; mod user_mgr; mod user_udf; mod user_warehouse;
33.7
75
0.753709
56d903fed3b564f3afd468a44d866640eb1bf27a
23,501
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Selection over an array of receivers //! //! This module contains the implementation machinery necessary for selecting //! over a number of receivers. One large goal of this module is to provide an //! efficient interface to selecting over any receiver of any type. //! //! This is achieved through an architecture of a "receiver set" in which //! receivers are added to a set and then the entire set is waited on at once. //! The set can be waited on multiple times to prevent re-adding each receiver //! to the set. //! //! Usage of this module is currently encouraged to go through the use of the //! `select!` macro. This macro allows naturally binding of variables to the //! received values of receivers in a much more natural syntax then usage of the //! `Select` structure directly. //! //! # Examples //! //! ```rust //! #![feature(mpsc_select)] //! //! use std::sync::mpsc::channel; //! //! let (tx1, rx1) = channel(); //! let (tx2, rx2) = channel(); //! //! tx1.send(1).unwrap(); //! tx2.send(2).unwrap(); //! //! select! { //! val = rx1.recv() => { //! assert_eq!(val.unwrap(), 1); //! }, //! val = rx2.recv() => { //! assert_eq!(val.unwrap(), 2); //! } //! } //! ``` #![allow(dead_code)] #![unstable(feature = "mpsc_select", reason = "This implementation, while likely sufficient, is unsafe and \ likely to be error prone. At some point in the future this \ module will likely be replaced, and it is currently \ unknown how much API breakage that will cause. 
The ability \ to select over a number of channels will remain forever, \ but no guarantees beyond this are being made")] use core::cell::{Cell, UnsafeCell}; use core::marker; use core::ptr; use core::usize; use sync::mpsc::{Receiver, RecvError}; use sync::mpsc::blocking::{self, SignalToken}; /// The "receiver set" of the select interface. This structure is used to manage /// a set of receivers which are being selected over. pub struct Select { inner: UnsafeCell<SelectInner>, next_id: Cell<usize>, } struct SelectInner { head: *mut Handle<'static, ()>, tail: *mut Handle<'static, ()>, } impl !marker::Send for Select {} /// A handle to a receiver which is currently a member of a `Select` set of /// receivers. This handle is used to keep the receiver in the set as well as /// interact with the underlying receiver. pub struct Handle<'rx, T:Send+'rx> { /// The ID of this handle, used to compare against the return value of /// `Select::wait()` id: usize, selector: *mut SelectInner, next: *mut Handle<'static, ()>, prev: *mut Handle<'static, ()>, added: bool, packet: &'rx (Packet+'rx), // due to our fun transmutes, we be sure to place this at the end. (nothing // previous relies on T) rx: &'rx Receiver<T>, } struct Packets { cur: *mut Handle<'static, ()> } #[doc(hidden)] #[derive(PartialEq)] pub enum StartResult { Installed, Abort, } #[doc(hidden)] pub trait Packet { fn can_recv(&self) -> bool; fn start_selection(&self, token: SignalToken) -> StartResult; fn abort_selection(&self) -> bool; } impl Select { /// Creates a new selection structure. This set is initially empty. /// /// Usage of this struct directly can sometimes be burdensome, and usage is much easier through /// the `select!` macro. 
/// /// # Examples /// /// ``` /// #![feature(mpsc_select)] /// /// use std::sync::mpsc::Select; /// /// let select = Select::new(); /// ``` pub fn new() -> Select { Select { inner: UnsafeCell::new(SelectInner { head: ptr::null_mut(), tail: ptr::null_mut(), }), next_id: Cell::new(1), } } /// Creates a new handle into this receiver set for a new receiver. Note /// that this does *not* add the receiver to the receiver set, for that you /// must call the `add` method on the handle itself. pub fn handle<'a, T: Send>(&'a self, rx: &'a Receiver<T>) -> Handle<'a, T> { let id = self.next_id.get(); self.next_id.set(id + 1); Handle { id: id, selector: self.inner.get(), next: ptr::null_mut(), prev: ptr::null_mut(), added: false, rx: rx, packet: rx, } } /// Waits for an event on this receiver set. The returned value is *not* an /// index, but rather an id. This id can be queried against any active /// `Handle` structures (each one has an `id` method). The handle with /// the matching `id` will have some sort of event available on it. The /// event could either be that data is available or the corresponding /// channel has been closed. pub fn wait(&self) -> usize { self.wait2(true) } /// Helper method for skipping the preflight checks during testing fn wait2(&self, do_preflight_checks: bool) -> usize { // Note that this is currently an inefficient implementation. We in // theory have knowledge about all receivers in the set ahead of time, // so this method shouldn't really have to iterate over all of them yet // again. The idea with this "receiver set" interface is to get the // interface right this time around, and later this implementation can // be optimized. // // This implementation can be summarized by: // // fn select(receivers) { // if any receiver ready { return ready index } // deschedule { // block on all receivers // } // unblock on all receivers // return ready index // } // // Most notably, the iterations over all of the receivers shouldn't be // necessary. 
unsafe { // Stage 1: preflight checks. Look for any packets ready to receive if do_preflight_checks { for handle in self.iter() { if (*handle).packet.can_recv() { return (*handle).id(); } } } // Stage 2: begin the blocking process // // Create a number of signal tokens, and install each one // sequentially until one fails. If one fails, then abort the // selection on the already-installed tokens. let (wait_token, signal_token) = blocking::tokens(); for (i, handle) in self.iter().enumerate() { match (*handle).packet.start_selection(signal_token.clone()) { StartResult::Installed => {} StartResult::Abort => { // Go back and abort the already-begun selections for handle in self.iter().take(i) { (*handle).packet.abort_selection(); } return (*handle).id; } } } // Stage 3: no messages available, actually block wait_token.wait(); // Stage 4: there *must* be message available; find it. // // Abort the selection process on each receiver. If the abort // process returns `true`, then that means that the receiver is // ready to receive some data. Note that this also means that the // receiver may have yet to have fully read the `to_wake` field and // woken us up (although the wakeup is guaranteed to fail). // // This situation happens in the window of where a sender invokes // increment(), sees -1, and then decides to wake up the thread. After // all this is done, the sending thread will set `selecting` to // `false`. Until this is done, we cannot return. If we were to // return, then a sender could wake up a receiver which has gone // back to sleep after this call to `select`. // // Note that it is a "fairly small window" in which an increment() // views that it should wake a thread up until the `selecting` bit // is set to false. For now, the implementation currently just spins // in a yield loop. This is very distasteful, but this // implementation is already nowhere near what it should ideally be. 
// A rewrite should focus on avoiding a yield loop, and for now this // implementation is tying us over to a more efficient "don't // iterate over everything every time" implementation. let mut ready_id = usize::MAX; for handle in self.iter() { if (*handle).packet.abort_selection() { ready_id = (*handle).id; } } // We must have found a ready receiver assert!(ready_id != usize::MAX); return ready_id; } } fn iter(&self) -> Packets { Packets { cur: unsafe { &*self.inner.get() }.head } } } impl<'rx, T: Send> Handle<'rx, T> { /// Retrieves the id of this handle. #[inline] pub fn id(&self) -> usize { self.id } /// Blocks to receive a value on the underlying receiver, returning `Some` on /// success or `None` if the channel disconnects. This function has the same /// semantics as `Receiver.recv` pub fn recv(&mut self) -> Result<T, RecvError> { self.rx.recv() } /// Adds this handle to the receiver set that the handle was created from. This /// method can be called multiple times, but it has no effect if `add` was /// called previously. /// /// This method is unsafe because it requires that the `Handle` is not moved /// while it is added to the `Select` set. pub unsafe fn add(&mut self) { if self.added { return } let selector = &mut *self.selector; let me = self as *mut Handle<'rx, T> as *mut Handle<'static, ()>; if selector.head.is_null() { selector.head = me; selector.tail = me; } else { (*me).prev = selector.tail; assert!((*me).next.is_null()); (*selector.tail).next = me; selector.tail = me; } self.added = true; } /// Removes this handle from the `Select` set. This method is unsafe because /// it has no guarantee that the `Handle` was not moved since `add` was /// called. 
pub unsafe fn remove(&mut self) { if !self.added { return } let selector = &mut *self.selector; let me = self as *mut Handle<'rx, T> as *mut Handle<'static, ()>; if self.prev.is_null() { assert_eq!(selector.head, me); selector.head = self.next; } else { (*self.prev).next = self.next; } if self.next.is_null() { assert_eq!(selector.tail, me); selector.tail = self.prev; } else { (*self.next).prev = self.prev; } self.next = ptr::null_mut(); self.prev = ptr::null_mut(); self.added = false; } } impl Drop for Select { fn drop(&mut self) { unsafe { assert!((&*self.inner.get()).head.is_null()); assert!((&*self.inner.get()).tail.is_null()); } } } impl<'rx, T: Send> Drop for Handle<'rx, T> { fn drop(&mut self) { unsafe { self.remove() } } } impl Iterator for Packets { type Item = *mut Handle<'static, ()>; fn next(&mut self) -> Option<*mut Handle<'static, ()>> { if self.cur.is_null() { None } else { let ret = Some(self.cur); unsafe { self.cur = (*self.cur).next; } ret } } } #[cfg(test)] #[allow(unused_imports)] mod tests { use prelude::v1::*; use thread; use sync::mpsc::*; // Don't use the libstd version so we can pull in the right Select structure // (std::comm points at the wrong one) macro_rules! select { ( $($name:pat = $rx:ident.$meth:ident() => $code:expr),+ ) => ({ let sel = Select::new(); $( let mut $rx = sel.handle(&$rx); )+ unsafe { $( $rx.add(); )+ } let ret = sel.wait(); $( if ret == $rx.id() { let $name = $rx.$meth(); $code } else )+ { unreachable!() } }) } #[test] fn smoke() { let (tx1, rx1) = channel::<i32>(); let (tx2, rx2) = channel::<i32>(); tx1.send(1).unwrap(); select! { foo = rx1.recv() => { assert_eq!(foo.unwrap(), 1); }, _bar = rx2.recv() => { panic!() } } tx2.send(2).unwrap(); select! { _foo = rx1.recv() => { panic!() }, bar = rx2.recv() => { assert_eq!(bar.unwrap(), 2) } } drop(tx1); select! { foo = rx1.recv() => { assert!(foo.is_err()); }, _bar = rx2.recv() => { panic!() } } drop(tx2); select! 
{ bar = rx2.recv() => { assert!(bar.is_err()); } } } #[test] fn smoke2() { let (_tx1, rx1) = channel::<i32>(); let (_tx2, rx2) = channel::<i32>(); let (_tx3, rx3) = channel::<i32>(); let (_tx4, rx4) = channel::<i32>(); let (tx5, rx5) = channel::<i32>(); tx5.send(4).unwrap(); select! { _foo = rx1.recv() => { panic!("1") }, _foo = rx2.recv() => { panic!("2") }, _foo = rx3.recv() => { panic!("3") }, _foo = rx4.recv() => { panic!("4") }, foo = rx5.recv() => { assert_eq!(foo.unwrap(), 4); } } } #[test] fn closed() { let (_tx1, rx1) = channel::<i32>(); let (tx2, rx2) = channel::<i32>(); drop(tx2); select! { _a1 = rx1.recv() => { panic!() }, a2 = rx2.recv() => { assert!(a2.is_err()); } } } #[test] fn unblocks() { let (tx1, rx1) = channel::<i32>(); let (_tx2, rx2) = channel::<i32>(); let (tx3, rx3) = channel::<i32>(); let _t = thread::spawn(move|| { for _ in 0..20 { thread::yield_now(); } tx1.send(1).unwrap(); rx3.recv().unwrap(); for _ in 0..20 { thread::yield_now(); } }); select! { a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, _b = rx2.recv() => { panic!() } } tx3.send(1).unwrap(); select! { a = rx1.recv() => { assert!(a.is_err()) }, _b = rx2.recv() => { panic!() } } } #[test] fn both_ready() { let (tx1, rx1) = channel::<i32>(); let (tx2, rx2) = channel::<i32>(); let (tx3, rx3) = channel::<()>(); let _t = thread::spawn(move|| { for _ in 0..20 { thread::yield_now(); } tx1.send(1).unwrap(); tx2.send(2).unwrap(); rx3.recv().unwrap(); }); select! { a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } } select! 
{ a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } } assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); assert_eq!(rx2.try_recv(), Err(TryRecvError::Empty)); tx3.send(()).unwrap(); } #[test] fn stress() { const AMT: i32 = 10000; let (tx1, rx1) = channel::<i32>(); let (tx2, rx2) = channel::<i32>(); let (tx3, rx3) = channel::<()>(); let _t = thread::spawn(move|| { for i in 0..AMT { if i % 2 == 0 { tx1.send(i).unwrap(); } else { tx2.send(i).unwrap(); } rx3.recv().unwrap(); } }); for i in 0..AMT { select! { i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1.unwrap()); }, i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2.unwrap()); } } tx3.send(()).unwrap(); } } #[test] fn cloning() { let (tx1, rx1) = channel::<i32>(); let (_tx2, rx2) = channel::<i32>(); let (tx3, rx3) = channel::<()>(); let _t = thread::spawn(move|| { rx3.recv().unwrap(); tx1.clone(); assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); tx1.send(2).unwrap(); rx3.recv().unwrap(); }); tx3.send(()).unwrap(); select! { _i1 = rx1.recv() => {}, _i2 = rx2.recv() => panic!() } tx3.send(()).unwrap(); } #[test] fn cloning2() { let (tx1, rx1) = channel::<i32>(); let (_tx2, rx2) = channel::<i32>(); let (tx3, rx3) = channel::<()>(); let _t = thread::spawn(move|| { rx3.recv().unwrap(); tx1.clone(); assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); tx1.send(2).unwrap(); rx3.recv().unwrap(); }); tx3.send(()).unwrap(); select! 
{ _i1 = rx1.recv() => {}, _i2 = rx2.recv() => panic!() } tx3.send(()).unwrap(); } #[test] fn cloning3() { let (tx1, rx1) = channel::<()>(); let (tx2, rx2) = channel::<()>(); let (tx3, rx3) = channel::<()>(); let _t = thread::spawn(move|| { let s = Select::new(); let mut h1 = s.handle(&rx1); let mut h2 = s.handle(&rx2); unsafe { h2.add(); } unsafe { h1.add(); } assert_eq!(s.wait(), h2.id); tx3.send(()).unwrap(); }); for _ in 0..1000 { thread::yield_now(); } drop(tx1.clone()); tx2.send(()).unwrap(); rx3.recv().unwrap(); } #[test] fn preflight1() { let (tx, rx) = channel(); tx.send(()).unwrap(); select! { _n = rx.recv() => {} } } #[test] fn preflight2() { let (tx, rx) = channel(); tx.send(()).unwrap(); tx.send(()).unwrap(); select! { _n = rx.recv() => {} } } #[test] fn preflight3() { let (tx, rx) = channel(); drop(tx.clone()); tx.send(()).unwrap(); select! { _n = rx.recv() => {} } } #[test] fn preflight4() { let (tx, rx) = channel(); tx.send(()).unwrap(); let s = Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn preflight5() { let (tx, rx) = channel(); tx.send(()).unwrap(); tx.send(()).unwrap(); let s = Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn preflight6() { let (tx, rx) = channel(); drop(tx.clone()); tx.send(()).unwrap(); let s = Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn preflight7() { let (tx, rx) = channel::<()>(); drop(tx); let s = Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn preflight8() { let (tx, rx) = channel(); tx.send(()).unwrap(); drop(tx); rx.recv().unwrap(); let s = Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn preflight9() { let (tx, rx) = channel(); drop(tx.clone()); tx.send(()).unwrap(); drop(tx); rx.recv().unwrap(); let s = 
Select::new(); let mut h = s.handle(&rx); unsafe { h.add(); } assert_eq!(s.wait2(false), h.id); } #[test] fn oneshot_data_waiting() { let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); let _t = thread::spawn(move|| { select! { _n = rx1.recv() => {} } tx2.send(()).unwrap(); }); for _ in 0..100 { thread::yield_now() } tx1.send(()).unwrap(); rx2.recv().unwrap(); } #[test] fn stream_data_waiting() { let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); tx1.send(()).unwrap(); tx1.send(()).unwrap(); rx1.recv().unwrap(); rx1.recv().unwrap(); let _t = thread::spawn(move|| { select! { _n = rx1.recv() => {} } tx2.send(()).unwrap(); }); for _ in 0..100 { thread::yield_now() } tx1.send(()).unwrap(); rx2.recv().unwrap(); } #[test] fn shared_data_waiting() { let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); drop(tx1.clone()); tx1.send(()).unwrap(); rx1.recv().unwrap(); let _t = thread::spawn(move|| { select! { _n = rx1.recv() => {} } tx2.send(()).unwrap(); }); for _ in 0..100 { thread::yield_now() } tx1.send(()).unwrap(); rx2.recv().unwrap(); } #[test] fn sync1() { let (tx, rx) = sync_channel::<i32>(1); tx.send(1).unwrap(); select! { n = rx.recv() => { assert_eq!(n.unwrap(), 1); } } } #[test] fn sync2() { let (tx, rx) = sync_channel::<i32>(0); let _t = thread::spawn(move|| { for _ in 0..100 { thread::yield_now() } tx.send(1).unwrap(); }); select! { n = rx.recv() => { assert_eq!(n.unwrap(), 1); } } } #[test] fn sync3() { let (tx1, rx1) = sync_channel::<i32>(0); let (tx2, rx2): (Sender<i32>, Receiver<i32>) = channel(); let _t = thread::spawn(move|| { tx1.send(1).unwrap(); }); let _t = thread::spawn(move|| { tx2.send(2).unwrap(); }); select! { n = rx1.recv() => { let n = n.unwrap(); assert_eq!(n, 1); assert_eq!(rx2.recv().unwrap(), 2); }, n = rx2.recv() => { let n = n.unwrap(); assert_eq!(n, 2); assert_eq!(rx1.recv().unwrap(), 1); } } } }
30.720261
99
0.489171
7a7d65f654cbb9cca919ae62db2f36cefeb7b0be
11,026
use crate::{ json_wallet::{ Aes128Ctr, ChecksumModule, Cipher, CipherModule, Crypto, EmptyMap, EmptyString, JsonWallet, Kdf, KdfModule, Sha256Checksum, TypeField, Version, }, KeyType, ValidatorPath, }; pub use bip39::{Mnemonic, Seed as Bip39Seed}; pub use eth2_key_derivation::{DerivedKey, DerivedKeyError}; use eth2_keystore::{ decrypt, default_kdf, encrypt, keypair_from_secret, Keystore, KeystoreBuilder, IV_SIZE, SALT_SIZE, }; pub use eth2_keystore::{Error as KeystoreError, PlainText}; use rand::prelude::*; use serde::{Deserialize, Serialize}; use std::io::{Read, Write}; pub use uuid::Uuid; #[derive(Debug, PartialEq)] pub enum Error { KeystoreError(KeystoreError), PathExhausted, EmptyPassword, EmptySeed, InvalidNextAccount { old: u32, new: u32 }, } impl From<KeystoreError> for Error { fn from(e: KeystoreError) -> Error { Error::KeystoreError(e) } } impl From<DerivedKeyError> for Error { fn from(e: DerivedKeyError) -> Error { match e { DerivedKeyError::EmptySeed => Error::EmptySeed, } } } /// Contains the two keystores required for an eth2 validator. pub struct ValidatorKeystores { /// Contains the secret key used for signing every-day consensus messages (blocks, /// attestations, etc). pub voting: Keystore, /// Contains the secret key that should eventually be required for withdrawing stacked ETH. pub withdrawal: Keystore, } /// Constructs a `Keystore`. /// /// Generates the KDF `salt` and AES `IV` using `rand::thread_rng()`. pub struct WalletBuilder<'a> { seed: PlainText, password: &'a [u8], kdf: Kdf, cipher: Cipher, uuid: Uuid, name: String, nextaccount: u32, } impl<'a> WalletBuilder<'a> { /// Creates a new builder for a seed specified as a BIP-39 `Mnemonic` (where the nmemonic itself does /// not have a passphrase). /// /// ## Errors /// /// Returns `Error::EmptyPassword` if `password == ""`. 
pub fn from_mnemonic( mnemonic: &Mnemonic, password: &'a [u8], name: String, ) -> Result<Self, Error> { let seed = Bip39Seed::new(mnemonic, ""); Self::from_seed_bytes(seed.as_bytes(), password, name) } /// Creates a new builder from a `seed` specified as a byte slice. /// /// ## Errors /// /// Returns `Error::EmptyPassword` if `password == ""`. pub fn from_seed_bytes(seed: &[u8], password: &'a [u8], name: String) -> Result<Self, Error> { if password.is_empty() { Err(Error::EmptyPassword) } else if seed.is_empty() { Err(Error::EmptySeed) } else { let salt = rand::thread_rng().gen::<[u8; SALT_SIZE]>(); let iv = rand::thread_rng().gen::<[u8; IV_SIZE]>().to_vec().into(); Ok(Self { seed: seed.to_vec().into(), password, kdf: default_kdf(salt.to_vec()), cipher: Cipher::Aes128Ctr(Aes128Ctr { iv }), uuid: Uuid::new_v4(), nextaccount: 0, name, }) } } /// Consumes `self`, returning an encrypted `Wallet`. pub fn build(self) -> Result<Wallet, Error> { Wallet::encrypt( self.seed.as_bytes(), self.password, self.kdf, self.cipher, self.uuid, self.name, self.nextaccount, ) } } #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct Wallet { json: JsonWallet, } impl Wallet { /// Instantiates `Self`, encrypting the `seed` using `password` (via `kdf` and `cipher`). /// /// The `uuid`, `name` and `nextaccount` are carried through into the created wallet. 
fn encrypt( seed: &[u8], password: &[u8], kdf: Kdf, cipher: Cipher, uuid: Uuid, name: String, nextaccount: u32, ) -> Result<Self, Error> { let (cipher_text, checksum) = encrypt(seed, password, &kdf, &cipher)?; Ok(Self { json: JsonWallet { crypto: Crypto { kdf: KdfModule { function: kdf.function(), params: kdf, message: EmptyString, }, checksum: ChecksumModule { function: Sha256Checksum::function(), params: EmptyMap, message: checksum.to_vec().into(), }, cipher: CipherModule { function: cipher.function(), params: cipher, message: cipher_text.into(), }, }, uuid, nextaccount, version: Version::one(), type_field: TypeField::Hd, name, }, }) } /// Produces a `Keystore` (encrypted with `keystore_password`) for the validator at /// `self.nextaccount`, incrementing `self.nextaccount` if the keystore was successfully /// generated. /// /// Uses the default encryption settings of `KeystoreBuilder`, not necessarily those that were /// used to encrypt `self`. /// /// ## Errors /// /// - If `wallet_password` is unable to decrypt `self`. /// - If `keystore_password.is_empty()`. /// - If `self.nextaccount == u32::max_value()`. pub fn next_validator( &mut self, wallet_password: &[u8], voting_keystore_password: &[u8], withdrawal_keystore_password: &[u8], ) -> Result<ValidatorKeystores, Error> { // Helper closure to reduce code duplication when generating keys. // // It is not a function on `self` to help protect against generating keys without // incrementing `nextaccount`. let derive = |key_type: KeyType, password: &[u8]| -> Result<Keystore, Error> { let (secret, path) = recover_validator_secret(self, wallet_password, self.json.nextaccount, key_type)?; let keypair = keypair_from_secret(secret.as_bytes())?; KeystoreBuilder::new(&keypair, password, format!("{}", path))? 
.build() .map_err(Into::into) }; let keystores = ValidatorKeystores { voting: derive(KeyType::Voting, voting_keystore_password)?, withdrawal: derive(KeyType::Withdrawal, withdrawal_keystore_password)?, }; self.json.nextaccount = self .json .nextaccount .checked_add(1) .ok_or(Error::PathExhausted)?; Ok(keystores) } /// Returns the value of the JSON wallet `nextaccount` field. /// /// This will be the index of the next wallet generated with `Self::next_validator`. pub fn nextaccount(&self) -> u32 { self.json.nextaccount } /// Sets the value of the JSON wallet `nextaccount` field. /// /// This will be the index of the next wallet generated with `Self::next_validator`. /// /// ## Errors /// /// Returns `Err(())` if `nextaccount` is less than `self.nextaccount()` without mutating /// `self`. This is to protect against duplicate validator generation. pub fn set_nextaccount(&mut self, nextaccount: u32) -> Result<(), Error> { if nextaccount >= self.nextaccount() { self.json.nextaccount = nextaccount; Ok(()) } else { Err(Error::InvalidNextAccount { old: self.json.nextaccount, new: nextaccount, }) } } /// Returns the value of the JSON wallet `name` field. pub fn name(&self) -> &str { &self.json.name } /// Returns the value of the JSON wallet `uuid` field. pub fn uuid(&self) -> &Uuid { &self.json.uuid } /// Returns the value of the JSON wallet `type` field. pub fn type_field(&self) -> String { self.json.type_field.clone().into() } /// Returns the master seed of this wallet. Care should be taken not to leak this seed. pub fn decrypt_seed(&self, password: &[u8]) -> Result<PlainText, Error> { decrypt(password, &self.json.crypto).map_err(Into::into) } /// Encodes `self` as a JSON object. pub fn to_json_string(&self) -> Result<String, Error> { serde_json::to_string(self) .map_err(|e| KeystoreError::UnableToSerialize(format!("{}", e))) .map_err(Into::into) } /// Returns `self` from an encoded JSON object. 
pub fn from_json_str(json_string: &str) -> Result<Self, Error> { serde_json::from_str(json_string) .map_err(|e| KeystoreError::InvalidJson(format!("{}", e))) .map_err(Into::into) } /// Encodes self as a JSON object to the given `writer`. pub fn to_json_writer<W: Write>(&self, writer: W) -> Result<(), Error> { serde_json::to_writer(writer, self) .map_err(|e| KeystoreError::WriteError(format!("{}", e))) .map_err(Into::into) } /// Instantiates `self` from a JSON `reader`. pub fn from_json_reader<R: Read>(reader: R) -> Result<Self, Error> { serde_json::from_reader(reader) .map_err(|e| KeystoreError::ReadError(format!("{}", e))) .map_err(Into::into) } } /// Returns `(secret, path)` for the `key_type` for the validator at `index`. /// /// This function should only be used for recovering lost keys, not creating new ones because it /// does not update `wallet.nextaccount`. Using this function to generate new keys can easily /// result in the same key being unknowingly generated twice. /// /// To generate consecutive keys safely, use `Wallet::next_voting_keystore`. pub fn recover_validator_secret( wallet: &Wallet, wallet_password: &[u8], index: u32, key_type: KeyType, ) -> Result<(PlainText, ValidatorPath), Error> { let path = ValidatorPath::new(index, key_type); let secret = wallet.decrypt_seed(wallet_password)?; let master = DerivedKey::from_seed(secret.as_bytes()).map_err(Error::from)?; let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i)); Ok((destination.secret().to_vec().into(), path)) } /// Returns `(secret, path)` for the `key_type` for the validator at `index`. /// /// This function should only be used for key recovery since it can easily lead to key duplication. 
pub fn recover_validator_secret_from_mnemonic( secret: &[u8], index: u32, key_type: KeyType, ) -> Result<(PlainText, ValidatorPath), Error> { let path = ValidatorPath::new(index, key_type); let master = DerivedKey::from_seed(secret).map_err(Error::from)?; let destination = path.iter_nodes().fold(master, |dk, i| dk.child(*i)); Ok((destination.secret().to_vec().into(), path)) }
33.311178
105
0.583893
3ac7ea7cccea1f5ccc7e11de52277e5509da0254
360
#![allow(dead_code)] //! Runtime Driver Protocol //! //! The runtime driver protocol... pub const PROTOCOL_GUID: r_efi::base::Guid = r_efi::base::Guid::from_fields( 0xd487ddb4, 0x008b, 0x11d9, 0xaf, 0xdc, &[0x00, 0x10, 0x83, 0xff, 0xca, 0x4d], ); #[repr(C)] pub struct Protocol { pub value: r_efi::base::Handle, }
20
77
0.6
1d9e6f7b6443864a7e9998c94484f55c1437bac1
45
pub mod ocean; pub mod paint; pub mod shape;
11.25
14
0.733333
4a88419426130fca46c617065048e6a4525a5933
7,793
#![allow(trivial_casts)] use super::{InnermostTypeId, Metadata, Pointer}; use core::{ any::{Any, TypeId}, cell::{Ref, RefCell, RefMut}, marker::Unsize, ptr, }; #[cfg(all(feature = "alloc", not(feature = "std")))] use alloc::{boxed::Box, rc, sync}; #[cfg(feature = "std")] use std::{boxed::Box, rc, sync}; #[cfg(feature = "alloc")] use super::TypeIdDeterminationError::UnableToUpgradeWeakReference; type T = i32; type U = dyn PartialEq<T>; const ADDRESS: *mut () = 0xdeadbeef_usize as _; const METADATA: Metadata<U> = ptr::metadata::<U>(ptr::null::<T>()); trait Foo { fn double(&mut self); fn get(&self) -> i32; } impl Foo for i32 { fn double(&mut self) { *self *= 2; } fn get(&self) -> i32 { *self } } fn metadata<T: Unsize<U>, U: ?Sized>() -> Metadata<U> { ptr::metadata::<U>(ptr::null::<T>()) } #[test] fn raw_const_ptr_coerces() { const ADDRESS: *const () = self::ADDRESS; unsafe { let ptr: *const dyn Any = ADDRESS as *const T; let coerced = ptr.coerce::<U>(METADATA); assert_eq!(coerced.to_raw_parts(), (ADDRESS, METADATA)); } } #[test] fn raw_mut_ptr_coerces() { unsafe { let ptr: *mut dyn Any = ADDRESS as *mut T; let coerced = ptr.coerce::<U>(METADATA); assert_eq!(coerced.to_raw_parts(), (ADDRESS, METADATA)); } } #[test] fn non_null_coerces() { unsafe { let ptr: ptr::NonNull<dyn Any> = ptr::NonNull::new_unchecked(ADDRESS as *mut T); let coerced = ptr.coerce::<U>(METADATA); assert_eq!( coerced.to_raw_parts(), (ptr::NonNull::new_unchecked(ADDRESS), METADATA) ); } } #[test] fn ref_coerces() { unsafe { let ptr: &dyn Any = &12345; let coerced = ptr.coerce::<U>(METADATA); assert!(coerced.eq(&12345)); } } #[test] fn mut_ref_coerces() { unsafe { let ptr: &mut dyn Any = &mut 12345; let coerced = ptr.coerce::<dyn Foo>(metadata::<T, dyn Foo>()); coerced.double(); assert_eq!(coerced.get(), 12345 * 2); } } #[test] fn cell_ref_coerces() { unsafe { let cell = RefCell::new(12345); let borrow: Ref<'_, dyn Any> = cell.borrow(); let coerced = borrow.coerce::<U>(METADATA); 
assert!(coerced.eq(&12345)); } } #[test] fn cell_refmut_coerces() { unsafe { let cell = RefCell::new(12345); let borrow: RefMut<'_, dyn Any> = cell.borrow_mut(); let mut coerced = borrow.coerce::<dyn Foo>(metadata::<T, dyn Foo>()); coerced.double(); assert_eq!(coerced.get(), 12345 * 2); } } #[cfg(feature = "alloc")] #[test] fn box_coerces() { unsafe { let boxed: Box<dyn Any> = Box::new(12345); let coerced = boxed.coerce::<U>(METADATA); assert!(coerced.eq(&12345)); } } #[cfg(feature = "alloc")] #[test] fn strong_rc_coerces() { unsafe { let rc: rc::Rc<dyn Any> = rc::Rc::new(12345); let coerced = rc.coerce::<U>(METADATA); assert!(coerced.eq(&12345)); } } #[cfg(feature = "alloc")] #[test] fn weak_rc_coerces() { unsafe { let rc = rc::Rc::new(12345); let weak: rc::Weak<dyn Any> = rc::Rc::downgrade(&rc) as _; let coerced = weak.coerce::<U>(METADATA); assert!(coerced.upgrade().unwrap().eq(&12345)); } } #[cfg(feature = "alloc")] #[test] fn weak_rc_coerces_even_if_dangling() { unsafe { let weak: rc::Weak<dyn Any> = rc::Rc::downgrade(&rc::Rc::new(12345)) as _; let coerced = weak.coerce::<U>(METADATA); assert!(coerced.upgrade().is_none()); } } #[cfg(feature = "alloc")] #[test] fn strong_arc_coerces() { unsafe { let arc: sync::Arc<dyn Any> = sync::Arc::new(12345); let coerced = arc.coerce::<U>(METADATA); assert!(coerced.eq(&12345)); } } #[cfg(feature = "alloc")] #[test] fn weak_arc_coerces() { unsafe { let arc = sync::Arc::new(12345); let weak: sync::Weak<dyn Any> = sync::Arc::downgrade(&arc) as _; let coerced = weak.coerce::<U>(METADATA); assert!(coerced.upgrade().unwrap().eq(&12345)); } } #[cfg(feature = "alloc")] #[test] fn weak_arc_coerces_even_if_dangling() { unsafe { let weak: sync::Weak<dyn Any> = sync::Arc::downgrade(&sync::Arc::new(12345)) as _; let coerced = weak.coerce::<U>(METADATA); assert!(coerced.upgrade().is_none()); } } #[test] fn compound_types_transitively_coerce() { unsafe { let cell = RefCell::new(12345); let compound: &RefCell<dyn Any> = &cell; let coerced 
= compound.coerce::<U>(METADATA); assert!(coerced.borrow().eq(&12345)); } } #[test] fn innermost_type_id_of_ref() { let ptr: &dyn Any = &12345; let type_id = ptr.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[test] fn innermost_type_id_of_mut_ref() { let ptr: &mut dyn Any = &mut 12345; let type_id = ptr.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[test] fn innermost_type_id_of_cell_ref() { let cell = RefCell::new(12345); let borrow: Ref<'_, dyn Any> = cell.borrow(); let type_id = borrow.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[test] fn innermost_type_id_of_cell_refmut() { let cell = RefCell::new(12345); let borrow: RefMut<'_, dyn Any> = cell.borrow_mut(); let type_id = borrow.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_box() { let boxed: Box<dyn Any> = Box::new(12345); let type_id = boxed.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_strong_rc() { let rc: rc::Rc<dyn Any> = rc::Rc::new(12345); let type_id = rc.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_weak_rc() { let rc = rc::Rc::new(12345); let weak: rc::Weak<dyn Any> = rc::Rc::downgrade(&rc) as _; let type_id = weak.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_weak_rc_fails_if_dangling() { let weak: rc::Weak<dyn Any> = rc::Rc::downgrade(&rc::Rc::new(12345)) as _; let type_id = weak.innermost_type_id(); assert_eq!( type_id, Err(UnableToUpgradeWeakReference { type_name: "alloc::rc::Weak<dyn core::any::Any>", }) ); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_strong_arc() { let arc: sync::Arc<dyn Any> = sync::Arc::new(12345); let type_id = 
arc.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_weak_arc() { let arc = sync::Arc::new(12345); let weak: sync::Weak<dyn Any> = sync::Arc::downgrade(&arc) as _; let type_id = weak.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); } #[cfg(feature = "alloc")] #[test] fn innermost_type_id_of_weak_arc_fails_if_dangling() { let weak: sync::Weak<dyn Any> = sync::Arc::downgrade(&sync::Arc::new(12345)) as _; let type_id = weak.innermost_type_id(); assert_eq!( type_id, Err(UnableToUpgradeWeakReference { type_name: "alloc::sync::Weak<dyn core::any::Any>", }) ); } #[test] fn innermost_type_id_of_compound_types_are_transitive() { let cell = RefCell::new(12345); let compound: &RefCell<dyn Any> = &cell; let type_id = compound.innermost_type_id().unwrap(); assert_eq!(type_id, TypeId::of::<i32>()); }
23.831804
90
0.594123
ab51016c459154b3660cc7584ac6162d4fc14a6c
3,582
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::FuzzTarget; use failure::prelude::*; use sha1::{Digest, Sha1}; use solana_libra_proptest_helpers::ValueGenerator; use std::{ env, ffi::OsString, fs, io::Write, path::{Path, PathBuf}, process::Command, }; /// Generates data for this fuzz target into the output directory. Returns the number of items /// generated. /// /// The corpus directory should be present at the time this method is called. pub fn make_corpus( target: FuzzTarget, num_items: usize, corpus_dir: &Path, debug: bool, ) -> Result<usize> { // TODO: Allow custom proptest configs? let mut gen = ValueGenerator::new(); let mut sha1 = Sha1::new(); let mut idx = 0; while idx < num_items { let result = match target.generate(idx, &mut gen) { Some(bytes) => bytes, None => { // No value could be generated. Assume that corpus generation has been exhausted. break; } }; // Use the SHA-1 of the result as the file name. sha1.input(&result); let hash = sha1.result_reset(); let name = hex::encode(hash.as_slice()); let path = corpus_dir.join(name); let mut f = fs::File::create(&path) .with_context(|_| format!("Failed to create file: {:?}", path))?; if debug { println!("Writing {} bytes to file: {:?}", result.len(), path); } f.write_all(&result) .with_context(|_| format!("Failed to write to file: {:?}", path))?; idx += 1; } Ok(idx) } /// Fuzz a target by running `cargo fuzz run`. pub fn fuzz_target( target: FuzzTarget, corpus_dir: PathBuf, artifact_dir: PathBuf, mut args: Vec<OsString>, ) -> Result<()> { static FUZZ_RUNNER: &str = "fuzz_runner"; // Do a bit of arg parsing -- look for a "--" and insert the target and corpus directory // before that. let dash_dash_pos = args.iter().position(|x| x == "--"); let splice_pos = dash_dash_pos.unwrap_or_else(|| args.len()); args.splice( splice_pos..splice_pos, vec![FUZZ_RUNNER.into(), corpus_dir.into()], ); // The artifact dir goes at the end. 
if dash_dash_pos.is_none() { args.push("--".into()); } let mut artifact_arg: OsString = "-artifact_prefix=".into(); artifact_arg.push(&artifact_dir); // Add a trailing slash as required by libfuzzer to put the artifact in a directory. artifact_arg.push("/"); args.push(artifact_arg); // Pass the target name in as an environment variable. // Use the manifest directory as the current one. let manifest_dir = match env::var_os("CARGO_MANIFEST_DIR") { Some(dir) => dir, None => bail!("Fuzzing requires CARGO_MANIFEST_DIR to be set (are you using `cargo run`?)"), }; let status = Command::new("cargo") .arg("fuzz") .arg("run") .args(args) .current_dir(manifest_dir) .env(FuzzTarget::ENV_VAR, target.name()) .status() .context("cargo fuzz run errored")?; if !status.success() { bail!("cargo fuzz run failed with status {}", status); } Ok(()) } /// List all known fuzz targets. pub fn list_targets(no_desc: bool) { for target in FuzzTarget::all_targets() { if no_desc { println!("{}", target.name()) } else { println!(" * {0: <24} {1}", target.name(), target.description()) } } }
30.10084
100
0.590173
0e9a207e5fb44e2b2e6d64f23e082f9758697945
1,386
#![no_std] #[macro_use] extern crate alloc; extern crate contract_ffi; use contract_ffi::contract_api::{runtime, storage, Error, TURef}; use contract_ffi::key::Key; use contract_ffi::unwrap_or_revert::UnwrapOrRevert; use contract_ffi::value::uint::U512; const POS_CONTRACT_NAME: &str = "pos"; const UNBOND_METHOD_NAME: &str = "unbond"; // Unbonding contract. // // Accepts unbonding amount (of type `Option<u64>`) as first argument. // Unbonding with `None` unbonds all stakes in the PoS contract. // Otherwise (`Some<u64>`) unbonds with part of the bonded stakes. #[no_mangle] pub extern "C" fn call() { let pos_key = runtime::get_key(POS_CONTRACT_NAME).unwrap_or_revert_with(Error::GetKey); let pos_turef: TURef<Key> = pos_key .to_turef() .unwrap_or_revert_with(Error::UnexpectedKeyVariant); let pos_contract = storage::read(pos_turef) .unwrap_or_revert_with(Error::Read) .unwrap_or_revert_with(Error::ValueNotFound); let pos_pointer = pos_contract .to_c_ptr() .unwrap_or_revert_with(Error::UnexpectedKeyVariant); let unbond_amount: Option<U512> = runtime::get_arg::<Option<u64>>(0) .unwrap_or_revert_with(Error::MissingArgument) .unwrap_or_revert_with(Error::InvalidArgument) .map(Into::into); runtime::call_contract(pos_pointer, &(UNBOND_METHOD_NAME, unbond_amount), &vec![]) }
33
91
0.718615
ffee169323a9fd3996d130753f251e71c0cc154c
1,859
/* Copyright 2019 Supercomputing Systems AG Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ use serde_json::{json, Value}; pub const REQUEST_TRANSFER: u32 = 3; pub fn chain_get_block_hash() -> Value { json!({ "method": "chain_getBlockHash", "params": [0], "jsonrpc": "2.0", "id": "1", }) } pub fn state_get_metadata() -> Value { json!({ "method": "state_getMetadata", "params": null, "jsonrpc": "2.0", "id": "1", }) } pub fn state_get_runtime_version() -> Value { json!({ "method": "state_getRuntimeVersion", "params": null, "jsonrpc": "2.0", "id": "1", }) } pub fn state_subscribe_storage(key: &str) -> Value { json!({ "method": "state_subscribeStorage", "params": [[key]], "jsonrpc": "2.0", "id": "1", }) } pub fn state_get_storage(key_hash: &str) -> Value { json_req("state_getStorage", key_hash, 1 as u32) } pub fn author_submit_and_watch_extrinsic(xthex_prefixed: &str) -> Value { json_req( "author_submitAndWatchExtrinsic", xthex_prefixed, REQUEST_TRANSFER, ) } fn json_req(method: &str, params: &str, id: u32) -> Value { json!({ "method": method, "params": [params], "jsonrpc": "2.0", "id": id.to_string(), }) }
23.833333
75
0.607854
fe08ae6dd393ed86ce8d3a005620aaf0678151c2
2,565
#[doc = r"Value read from the register"] pub struct R { bits: u32, } #[doc = r"Value to write to the register"] pub struct W { bits: u32, } impl super::_2_DBRISE { #[doc = r"Modifies the contents of the register"] #[inline(always)] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); self.register.set(f(&R { bits }, &mut W { bits }).bits); } #[doc = r"Reads the contents of the register"] #[inline(always)] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r"Writes to the register"] #[inline(always)] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { self.register.set( f(&mut W { bits: Self::reset_value(), }) .bits, ); } #[doc = r"Reset value of the register"] #[inline(always)] pub const fn reset_value() -> u32 { 0 } #[doc = r"Writes the reset value to the register"] #[inline(always)] pub fn reset(&self) { self.register.set(Self::reset_value()) } } #[doc = r"Value of the field"] pub struct PWM_2_DBRISE_RISEDELAYR { bits: u16, } impl PWM_2_DBRISE_RISEDELAYR { #[doc = r"Value of the field as raw bits"] #[inline(always)] pub fn bits(&self) -> u16 { self.bits } } #[doc = r"Proxy"] pub struct _PWM_2_DBRISE_RISEDELAYW<'a> { w: &'a mut W, } impl<'a> _PWM_2_DBRISE_RISEDELAYW<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u16) -> &'a mut W { self.w.bits &= !(4095 << 0); self.w.bits |= ((value as u32) & 4095) << 0; self.w } } impl R { #[doc = r"Value of the register as raw bits"] #[inline(always)] pub fn bits(&self) -> u32 { self.bits } #[doc = "Bits 0:11 - Dead-Band Rise Delay"] #[inline(always)] pub fn pwm_2_dbrise_risedelay(&self) -> PWM_2_DBRISE_RISEDELAYR { let bits = ((self.bits >> 0) & 4095) as u16; PWM_2_DBRISE_RISEDELAYR { bits } } } impl W { #[doc = r"Writes raw bits to the register"] #[inline(always)] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.bits = bits; self } #[doc = "Bits 0:11 - Dead-Band Rise 
Delay"] #[inline(always)] pub fn pwm_2_dbrise_risedelay(&mut self) -> _PWM_2_DBRISE_RISEDELAYW { _PWM_2_DBRISE_RISEDELAYW { w: self } } }
25.65
74
0.5423
2864af72349023f057aa88e114715fe5fa9de560
1,026
#![feature(c_unwind)] #[macro_use] extern crate gmod; extern crate font_loader as fonts; use fonts::system_fonts; #[lua_function] unsafe fn get_installed_fonts(lua: gmod::lua::State) -> i32 { let sys_fonts = system_fonts::query_all(); lua.new_table(); for (i, font) in sys_fonts.iter().enumerate() { lua.push_integer((i + 1) as isize); lua.push_string(font.as_str()); lua.set_table(-3); } 1 } #[lua_function] unsafe fn font_exists(lua: gmod::lua::State) -> i32 { let sys_fonts = system_fonts::query_all(); let input = lua.check_string(1); let res = sys_fonts.contains(&String::from(input.as_ref())); lua.push_boolean(res); 1 } #[gmod13_open] unsafe fn gmod13_open(lua: gmod::lua::State) -> i32 { lua.new_table(); lua.push_function(get_installed_fonts); lua.set_field(-2, lua_string!("GetAll")); lua.push_function(font_exists); lua.set_field(-2, lua_string!("Exists")); lua.set_global(lua_string!("fonts")); 0 } #[gmod13_close] unsafe fn gmod13_close(_: gmod::lua::State) -> i32 { 0 }
20.117647
61
0.692982
e46d66246c900c7c31185224ea8ba9e78b6a7647
23,440
extern crate provider_archive; use crate::keys::extract_keypair; use crate::util::{convert_error, format_output, Output, OutputKind, Result}; use nkeys::KeyPairType; use provider_archive::*; use serde_json::json; use std::fs::File; use std::io::prelude::*; use std::path::PathBuf; use structopt::clap::AppSettings; use structopt::StructOpt; const GZIP_MAGIC: [u8; 2] = [0x1f, 0x8b]; #[derive(Debug, StructOpt, Clone)] #[structopt( global_settings(&[AppSettings::ColoredHelp, AppSettings::VersionlessSubcommands]), name = "par")] pub(crate) struct ParCli { #[structopt(flatten)] command: ParCliCommand, } impl ParCli { pub(crate) fn command(self) -> ParCliCommand { self.command } } #[derive(Debug, Clone, StructOpt)] pub(crate) enum ParCliCommand { /// Build a provider archive file #[structopt(name = "create")] Create(CreateCommand), /// Inspect a provider archive file #[structopt(name = "inspect")] Inspect(InspectCommand), /// Insert a provider into a provider archive file #[structopt(name = "insert")] Insert(InsertCommand), } #[derive(StructOpt, Debug, Clone)] pub(crate) struct CreateCommand { /// Capability contract ID (e.g. wasmcloud:messaging or wasmcloud:keyvalue). #[structopt(short = "c", long = "capid")] capid: String, /// Vendor string to help identify the publisher of the provider (e.g. Redis, Cassandra, wasmcloud, etc). Not unique. #[structopt(short = "v", long = "vendor")] vendor: String, /// Monotonically increasing revision number #[structopt(short = "r", long = "revision")] revision: Option<i32>, /// Human friendly version string #[structopt(long = "version")] version: Option<String>, /// Location of key files for signing. Defaults to $WASH_KEYS ($HOME/.wash/keys) #[structopt( short = "d", long = "directory", env = "WASH_KEYS", hide_env_values = true )] directory: Option<PathBuf>, /// Path to issuer seed key (account). If this flag is not provided, the will be sourced from $WASH_KEYS ($HOME/.wash/keys) or generated for you if it cannot be found. 
#[structopt( short = "i", long = "issuer", env = "WASH_ISSUER_KEY", hide_env_values = true )] issuer: Option<String>, /// Path to subject seed key (service). If this flag is not provided, the will be sourced from $WASH_KEYS ($HOME/.wash/keys) or generated for you if it cannot be found. #[structopt( short = "s", long = "subject", env = "WASH_SUBJECT_KEY", hide_env_values = true )] subject: Option<String>, /// Name of the capability provider #[structopt(short = "n", long = "name")] name: String, /// Architecture of provider binary in format ARCH-OS (e.g. x86_64-linux) #[structopt(short = "a", long = "arch")] arch: String, /// Path to provider binary for populating the archive #[structopt(short = "b", long = "binary")] binary: String, /// File output destination path #[structopt(long = "destination")] destination: Option<String>, /// Include a compressed provider archive #[structopt(long = "compress")] compress: bool, /// Disables autogeneration of signing keys #[structopt(long = "disable-keygen")] disable_keygen: bool, #[structopt(flatten)] pub(crate) output: Output, } #[derive(StructOpt, Debug, Clone)] pub(crate) struct InspectCommand { /// Path to provider archive or OCI URL of provider archive #[structopt(name = "archive")] archive: String, /// Digest to verify artifact against (if OCI URL is provided for <archive>) #[structopt(short = "d", long = "digest")] digest: Option<String>, /// Allow latest artifact tags (if OCI URL is provided for <archive>) #[structopt(long = "allow-latest")] allow_latest: bool, /// OCI username, if omitted anonymous authentication will be used #[structopt( short = "u", long = "user", env = "WASH_REG_USER", hide_env_values = true )] user: Option<String>, /// OCI password, if omitted anonymous authentication will be used #[structopt( short = "p", long = "password", env = "WASH_REG_PASSWORD", hide_env_values = true )] password: Option<String>, /// Allow insecure (HTTP) registry connections #[structopt(long = "insecure")] insecure: bool, 
#[structopt(flatten)] pub(crate) output: Output, } #[derive(StructOpt, Debug, Clone)] pub(crate) struct InsertCommand { /// Path to provider archive #[structopt(name = "archive")] archive: String, /// Architecture of binary in format ARCH-OS (e.g. x86_64-linux) #[structopt(short = "a", long = "arch")] arch: String, /// Path to provider binary to insert into archive #[structopt(short = "b", long = "binary")] binary: String, /// Location of key files for signing. Defaults to $WASH_KEYS ($HOME/.wash/keys) #[structopt( short = "d", long = "directory", env = "WASH_KEYS", hide_env_values = true )] directory: Option<PathBuf>, /// Path to issuer seed key (account). If this flag is not provided, the will be sourced from $WASH_KEYS ($HOME/.wash/keys) or generated for you if it cannot be found. #[structopt( short = "i", long = "issuer", env = "WASH_ISSUER_KEY", hide_env_values = true )] issuer: Option<String>, /// Path to subject seed key (service). If this flag is not provided, the will be sourced from $WASH_KEYS ($HOME/.wash/keys) or generated for you if it cannot be found. 
#[structopt( short = "s", long = "subject", env = "WASH_SUBJECT_KEY", hide_env_values = true )] subject: Option<String>, /// Disables autogeneration of signing keys #[structopt(long = "disable-keygen")] disable_keygen: bool, #[structopt(flatten)] pub(crate) output: Output, } pub(crate) async fn handle_command(command: ParCliCommand) -> Result<String> { match command { ParCliCommand::Create(cmd) => handle_create(cmd), ParCliCommand::Inspect(cmd) => handle_inspect(cmd).await, ParCliCommand::Insert(cmd) => handle_insert(cmd), } } /// Creates a provider archive using an initial architecture target, provider, and signing keys pub(crate) fn handle_create(cmd: CreateCommand) -> Result<String> { let mut par = ProviderArchive::new( &cmd.capid, &cmd.name, &cmd.vendor, cmd.revision, cmd.version, ); let mut f = File::open(cmd.binary.clone())?; let mut lib = Vec::new(); f.read_to_end(&mut lib)?; let issuer = extract_keypair( cmd.issuer, Some(cmd.binary.clone()), cmd.directory.clone(), KeyPairType::Account, cmd.disable_keygen, )?; let subject = extract_keypair( cmd.subject, Some(cmd.binary.clone()), cmd.directory, KeyPairType::Service, cmd.disable_keygen, )?; par.add_library(&cmd.arch, &lib).map_err(convert_error)?; let extension = if cmd.compress { ".par.gz" } else { ".par" }; let outfile = match cmd.destination { Some(path) => path, None => format!( "{}{}", PathBuf::from(cmd.binary.clone()) .file_stem() .unwrap() .to_str() .unwrap() .to_string(), extension ), }; Ok( if par .write(&outfile, &issuer, &subject, cmd.compress) .is_err() { format!( "Error writing PAR. 
Please ensure directory {:?} exists", PathBuf::from(outfile).parent().unwrap(), ) } else { format_output( format!("Successfully created archive {}", outfile), json!({"result": "success", "file": outfile}), &cmd.output.kind, ) }, ) } /// Loads a provider archive and outputs the contents of the claims pub(crate) async fn handle_inspect(cmd: InspectCommand) -> Result<String> { let archive = match File::open(&cmd.archive) { Ok(mut f) => { let mut buf = Vec::new(); f.read_to_end(&mut buf)?; ProviderArchive::try_load(&buf).map_err(|e| format!("{}", e))? } Err(_) => { let artifact = crate::reg::pull_artifact( cmd.archive, cmd.digest, cmd.allow_latest, cmd.user, cmd.password, cmd.insecure, ) .await?; ProviderArchive::try_load(&artifact).map_err(|e| format!("{}", e))? } }; let claims = archive.claims().unwrap(); let metadata = claims.metadata.unwrap(); let output = match cmd.output.kind { OutputKind::Json => { let friendly_rev = if metadata.rev.is_some() { format!("{}", metadata.rev.unwrap()) } else { "None".to_string() }; let friendly_ver = metadata.ver.unwrap_or_else(|| "None".to_string()); format!( "{}", json!({"name": metadata.name.unwrap(), "issuer": claims.issuer, "service": claims.subject, "capability_contract_id": metadata.capid, "vendor": metadata.vendor, "ver": friendly_ver, "rev": friendly_rev, "targets": archive.targets()}) ) } OutputKind::Text => { use term_table::row::Row; use term_table::table_cell::*; use term_table::Table; let mut table = Table::new(); crate::util::configure_table_style(&mut table); table.add_row(Row::new(vec![TableCell::new_with_alignment( format!("{} - Provider Archive", metadata.name.unwrap()), 2, Alignment::Center, )])); table.add_row(Row::new(vec![ TableCell::new("Account"), TableCell::new_with_alignment(claims.issuer, 1, Alignment::Right), ])); table.add_row(Row::new(vec![ TableCell::new("Service"), TableCell::new_with_alignment(claims.subject, 1, Alignment::Right), ])); table.add_row(Row::new(vec![ TableCell::new("Capability Contract 
ID"), TableCell::new_with_alignment(metadata.capid, 1, Alignment::Right), ])); table.add_row(Row::new(vec![ TableCell::new("Vendor"), TableCell::new_with_alignment(metadata.vendor, 1, Alignment::Right), ])); if let Some(ver) = metadata.ver { table.add_row(Row::new(vec![ TableCell::new("Version"), TableCell::new_with_alignment(ver, 1, Alignment::Right), ])); } if let Some(rev) = metadata.rev { table.add_row(Row::new(vec![ TableCell::new("Revision"), TableCell::new_with_alignment(rev, 1, Alignment::Right), ])); } table.add_row(Row::new(vec![TableCell::new_with_alignment( "Supported Architecture Targets", 2, Alignment::Center, )])); table.add_row(Row::new(vec![TableCell::new_with_alignment( archive.targets().join("\n"), 2, Alignment::Left, )])); table.render() } }; Ok(output) } /// Loads a provider archive and attempts to insert an additional provider into it pub(crate) fn handle_insert(cmd: InsertCommand) -> Result<String> { let mut buf = Vec::new(); let mut f = File::open(cmd.archive.clone())?; f.read_to_end(&mut buf)?; let mut par = ProviderArchive::try_load(&buf).map_err(convert_error)?; let issuer = extract_keypair( cmd.issuer, Some(cmd.binary.clone()), cmd.directory.clone(), KeyPairType::Account, cmd.disable_keygen, )?; let subject = extract_keypair( cmd.subject, Some(cmd.binary.clone()), cmd.directory, KeyPairType::Service, cmd.disable_keygen, )?; let mut f = File::open(cmd.binary.clone())?; let mut lib = Vec::new(); f.read_to_end(&mut lib)?; par.add_library(&cmd.arch, &lib).map_err(convert_error)?; par.write(&cmd.archive, &issuer, &subject, is_compressed(&buf)?) 
.map_err(convert_error)?; Ok(format_output( format!( "Successfully inserted {} into archive {}", cmd.binary, cmd.archive ), json!({"result": "success", "file": cmd.archive}), &cmd.output.kind, )) } /// Inspects the byte slice for a GZIP header, and returns true if the file is compressed fn is_compressed(input: &[u8]) -> Result<bool> { if input.len() < 2 { return Err("Not enough bytes to be a valid PAR file".into()); } Ok(input[0..2] == GZIP_MAGIC) } #[cfg(test)] mod test { use super::*; // Uses all flags and options of the `par create` command // to ensure API does not change between versions #[test] fn test_par_create_comprehensive() { const ISSUER: &str = "SAAJLQZDZO57THPTIIEELEY7FJYOJZQWQD7FF4J67TUYTSCOXTF7R4Y3VY"; const SUBJECT: &str = "SVAH7IN6QE6XODCGIIWZQDZ5LNSSS4FNEO6SNHZSSASW4BBBKSZ6KWTKWY"; let create_long = ParCli::from_iter_safe(&[ "par", "create", "--arch", "x86_64-testrunner", "--binary", "./testrunner.so", "--capid", "wasmcloud:test", "--name", "CreateTest", "--vendor", "TestRunner", "--destination", "./test.par.gz", "--revision", "1", "--version", "1.11.111", "--directory", "./tests/fixtures", "--issuer", ISSUER, "--subject", SUBJECT, "--output", "text", "--disable-keygen", "--compress", ]) .unwrap(); match create_long.command { ParCliCommand::Create(CreateCommand { capid, vendor, revision, version, directory, issuer, subject, name, arch, binary, destination, compress, disable_keygen, output, }) => { assert_eq!(capid, "wasmcloud:test"); assert_eq!(arch, "x86_64-testrunner"); assert_eq!(binary, "./testrunner.so"); assert_eq!(directory.unwrap(), PathBuf::from("./tests/fixtures")); assert_eq!(issuer.unwrap(), ISSUER); assert_eq!(subject.unwrap(), SUBJECT); assert_eq!(output.kind, OutputKind::Text); assert_eq!(name, "CreateTest"); assert_eq!(vendor, "TestRunner"); assert_eq!(destination.unwrap(), "./test.par.gz"); assert_eq!(revision.unwrap(), 1); assert_eq!(version.unwrap(), "1.11.111"); assert!(disable_keygen); assert!(compress); } cmd => panic!("par 
insert constructed incorrect command {:?}", cmd), } let create_short = ParCli::from_iter_safe(&[ "par", "create", "-a", "x86_64-testrunner", "-b", "./testrunner.so", "-c", "wasmcloud:test", "-n", "CreateTest", "-v", "TestRunner", "--destination", "./test.par.gz", "-r", "1", "--version", "1.11.111", "-d", "./tests/fixtures", "-i", ISSUER, "-s", SUBJECT, "-o", "json", ]) .unwrap(); match create_short.command { ParCliCommand::Create(CreateCommand { capid, vendor, revision, version, directory, issuer, subject, name, arch, binary, destination, compress, disable_keygen, output, }) => { assert_eq!(capid, "wasmcloud:test"); assert_eq!(arch, "x86_64-testrunner"); assert_eq!(binary, "./testrunner.so"); assert_eq!(directory.unwrap(), PathBuf::from("./tests/fixtures")); assert_eq!(issuer.unwrap(), ISSUER); assert_eq!(subject.unwrap(), SUBJECT); assert_eq!(output.kind, OutputKind::Json); assert_eq!(name, "CreateTest"); assert_eq!(vendor, "TestRunner"); assert_eq!(destination.unwrap(), "./test.par.gz"); assert_eq!(revision.unwrap(), 1); assert_eq!(version.unwrap(), "1.11.111"); assert!(!disable_keygen); assert!(!compress); } cmd => panic!("par insert constructed incorrect command {:?}", cmd), } } // Uses all flags and options of the `par insert` command // to ensure API does not change between versions #[test] fn test_par_insert_comprehensive() { const ISSUER: &str = "SAAJLQZDZO57THPTQLEELEY7FJYOJZQWQD7FF4J67TUYTSCOXTF7R4Y3VY"; const SUBJECT: &str = "SVAH7IN6QE6XODCGQAWZQDZ5LNSSS4FNEO6SNHZSSASW4BBBKSZ6KWTKWY"; let insert_short = ParCli::from_iter_safe(&[ "par", "insert", "libtest.par.gz", "-a", "x86_64-testrunner", "-b", "./testrunner.so", "-d", "./tests/fixtures", "-i", ISSUER, "-s", SUBJECT, "-o", "text", "--disable-keygen", ]) .unwrap(); match insert_short.command { ParCliCommand::Insert(InsertCommand { archive, arch, binary, directory, issuer, subject, output, disable_keygen, }) => { assert_eq!(archive, "libtest.par.gz"); assert_eq!(arch, "x86_64-testrunner"); 
assert_eq!(binary, "./testrunner.so"); assert_eq!(directory.unwrap(), PathBuf::from("./tests/fixtures")); assert_eq!(issuer.unwrap(), ISSUER); assert_eq!(subject.unwrap(), SUBJECT); assert_eq!(output.kind, OutputKind::Text); assert!(disable_keygen); } cmd => panic!("par insert constructed incorrect command {:?}", cmd), } let insert_long = ParCli::from_iter_safe(&[ "par", "insert", "libtest.par.gz", "--arch", "x86_64-testrunner", "--binary", "./testrunner.so", "--directory", "./tests/fixtures", "--issuer", ISSUER, "--subject", SUBJECT, "--output", "text", ]) .unwrap(); match insert_long.command { ParCliCommand::Insert(InsertCommand { archive, arch, binary, directory, issuer, subject, output, disable_keygen, }) => { assert_eq!(archive, "libtest.par.gz"); assert_eq!(arch, "x86_64-testrunner"); assert_eq!(binary, "./testrunner.so"); assert_eq!(directory.unwrap(), PathBuf::from("./tests/fixtures")); assert_eq!(issuer.unwrap(), ISSUER); assert_eq!(subject.unwrap(), SUBJECT); assert_eq!(output.kind, OutputKind::Text); assert!(!disable_keygen); } cmd => panic!("par insert constructed incorrect command {:?}", cmd), } } // Uses all flags and options of the `par inspect` command // to ensure API does not change between versions #[test] fn test_par_inspect_comprehensive() { const LOCAL: &str = "./coolthing.par.gz"; const REMOTE: &str = "wasmcloud.azurecr.io/coolthing.par.gz"; let inspect_long = ParCli::from_iter_safe(&[ "par", "inspect", LOCAL, "--digest", "sha256:blah", "--output", "json", "--password", "secret", "--user", "name", ]) .unwrap(); match inspect_long.command { ParCliCommand::Inspect(InspectCommand { archive, digest, allow_latest, user, password, insecure, output, }) => { assert_eq!(archive, LOCAL); assert_eq!(digest.unwrap(), "sha256:blah"); assert!(!allow_latest); assert!(!insecure); assert_eq!(user.unwrap(), "name"); assert_eq!(password.unwrap(), "secret"); assert_eq!(output.kind, OutputKind::Json); } cmd => panic!("par inspect constructed incorrect command 
{:?}", cmd), } let inspect_short = ParCli::from_iter_safe(&[ "par", "inspect", REMOTE, "-d", "sha256:blah", "-o", "json", "-p", "secret", "-u", "name", "--allow-latest", "--insecure", ]) .unwrap(); match inspect_short.command { ParCliCommand::Inspect(InspectCommand { archive, digest, allow_latest, user, password, insecure, output, }) => { assert_eq!(archive, REMOTE); assert_eq!(digest.unwrap(), "sha256:blah"); assert!(allow_latest); assert!(insecure); assert_eq!(user.unwrap(), "name"); assert_eq!(password.unwrap(), "secret"); assert_eq!(output.kind, OutputKind::Json); } cmd => panic!("par inspect constructed incorrect command {:?}", cmd), } } }
31.505376
172
0.510538
8ae073c42b920e2480b6284ad235110ac9d424e6
19,888
/*! Provides a convenient and fluent builder interface for constructing policies. # Example ```rust use aws_iam::model::*; use aws_iam::model::builder::*; use aws_iam::io::write_to_writer; use std::io::stdout; let policy: Policy = PolicyBuilder::new() .named("confidential-data-access") .evaluate_statement( StatementBuilder::new() .auto_named() .allows() .unspecified_principals() .may_perform_actions(vec!["s3:List*", "s3:Get*"]) .on_resources(vec![ "arn:aws:s3:::confidential-data", "arn:aws:s3:::confidential-data/_*", ]) .if_condition( ConditionBuilder::new_bool() .right_hand_bool("aws:MultiFactorAuthPresent", true) .if_exists(), ), ) .into(); write_to_writer(stdout(), &policy); ``` */ use crate::model::*; use std::collections::HashMap; // ------------------------------------------------------------------------------------------------ // Public Types // ------------------------------------------------------------------------------------------------ /// /// The top-level `Policy` builder. /// #[derive(Debug)] pub struct PolicyBuilder { version: Option<Version>, id: Option<String>, statements: Vec<Statement>, } /// /// A `Statement` builder, used with `PolicyBuilder::evaluate_statement()`. /// #[derive(Debug, Clone)] pub struct StatementBuilder { sid: Option<String>, effect: Effect, principals: HashMap<PrincipalType, Vec<String>>, p_direction: Option<bool>, actions: Vec<QString>, a_direction: Option<bool>, resources: Vec<String>, r_direction: Option<bool>, condition: Option<HashMap<ConditionOperator, HashMap<QString, OneOrAll<ConditionValue>>>>, } /// /// A `Condition` builder, used with `StatementBuilder::if_condition()`. 
#[derive(Debug)] pub struct ConditionBuilder { operator: ConditionOperator, rhs: HashMap<QString, OneOrAll<ConditionValue>>, } // ------------------------------------------------------------------------------------------------ // Implementations // ------------------------------------------------------------------------------------------------ impl Default for PolicyBuilder { fn default() -> Self { PolicyBuilder { version: None, id: None, statements: Vec::new(), } } } impl PolicyBuilder { /// Create a new, empty, policy builder pub fn new() -> Self { Default::default() } /// Set the version of this policy. pub fn version(&mut self, version: Version) -> &mut Self { self.version = Some(version); self } /// Use the IAM default for the version of this policy pub fn default_version(&mut self) -> &mut Self { self.version = Some(Policy::default_version()); self } /// Set the id of this policy pub fn named(&mut self, id: &str) -> &mut Self { self.id = Some(id.to_string()); self } /// Set the id of this policy to a randomly generate value. pub fn auto_named(&mut self) -> &mut Self { self.id = Some(Policy::new_id()); self } /// Add a statement to this policy. pub fn evaluate_statement(&mut self, statement: &mut StatementBuilder) -> &mut Self { self.statements.push(statement.into()); self } /// Add a list of statements to this policy. 
pub fn evaluate_statements(&mut self, statements: &mut Vec<StatementBuilder>) -> &mut Self { self.statements.extend( statements .iter_mut() .map(|sb| sb.into()) .collect::<Vec<Statement>>(), ); self } } impl From<&mut PolicyBuilder> for Policy { fn from(pb: &mut PolicyBuilder) -> Self { Policy { version: pb.version.clone(), id: pb.id.clone(), statement: match pb.statements.len() { 0 => panic!("no statements!"), 1 => OneOrAll::One(pb.statements.remove(0)), _ => OneOrAll::All(pb.statements.drain(0..).collect()), }, } } } impl Default for StatementBuilder { fn default() -> Self { StatementBuilder { sid: None, effect: Effect::Deny, principals: HashMap::new(), p_direction: None, actions: Vec::new(), a_direction: None, resources: Vec::new(), r_direction: None, condition: None, } } } impl StatementBuilder { /// Create a new, empty, statement builder pub fn new() -> Self { Default::default() } /// Set the id of this statement pub fn named(&mut self, sid: &str) -> &mut Self { self.sid = Some(sid.to_string()); self } /// Set the id of this statement to a randomly generate value. pub fn auto_named(&mut self) -> &mut Self { self.sid = Some(Statement::new_sid()); self } /// Set the effect of this statement to `Allow`. pub fn allows(&mut self) -> &mut Self { self.effect = Effect::Allow; self } /// Set the effect of this statement to `Deny`. pub fn does_not_allow(&mut self) -> &mut Self { self.effect = Effect::Deny; self } /// Unsets the principal associated with this statement pub fn unspecified_principals(&mut self) -> &mut Self { self.principals.clear(); self } /// Sets the principal of this statement to be a wildcard. pub fn any_principal(&mut self, p_type: PrincipalType) -> &mut Self { self.p_direction = Some(true); self.principals.insert(p_type, Vec::new()); self } /// Sets the principal of this statement to be only this value. 
pub fn only_this_principal(&mut self, p_type: PrincipalType, arn: &str) -> &mut Self { self.only_these_principals(p_type, vec![arn]); self } /// Sets the principal of this statement to be any of these values. pub fn only_these_principals(&mut self, p_type: PrincipalType, arns: Vec<&str>) -> &mut Self { match self.p_direction { None => self.p_direction = Some(true), Some(false) => panic!("you can't have principal *and* not principal"), _ => (), }; let existing = self.principals.entry(p_type).or_default(); existing.extend(arns.iter().map(|s| s.to_string()).collect::<Vec<String>>()); self } /// Sets the principal of this statement to exclude this value. pub fn not_this_principal(&mut self, p_type: PrincipalType, arn: &str) -> &mut Self { self.not_these_principals(p_type, vec![arn]); self } /// Sets the principal of this statement to exclude of these values. pub fn not_these_principals(&mut self, p_type: PrincipalType, arns: Vec<&str>) -> &mut Self { match self.p_direction { None => self.p_direction = Some(false), Some(true) => panic!("you can't have principal *and* not principal"), _ => (), }; let existing = self.principals.entry(p_type).or_default(); existing.extend(arns.iter().map(|s| s.to_string()).collect::<Vec<String>>()); self } /// Sets the action of this statement to be a wildcard. pub fn may_perform_any_action(&mut self) -> &mut Self { self.a_direction = Some(true); self.actions = Vec::new(); self } /// Sets the action of this statement to be only this value. pub fn may_perform_action(&mut self, action: &str) -> &mut Self { self.may_perform_actions(vec![action]); self } /// Sets the action of this statement to be any of these values. 
pub fn may_perform_actions(&mut self, actions: Vec<&str>) -> &mut Self { match self.a_direction { None => self.a_direction = Some(true), Some(false) => panic!("you can't have action *and* not action"), _ => (), }; self.actions.extend( actions .iter() .map(|s| s.parse().unwrap()) .collect::<Vec<QString>>(), ); self } /// Sets the action of this statement to exclude the wildcard. pub fn may_perform_no_action(&mut self) -> &mut Self { self.a_direction = Some(false); self.actions = Vec::new(); self } /// Sets the action of this statement to exclude this value. pub fn may_not_perform_action(&mut self, action: &str) -> &mut Self { self.may_not_perform_actions(vec![action]); self } /// Sets the action of this statement to exclude any of these values. pub fn may_not_perform_actions(&mut self, actions: Vec<&str>) -> &mut Self { match self.a_direction { None => self.a_direction = Some(false), Some(true) => panic!("you can't have action *and* not action"), _ => (), }; self.actions.extend( actions .iter() .map(|s| s.parse().unwrap()) .collect::<Vec<QString>>(), ); self } /// Sets the resource of this statement to be a wildcard. pub fn on_any_resource(&mut self) -> &mut Self { self.r_direction = Some(true); self.resources = Vec::new(); self } /// Sets the resource of this statement to be only this value. pub fn on_resource(&mut self, resource: &str) -> &mut Self { self.on_resources(vec![resource]); self } /// Sets the resource of this statement to be any of these values. pub fn on_resources(&mut self, resources: Vec<&str>) -> &mut Self { match self.r_direction { None => self.r_direction = Some(true), Some(false) => panic!("you can't have resource *and* not resource"), _ => (), }; self.resources.extend( resources .iter() .map(|s| s.to_string()) .collect::<Vec<String>>(), ); self } /// Sets the resource of this statement to exclude the wildcard. 
pub fn on_no_resource(&mut self) -> &mut Self { self.r_direction = Some(false); self.resources = Vec::new(); self } /// Sets the resource of this statement to exclude this value. pub fn not_on_resource(&mut self, resource: &str) -> &mut Self { self.not_on_resources(vec![resource]); self } /// Sets the resource of this statement to exclude any of these values. pub fn not_on_resources(&mut self, resources: Vec<&str>) -> &mut Self { match self.r_direction { None => self.r_direction = Some(false), Some(true) => panic!("you can't have resource *and* not resource"), _ => (), }; self.resources.extend( resources .iter() .map(|s| s.to_string()) .collect::<Vec<String>>(), ); self } /// Adds this condition to the statement. pub fn if_condition(&mut self, condition: &mut ConditionBuilder) -> &mut Self { if self.condition.is_none() { self.condition = Some(HashMap::new()); } let conditions = self.condition.as_mut().unwrap(); let existing = conditions.entry(condition.operator.clone()).or_default(); existing.extend(condition.rhs.drain()); self } } impl From<&mut StatementBuilder> for Statement { fn from(sb: &mut StatementBuilder) -> Self { let principal = match sb.p_direction { None => None, Some(direction) => { let inner: HashMap<PrincipalType, OneOrAny> = sb .principals .iter_mut() .map(|(k, v)| { ( k.clone(), match v.len() { 0 => OneOrAny::Any, 1 => OneOrAny::One(v.remove(0)), _ => OneOrAny::AnyOf(v.drain(0..).collect()), }, ) }) .collect(); Some(if direction { Principal::Principal(inner) } else { Principal::NotPrincipal(inner) }) } }; let action_inner = match sb.actions.len() { 0 => OneOrAny::Any, 1 => OneOrAny::One(sb.actions.remove(0)), _ => OneOrAny::AnyOf(sb.actions.drain(0..).collect()), }; let action = match sb.a_direction { None => panic!("must have an action"), Some(true) => Action::Action(action_inner), Some(false) => Action::NotAction(action_inner), }; let resource_inner = match sb.resources.len() { 0 => OneOrAny::Any, 1 => OneOrAny::One(sb.resources.remove(0)), _ => 
OneOrAny::AnyOf(sb.resources.drain(0..).collect()), }; let resource = match sb.r_direction { None => panic!("must have a resource"), Some(true) => Resource::Resource(resource_inner), Some(false) => Resource::NotResource(resource_inner), }; Statement { sid: sb.sid.clone(), principal, effect: sb.effect.clone(), action, resource, condition: sb.condition.clone(), } } } impl ConditionBuilder { /// Create a new Condition with the provided operator. pub fn new(operator: GlobalConditionOperator) -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator, if_exists: false, }, rhs: Default::default(), } } /// Create a new Condition with operator = `StringEquals` pub fn new_string_equals() -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator: GlobalConditionOperator::StringEquals, if_exists: false, }, rhs: Default::default(), } } /// Create a new Condition with operator = `StringNotEquals` pub fn new_string_not_equals() -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator: GlobalConditionOperator::StringNotEquals, if_exists: false, }, rhs: Default::default(), } } /// Create a new Condition with operator = `NumericEquals` pub fn new_numeric_equals() -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator: GlobalConditionOperator::NumericEquals, if_exists: false, }, rhs: Default::default(), } } /// Create a new Condition with operator = `NumericNotEquals` pub fn new_numeric_not_equals() -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator: GlobalConditionOperator::NumericNotEquals, if_exists: false, }, rhs: Default::default(), } } /// Create a new Condition with operator = `Bool` pub fn new_bool() -> Self { ConditionBuilder { operator: ConditionOperator { quantifier: None, operator: GlobalConditionOperator::Bool, if_exists: false, }, rhs: Default::default(), } } /// Add the _for-all-values_ quantifier. 
pub fn for_all(&mut self) -> &mut Self { self.operator.quantifier = Some(ConditionOperatorQuantifier::ForAllValues); self } /// Add the _for-any-value_ quantifier. pub fn for_any(&mut self) -> &mut Self { self.operator.quantifier = Some(ConditionOperatorQuantifier::ForAnyValue); self } /// Add a list of values to the _right-hand-sidse_ of this condition. pub fn right_hand_side(&mut self, key: &str, values: &mut Vec<ConditionValue>) -> &mut Self { let values = match values.len() { 0 => panic!("you must specify at least one value"), 1 => OneOrAll::One(values.remove(0)), _ => OneOrAll::All(values.drain(0..).collect()), }; self.rhs.insert(key.parse().unwrap(), values); self } /// Add a string value to the _right-hand-sidse_ of this condition. pub fn right_hand_str(&mut self, key: &str, value: &str) -> &mut Self { self.rhs.insert( key.parse().unwrap(), OneOrAll::One(ConditionValue::String(value.to_string())), ); self } /// Add a integer value to the _right-hand-sidse_ of this condition. pub fn right_hand_int(&mut self, key: &str, value: i64) -> &mut Self { self.rhs.insert( key.parse().unwrap(), OneOrAll::One(ConditionValue::Integer(value)), ); self } /// Add a float value to the _right-hand-sidse_ of this condition. pub fn right_hand_float(&mut self, key: &str, value: f64) -> &mut Self { self.rhs.insert( key.parse().unwrap(), OneOrAll::One(ConditionValue::Float(value)), ); self } /// Add a boolean value to the _right-hand-sidse_ of this condition. pub fn right_hand_bool(&mut self, key: &str, value: bool) -> &mut Self { self.rhs.insert( key.parse().unwrap(), OneOrAll::One(ConditionValue::Bool(value)), ); self } /// Add the _if-exists_ constraint pub fn if_exists(&mut self) -> &mut Self { self.operator.if_exists = true; self } /// /// Convert this one condition into a complete Condition for a statement. 
/// pub fn build_as_condition( &self, ) -> HashMap<ConditionOperator, HashMap<QString, OneOrAll<ConditionValue>>> { let mut map: HashMap<ConditionOperator, HashMap<QString, OneOrAll<ConditionValue>>> = HashMap::default(); map.insert(self.operator.clone(), self.rhs.clone()); map } } // ------------------------------------------------------------------------------------------------ // Unit Tests // ------------------------------------------------------------------------------------------------ #[cfg(test)] mod tests { use super::*; use crate::io::write_to_writer; use std::io::stdout; #[test] fn test_simple_builder() { let policy: Policy = PolicyBuilder::new() .named("confidential-data-access") .evaluate_statement( StatementBuilder::new() .auto_named() .allows() .unspecified_principals() .may_perform_actions(vec!["s3:List*", "s3:Get*"]) .on_resources(vec![ "arn:aws:s3:::confidential-data", "arn:aws:s3:::confidential-data/*", ]) .if_condition( ConditionBuilder::new_bool() .right_hand_bool("aws:MultiFactorAuthPresent", true) .if_exists(), ), ) .into(); write_to_writer(stdout(), &policy).expect("well that was unexpected"); } }
32.129241
99
0.527353
11a0b08f72b9a08a9d3a48b7e23863f74d758152
4,133
use byteorder::{BigEndian, ReadBytesExt}; use log::*; use multisock::{SocketAddr, Stream}; use ssh_agent::proto; use ssh_agent::proto::public_key::PublicKey; use ssh_agent::proto::signature; use ssh_agent::proto::{from_bytes, to_bytes, Message}; use std::io::{Read, Write}; // use std::mem::size_of; use std::net::Shutdown; use super::error::RsshErr; type ErrType = Box<dyn std::error::Error>; pub struct AgentClient<'a> { addr: &'a str, stream: Option<Stream>, } static NET_RETRY_CNT: u32 = 3; impl<'a> AgentClient<'a> { pub fn new(addr: &str) -> AgentClient { AgentClient { addr, stream: None } } fn read_message(stream: &mut Stream) -> Result<Message, ErrType> { let length = stream.read_u32::<BigEndian>()? as usize; debug!("read_message len={}", length); let mut buffer: Vec<u8> = vec![0; length as usize]; stream.read_exact(buffer.as_mut_slice())?; trace!("Read {} bytes: {:02X?}", buffer.len(), buffer); let msg: Message = from_bytes(buffer.as_slice())?; Ok(msg) } fn write_message(stream: &mut Stream, msg: &Message) -> Result<(), ErrType> { let mut bytes = to_bytes(&to_bytes(msg)?)?; stream.write_all(&mut bytes)?; trace!("Written {} bytes: {:02X?}", bytes.len(), bytes); Ok(()) } fn connect(&mut self) -> Result<(), ErrType> { let addr = if self.addr.starts_with('/') { String::from("unix:") + self.addr } else { String::from(self.addr) }; let sockaddr: SocketAddr = addr.parse()?; if let Some(ref mut s) = self.stream { let _ = s.shutdown(Shutdown::Both); self.stream = None; } self.stream = Some(Stream::connect(&sockaddr)?); info!("Connected to {:?}", sockaddr); Ok(()) } fn call_agent_once(&mut self, cmd: &Message) -> Result<Message, ErrType> { if self.stream.is_none() { self.connect()?; } let sock = self.stream.as_mut().unwrap(); Self::write_message(sock, cmd)?; Self::read_message(sock) } fn call_agent(&mut self, cmd: &Message, retry: u32) -> Result<Message, ErrType> { let mut ret: Result<Message, ErrType> = Err(RsshErr::RetryLT1Err.into_ptr()); for _i in 0..retry { ret = 
self.call_agent_once(cmd); if let Ok(val) = ret { return Ok(val); } } ret } pub fn list_identities(&mut self) -> Result<Vec<PublicKey>, ErrType> { let msg = self.call_agent(&Message::RequestIdentities, NET_RETRY_CNT)?; if let Message::IdentitiesAnswer(keys) = msg { let mut result = vec![]; for item in keys { debug!( "list_identities: {:02X?} ({})", item.pubkey_blob, item.comment ); if let Ok(pubkey) = from_bytes(&item.pubkey_blob) { result.push(pubkey); } } Ok(result) } else { Err(RsshErr::InvalidRspErr.into_ptr()) } } pub fn sign_data<'b>( &mut self, data: &'b [u8], pubkey: &'b PublicKey, ) -> Result<Vec<u8>, ErrType> { let mut flags = 0u32; match pubkey { PublicKey::Rsa(_) => flags = signature::RSA_SHA2_256, _ => {} } let args = proto::SignRequest { pubkey_blob: to_bytes(pubkey)?, data: data.to_vec(), flags, }; let msg = self.call_agent(&Message::SignRequest(args), NET_RETRY_CNT)?; if let Message::Failure = msg { return Err(RsshErr::AgentFailureErr.into_ptr()); } if let Message::SignResponse(val) = msg { // println!("signature payload: {:?}", val); // if let Ok(mut file) = std::fs::File::create("sign.bin") { // file.write_all(&val); // } Ok(val) } else { Err(RsshErr::InvalidRspErr.into_ptr()) } } }
31.549618
85
0.530849
b9196acbcf9efb1a3e399fb37d5ad355089182b8
38,488
// Copyright Materialize, Inc. All rights reserved. // // Use of this software is governed by the Business Source License // included in the LICENSE file. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0. //! An interactive dataflow server. use std::any::Any; use std::cell::RefCell; use std::collections::HashMap; use std::collections::VecDeque; use std::net::TcpStream; use std::pin::Pin; use std::rc::Rc; use std::rc::Weak; use std::sync::Mutex; use differential_dataflow::operators::arrange::arrangement::Arrange; use differential_dataflow::trace::cursor::Cursor; use differential_dataflow::trace::TraceReader; use differential_dataflow::Collection; use futures::channel::mpsc::UnboundedReceiver; use futures::executor::block_on; use futures::future::TryFutureExt; use futures::sink::{Sink, SinkExt}; use serde::{Deserialize, Serialize}; use timely::communication::allocator::generic::GenericBuilder; use timely::communication::allocator::zero_copy::initialize::initialize_networking_from_sockets; use timely::communication::initialize::WorkerGuards; use timely::communication::Allocate; use timely::dataflow::operators::unordered_input::UnorderedHandle; use timely::dataflow::operators::ActivateCapability; use timely::order::PartialOrder; use timely::progress::frontier::Antichain; use timely::progress::ChangeBatch; use timely::worker::Worker as TimelyWorker; use dataflow_types::logging::LoggingConfig; use dataflow_types::{ Consistency, DataflowDesc, DataflowError, Diff, ExternalSourceConnector, IndexDesc, MzOffset, PeekResponse, Timestamp, Update, }; use expr::{GlobalId, PartitionId, RowSetFinishing, SourceInstanceId}; use ore::future::channel::mpsc::ReceiverExt; use repr::{Datum, RelationType, Row, RowArena}; use self::metrics::Metrics; use super::render; use crate::arrangement::manager::{TraceBundle, TraceManager}; use crate::logging; use 
crate::logging::materialized::MaterializedEvent;
use crate::operator::CollectionExt;
use crate::source::SourceToken;

mod metrics;

/// A [`comm::broadcast::Token`] that permits broadcasting commands to the
/// Timely workers.
pub struct BroadcastToken;

impl comm::broadcast::Token for BroadcastToken {
    type Item = SequencedCommand;

    /// Returns true, to enable loopback.
    ///
    /// Since the coordinator lives on the same process as one set of
    /// workers, we need to enable loopback so that broadcasts are
    /// transmitted intraprocess and visible to those workers.
    fn loopback(&self) -> bool {
        true
    }
}

/// Explicit instructions for timely dataflow workers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum SequencedCommand {
    /// Create a sequence of dataflows.
    CreateDataflows(Vec<DataflowDesc>),
    /// Drop the sources bound to these names.
    DropSources(Vec<GlobalId>),
    /// Drop the sinks bound to these names.
    DropSinks(Vec<GlobalId>),
    /// Drop the indexes bound to these names.
    DropIndexes(Vec<GlobalId>),
    /// Peek at an arrangement.
    ///
    /// This request elicits data from the worker, by naming an
    /// arrangement and some actions to apply to the results before
    /// returning them.
    Peek {
        /// The identifier of the arrangement.
        id: GlobalId,
        /// The identifier of this peek request.
        ///
        /// Used in responses and cancelation requests.
        conn_id: u32,
        /// A communication link for sending a response.
        tx: comm::mpsc::Sender<PeekResponse>,
        /// The logical timestamp at which the arrangement is queried.
        timestamp: Timestamp,
        /// Actions to apply to the result set before returning them.
        finishing: RowSetFinishing,
        /// A projection that should be applied to results.
        project: Option<Vec<usize>>,
        /// A list of predicates that should restrict the set of results.
        filter: Vec<expr::ScalarExpr>,
    },
    /// Cancel the peek associated with the given `conn_id`.
    CancelPeek {
        /// The identifier of the peek request to cancel.
        conn_id: u32,
    },
    /// Create a local input named `index.on_id`
    CreateLocalInput {
        /// A name to use for the input.
        name: String,
        /// A globally unique identifier to use for the local input's index.
        index_id: GlobalId,
        /// Contains the global id of the local input
        /// and the keys that its index is arranged on
        index: IndexDesc,
        /// The relation type of the input.
        on_type: RelationType,
        /// A timestamp to which all local input (including this one)'s capabilities should be
        /// advanced.
        advance_to: Timestamp,
    },
    /// Insert `updates` into the local input named `id`.
    Insert {
        /// Identifier of the local input.
        id: GlobalId,
        /// A list of updates to be introduced to the input.
        updates: Vec<Update>,
        /// A timestamp to which all local input's capabilities should be advanced.
        advance_to: Timestamp,
    },
    /// Enable compaction in views.
    ///
    /// Each entry in the vector names a view and provides a frontier after which
    /// accumulations must be correct. The workers gain the liberty of compacting
    /// the corresponding maintained traces up through that frontier.
    AllowCompaction(Vec<(GlobalId, Antichain<Timestamp>)>),
    /// Append a new event to the log stream.
    AppendLog(MaterializedEvent),
    /// Advance worker timestamp
    AdvanceSourceTimestamp {
        /// The ID of the timestamped source
        id: SourceInstanceId,
        /// TODO(ncrooks)
        partition_count: i32,
        /// TODO(ncrooks)
        pid: PartitionId,
        /// TODO(ncrooks)
        timestamp: Timestamp,
        /// TODO(ncrooks)
        offset: MzOffset,
    },
    /// Request that feedback is streamed to the provided channel.
    EnableFeedback(comm::mpsc::Sender<WorkerFeedbackWithMeta>),
    /// Disconnect inputs, drain dataflows, and shut down timely workers.
    Shutdown,
}

/// Information from timely dataflow workers.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct WorkerFeedbackWithMeta {
    /// Identifies the worker by its identifier.
    pub worker_id: usize,
    /// The feedback itself.
    pub message: WorkerFeedback,
}

/// Responses the worker can provide back to the coordinator.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum WorkerFeedback {
    /// A list of identifiers of traces, with prior and new upper frontiers.
    FrontierUppers(Vec<(GlobalId, ChangeBatch<Timestamp>)>),
    /// The id of a source whose source connector has been dropped
    DroppedSource(SourceInstanceId),
    /// The id of a source whose source connector has been created
    CreateSource(SourceInstanceId, ExternalSourceConnector),
}

/// Initiates a timely dataflow computation, processing materialized commands.
///
/// TODO(benesch): pass a config struct here, or find some other way to cut
/// down on the number of arguments.
#[allow(clippy::too_many_arguments)]
pub fn serve<C>(
    sockets: Vec<Option<TcpStream>>,
    threads: usize,
    process: usize,
    switchboard: comm::Switchboard<C>,
    executor: tokio::runtime::Handle,
    logging_config: Option<dataflow_types::logging::LoggingConfig>,
) -> Result<WorkerGuards<()>, String>
where
    C: comm::Connection,
{
    assert!(threads > 0);

    // Construct endpoints for each thread that will receive the coordinator's
    // sequenced command stream.
    //
    // TODO(benesch): package up this idiom of handing out ownership of N items
    // to the N timely threads that will be spawned. The Mutex<Vec<Option<T>>>
    // is hard to read through.
    let command_rxs = {
        let mut rx = switchboard.broadcast_rx(BroadcastToken).fanout();
        // One receiver per worker thread; each slot is `take`n exactly once
        // by the thread whose index it corresponds to.
        let command_rxs = Mutex::new((0..threads).map(|_| Some(rx.attach())).collect::<Vec<_>>());
        executor.spawn(
            rx.shuttle()
                .map_err(|err| panic!("failure shuttling dataflow receiver commands: {}", err)),
        );
        command_rxs
    };

    // NOTE(review): the `None`-returning log_fn presumably disables timely's
    // communication logging — confirm against `initialize_networking_from_sockets`.
    let log_fn = Box::new(|_| None);
    let (builders, guard) = initialize_networking_from_sockets(sockets, process, threads, log_fn)
        .map_err(|err| format!("failed to initialize networking: {}", err))?;
    let builders = builders.into_iter().map(GenericBuilder::ZeroCopy).collect();

    timely::execute::execute_from(builders, Box::new(guard), move |timely_worker| {
        executor.enter(|| {
            // Claim this worker's command receiver (indexed by worker id).
            let command_rx = command_rxs.lock().unwrap()[timely_worker.index() % threads]
                .take()
                .unwrap()
                .request_unparks(&executor);
            let worker_idx = timely_worker.index();
            Worker {
                inner: timely_worker,
                pending_peeks: Vec::new(),
                traces: TraceManager::default(),
                logging_config: logging_config.clone(),
                feedback_tx: None,
                command_rx,
                materialized_logger: None,
                sink_tokens: HashMap::new(),
                local_inputs: HashMap::new(),
                reported_frontiers: HashMap::new(),
                metrics: Metrics::for_worker_id(worker_idx),
                ts_histories: Default::default(),
                ts_source_mapping: HashMap::new(),
                ts_source_drops: Default::default(),
            }
            .run()
        })
    })
}

/// A type wrapper for the number of partitions associated with a source
pub type PartitionCount = i32;

/// A type wrapper for a timestamp update that consists of a PartitionCount, a Timestamp,
/// and a MzOffset
pub type TimestampUpdate = (PartitionCount, Timestamp, MzOffset);

/// Map of source ID to per-partition timestamp history.
///
/// Timestamp history is a vector of
/// 1) BYO timestamp updates which are tuples (partition_count, timestamp, offset),
/// where the correct timestamp for a given offset `x` is the highest timestamp value for
/// the first offset >= `x`.
/// 2) RT timestamp updates which are integers (partition_count) which represent the current
/// partition_count associated with the source
pub type TimestampHistories =
    Rc<RefCell<HashMap<SourceInstanceId, HashMap<PartitionId, VecDeque<TimestampUpdate>>>>>;

/// List of sources that need to start being timestamped or have been dropped and no longer require
/// timestamping.
///
/// A source inserts an ADD request to this vector on source creation, and adds a
/// DELETE request once the operator for the source is dropped.
pub type TimestampChanges = Rc<
    RefCell<
        Vec<(
            SourceInstanceId,
            Option<(ExternalSourceConnector, Consistency)>,
        )>,
    >,
>;

/// State maintained for each worker thread.
///
/// Much of this state can be viewed as local variables for the worker thread,
/// holding state that persists across function calls.
struct Worker<'w, A>
where
    A: Allocate,
{
    inner: &'w mut TimelyWorker<A>,
    // Peeks that could not be fulfilled yet; retried in `process_peeks`.
    pending_peeks: Vec<PendingPeek>,
    traces: TraceManager,
    logging_config: Option<LoggingConfig>,
    // Channel back to the coordinator, installed by `EnableFeedback`.
    feedback_tx: Option<Pin<Box<dyn Sink<WorkerFeedbackWithMeta, Error = ()>>>>,
    command_rx: UnboundedReceiver<SequencedCommand>,
    materialized_logger: Option<logging::materialized::Logger>,
    sink_tokens: HashMap<GlobalId, Box<dyn Any>>,
    local_inputs: HashMap<GlobalId, LocalInput>,
    // Weak references so a dropped source's token is not kept alive here.
    ts_source_mapping: HashMap<SourceInstanceId, Weak<Option<SourceToken>>>,
    ts_histories: TimestampHistories,
    ts_source_drops: TimestampChanges,
    // Last frontier reported to the coordinator, per index.
    reported_frontiers: HashMap<GlobalId, Antichain<Timestamp>>,
    metrics: Metrics,
}

impl<'w, A> Worker<'w, A>
where
    A: Allocate + 'w,
{
    /// Initializes timely dataflow logging and publishes as a view.
    ///
    /// The initialization respects the setting of `self.logging_config`, and in particular
    /// if it is set to `None` then nothing happens. This has the potential to crash and burn
    /// if logging is not initialized and anyone tries to use it.
    fn initialize_logging(&mut self) {
        if let Some(logging) = &self.logging_config {
            use crate::logging::BatchLogger;
            use timely::dataflow::operators::capture::event::link::EventLink;

            // Timely's logging granularity is configured in nanoseconds;
            // convert to millisecond timestamps, with a minimum of 1ms.
            let granularity_ms =
                std::cmp::max(1, logging.granularity_ns() / 1_000_000) as Timestamp;

            // Establish loggers first, so we can either log the logging or not, as we like.
            let t_linked = std::rc::Rc::new(EventLink::new());
            let mut t_logger = BatchLogger::new(t_linked.clone(), granularity_ms);
            let d_linked = std::rc::Rc::new(EventLink::new());
            let mut d_logger = BatchLogger::new(d_linked.clone(), granularity_ms);
            let m_linked = std::rc::Rc::new(EventLink::new());
            let mut m_logger = BatchLogger::new(m_linked.clone(), granularity_ms);

            // Construct logging dataflows and endpoints before registering any.
            let t_traces = logging::timely::construct(&mut self.inner, logging, t_linked);
            let d_traces = logging::differential::construct(&mut self.inner, logging, d_linked);
            let m_traces = logging::materialized::construct(&mut self.inner, logging, m_linked);

            // Register each logger endpoint.
            self.inner
                .log_register()
                .insert::<timely::logging::TimelyEvent, _>("timely", move |time, data| {
                    t_logger.publish_batch(time, data)
                });

            self.inner
                .log_register()
                .insert::<differential_dataflow::logging::DifferentialEvent, _>(
                    "differential/arrange",
                    move |time, data| d_logger.publish_batch(time, data),
                );

            self.inner
                .log_register()
                .insert::<logging::materialized::MaterializedEvent, _>(
                    "materialized",
                    move |time, data| m_logger.publish_batch(time, data),
                );

            // All logging views share a single, empty error stream.
            let errs = self.inner.dataflow::<Timestamp, _, _>(|scope| {
                Collection::<_, DataflowError, isize>::empty(scope)
                    .arrange()
                    .trace
            });

            // Install traces as maintained indexes
            for (log, (_, trace)) in t_traces {
                self.traces
                    .set(log.index_id(), TraceBundle::new(trace, errs.clone()));
                self.reported_frontiers
                    .insert(log.index_id(), Antichain::from_elem(0));
            }
            for (log, (_, trace)) in d_traces {
                self.traces
                    .set(log.index_id(), TraceBundle::new(trace, errs.clone()));
                self.reported_frontiers
                    .insert(log.index_id(), Antichain::from_elem(0));
            }
            for (log, (_, trace)) in m_traces {
                self.traces
                    .set(log.index_id(), TraceBundle::new(trace, errs.clone()));
                self.reported_frontiers
                    .insert(log.index_id(), Antichain::from_elem(0));
            }

            self.materialized_logger = self.inner.log_register().get("materialized");
        }
    }

    /// Disables timely dataflow logging.
    ///
    /// This does not unpublish views and is only useful to terminate logging streams to ensure that
    /// materialized can terminate cleanly.
    fn shutdown_logging(&mut self) {
        self.inner.log_register().remove("timely");
        self.inner.log_register().remove("differential/arrange");
        self.inner.log_register().remove("materialized");
    }

    /// Draws from `dataflow_command_receiver` until shutdown.
    fn run(&mut self) {
        // Logging can be initialized with a "granularity" in nanoseconds, so that events are only
        // produced at logical times that are multiples of this many nanoseconds, which can reduce
        // the churn of the underlying computation.
        self.initialize_logging();

        let mut shutdown = false;
        while !shutdown {
            // Enable trace compaction.
            self.traces.maintenance();

            // Ask Timely to execute a unit of work. If Timely decides there's
            // nothing to do, it will park the thread. We rely on another thread
            // unparking us when there's new work to be done, e.g., when sending
            // a command or when new Kafka messages have arrived.
            self.inner.step_or_park(None);

            // Report frontier information back the coordinator.
            self.report_frontiers();
            self.report_source_drops();

            // Handle any received commands.
            let mut cmds = vec![];
            while let Ok(Some(cmd)) = self.command_rx.try_next() {
                cmds.push(cmd);
            }
            self.metrics.observe_command_queue(&cmds);
            for cmd in cmds {
                // Note the shutdown request but keep processing this batch of
                // commands; the loop exits after the batch is drained.
                if let SequencedCommand::Shutdown = cmd {
                    shutdown = true;
                }
                self.metrics.observe_command(&cmd);
                self.handle_command(cmd);
            }
            self.metrics.observe_pending_peeks(&self.pending_peeks);
            self.metrics.observe_command_finish();
            self.process_peeks();
        }
    }

    /// Send source drop notifications to the coordinator
    fn report_source_drops(&mut self) {
        let mut updates = self.ts_source_drops.borrow_mut();
        for (id, ksc) in updates.iter() {
            if ksc.is_none() {
                // A source was deleted
                self.ts_histories.borrow_mut().remove(id);
                self.ts_source_mapping.remove(id);
                // NOTE(review): panics if feedback was never enabled via
                // `EnableFeedback` — confirm that ordering is guaranteed.
                let connector = self.feedback_tx.as_mut().unwrap();
                block_on(connector.send(WorkerFeedbackWithMeta {
                    worker_id: self.inner.index(),
                    message: WorkerFeedback::DroppedSource(*id),
                }))
                .unwrap();
            } else {
                // A source was created
                let connector = self.feedback_tx.as_mut().unwrap();
                block_on(connector.send(WorkerFeedbackWithMeta {
                    worker_id: self.inner.index(),
                    message: WorkerFeedback::CreateSource(*id, ksc.as_ref().unwrap().0.clone()),
                }))
                .unwrap();
            }
        }
        updates.clear();
    }

    /// Send progress information to the coordinator.
fn report_frontiers(&mut self) { if let Some(feedback_tx) = &mut self.feedback_tx { let mut upper = Antichain::new(); let mut progress = Vec::new(); let ids = self.traces.traces.keys().cloned().collect::<Vec<_>>(); for id in ids { if let Some(traces) = self.traces.get_mut(&id) { // Read the upper frontier and compare to what we've reported. traces.oks_mut().read_upper(&mut upper); let lower = self .reported_frontiers .get_mut(&id) .expect("Frontier missing!"); if lower != &upper { let mut changes = ChangeBatch::new(); for time in lower.elements().iter() { changes.update(time.clone(), -1); } for time in upper.elements().iter() { changes.update(time.clone(), 1); } let lower = self.reported_frontiers.get_mut(&id).unwrap(); changes.compact(); if !changes.is_empty() { progress.push((id, changes)); } lower.clone_from(&upper); } } } if !progress.is_empty() { block_on(feedback_tx.send(WorkerFeedbackWithMeta { worker_id: self.inner.index(), message: WorkerFeedback::FrontierUppers(progress), })) .unwrap(); } } } fn handle_command(&mut self, cmd: SequencedCommand) { match cmd { SequencedCommand::CreateDataflows(dataflows) => { for dataflow in dataflows.into_iter() { for (id, _, _) in dataflow.index_exports.iter() { self.reported_frontiers.insert(*id, Antichain::from_elem(0)); if let Some(logger) = self.materialized_logger.as_mut() { logger.log(MaterializedEvent::Dataflow(*id, true)); } } render::build_dataflow( dataflow, &mut self.traces, self.inner, &mut self.sink_tokens, &mut self.ts_source_mapping, self.ts_histories.clone(), self.ts_source_drops.clone(), &mut self.materialized_logger, ); } } SequencedCommand::DropSources(names) => { for name in names { self.local_inputs.remove(&name); } } SequencedCommand::DropSinks(ids) => { for id in ids { self.sink_tokens.remove(&id); } } SequencedCommand::DropIndexes(ids) => { for id in ids { self.traces.del_trace(&id); if let Some(logger) = self.materialized_logger.as_mut() { logger.log(MaterializedEvent::Dataflow(id, false)); } 
self.reported_frontiers .remove(&id) .expect("Dropped index with no frontier"); } } SequencedCommand::Peek { id, timestamp, conn_id, tx, finishing, project, filter, } => { // Acquire a copy of the trace suitable for fulfilling the peek. let mut trace_bundle = self.traces.get(&id).unwrap().clone(); let timestamp_frontier = Antichain::from_elem(timestamp); let empty_frontier = Antichain::new(); trace_bundle .oks_mut() .advance_by(timestamp_frontier.borrow()); trace_bundle .errs_mut() .advance_by(timestamp_frontier.borrow()); trace_bundle .oks_mut() .distinguish_since(empty_frontier.borrow()); trace_bundle .errs_mut() .distinguish_since(empty_frontier.borrow()); // Prepare a description of the peek work to do. let mut peek = PendingPeek { id, conn_id, tx, timestamp, finishing, trace_bundle, project, filter, }; // Log the receipt of the peek. if let Some(logger) = self.materialized_logger.as_mut() { logger.log(MaterializedEvent::Peek(peek.as_log_event(), true)); } // Attempt to fulfill the peek. let fulfilled = peek.seek_fulfillment(&mut Antichain::new()); if !fulfilled { self.pending_peeks.push(peek); } else { // Log the fulfillment of the peek. 
if let Some(logger) = self.materialized_logger.as_mut() { logger.log(MaterializedEvent::Peek(peek.as_log_event(), false)); } } self.metrics.observe_pending_peeks(&self.pending_peeks); } SequencedCommand::CancelPeek { conn_id } => { let logger = &mut self.materialized_logger; self.pending_peeks.retain(|peek| { if peek.conn_id == conn_id { let mut tx = block_on(peek.tx.connect()).unwrap(); block_on(tx.send(PeekResponse::Canceled)).unwrap(); if let Some(logger) = logger { logger.log(MaterializedEvent::Peek(peek.as_log_event(), false)); } false // don't retain } else { true // retain } }) } SequencedCommand::CreateLocalInput { name, index_id, index, on_type, advance_to, } => { render::build_local_input( &mut self.traces, self.inner, &mut self.local_inputs, index_id, &name, index, on_type, ); self.reported_frontiers .insert(index_id, Antichain::from_elem(0)); for (_, local_input) in self.local_inputs.iter_mut() { local_input.capability.downgrade(&advance_to); } } SequencedCommand::Insert { id, updates, advance_to, } => { if let Some(input) = self.local_inputs.get_mut(&id) { let mut session = input.handle.session(input.capability.clone()); for update in updates { assert!(update.timestamp >= *input.capability.time()); session.give((update.row, update.timestamp, update.diff)); } } for (_, local_input) in self.local_inputs.iter_mut() { local_input.capability.downgrade(&advance_to); } } SequencedCommand::AllowCompaction(list) => { for (id, frontier) in list { self.traces.allow_compaction(id, frontier.borrow()); } } SequencedCommand::AppendLog(event) => { if self.inner.index() == 0 { if let Some(logger) = self.materialized_logger.as_mut() { logger.log(event); } } } SequencedCommand::EnableFeedback(tx) => { self.feedback_tx = Some(Box::pin(block_on(tx.connect()).unwrap().sink_map_err( |err| panic!("error sending worker feedback: {}", err), ))); } SequencedCommand::Shutdown => { // this should lead timely to wind down eventually self.traces.del_all_traces(); 
                    self.shutdown_logging();
                }
                // Append a new (partition_count, timestamp, offset) binding for
                // source `id` and wake the source so it observes it.
                SequencedCommand::AdvanceSourceTimestamp {
                    id,
                    partition_count,
                    pid,
                    timestamp,
                    offset,
                } => {
                    let mut timestamps = self.ts_histories.borrow_mut();
                    if let Some(entries) = timestamps.get_mut(&id) {
                        let ts = entries.entry(pid).or_insert_with(VecDeque::new);
                        // Bindings must be monotone per partition: offsets never
                        // regress, timestamps strictly advance.
                        let (_, last_ts, last_offset) =
                            ts.back().unwrap_or(&(0, 0, MzOffset { offset: 0 }));
                        assert!(
                            offset >= *last_offset,
                            "offset should not go backwards, but {} < {}",
                            offset,
                            last_offset
                        );
                        assert!(
                            timestamp > *last_ts,
                            "timestamp should move forwards, but {} <= {}",
                            timestamp,
                            last_ts
                        );
                        ts.push_back((partition_count, timestamp, offset));
                        let source = self
                            .ts_source_mapping
                            .get(&id)
                            .expect("Id should be present");
                        // The mapping holds a weak reference; the source may have
                        // already been dropped, in which case there is nothing to wake.
                        if let Some(source) = source.upgrade() {
                            if let Some(token) = &*source {
                                token.activate();
                            }
                        }
                    }
                }
            }
        }

        /// Scan pending peeks and attempt to retire each.
        fn process_peeks(&mut self) {
            let mut upper = Antichain::new();
            // Swap the pending list out so we can call `seek_fulfillment`
            // (which needs `&mut self` via the peek) while repopulating it.
            let pending_peeks_len = self.pending_peeks.len();
            let mut pending_peeks = std::mem::replace(
                &mut self.pending_peeks,
                Vec::with_capacity(pending_peeks_len),
            );
            for mut peek in pending_peeks.drain(..) {
                let success = peek.seek_fulfillment(&mut upper);
                if !success {
                    self.pending_peeks.push(peek);
                } else {
                    // Log the fulfillment of the peek.
                    if let Some(logger) = self.materialized_logger.as_mut() {
                        logger.log(MaterializedEvent::Peek(peek.as_log_event(), false));
                    }
                }
            }
        }
    }

    /// Handle and capability for feeding records into a locally created input.
    pub(crate) struct LocalInput {
        pub handle: UnorderedHandle<Timestamp, (Row, Timestamp, Diff)>,
        pub capability: ActivateCapability<Timestamp>,
    }

    /// An in-progress peek, and data to eventually fulfill it.
    #[derive(Clone)]
    struct PendingPeek {
        /// The identifier of the dataflow to peek.
        id: GlobalId,
        /// The ID of the connection that submitted the peek. For logging only.
        conn_id: u32,
        /// A transmitter connected to the intended recipient of the peek.
        tx: comm::mpsc::Sender<PeekResponse>,
        /// Time at which the collection should be materialized.
        timestamp: Timestamp,
        /// Finishing operations to perform on the peek, like an ordering and a
        /// limit.
        finishing: RowSetFinishing,
        /// Optional column projection to apply to each result row.
        project: Option<Vec<usize>>,
        /// Predicates each row must satisfy to be included in the result.
        filter: Vec<expr::ScalarExpr>,
        /// The data from which the trace derives.
        trace_bundle: TraceBundle,
    }

    impl PendingPeek {
        /// Produces a corresponding log event.
        pub fn as_log_event(&self) -> crate::logging::materialized::Peek {
            crate::logging::materialized::Peek::new(self.id, self.timestamp, self.conn_id)
        }

        /// Attempts to fulfill the peek and reports success.
        ///
        /// To produce output at `peek.timestamp`, we must be certain that
        /// it is no longer changing. A trace guarantees that all future
        /// changes will be greater than or equal to an element of `upper`.
        ///
        /// If an element of `upper` is less or equal to `peek.timestamp`,
        /// then there can be further updates that would change the output.
        /// If no element of `upper` is less or equal to `peek.timestamp`,
        /// then for any time `t` less or equal to `peek.timestamp` it is
        /// not the case that `upper` is less or equal to that timestamp,
        /// and so the result cannot further evolve.
        fn seek_fulfillment(&mut self, upper: &mut Antichain<Timestamp>) -> bool {
            self.trace_bundle.oks_mut().read_upper(upper);
            if upper.less_equal(&self.timestamp) {
                return false;
            }
            self.trace_bundle.errs_mut().read_upper(upper);
            if upper.less_equal(&self.timestamp) {
                return false;
            }
            let response = match self.collect_finished_data() {
                Ok(rows) => PeekResponse::Rows(rows),
                Err(text) => PeekResponse::Error(text),
            };
            // NOTE(review): if sending the response fails we try to report the
            // send error itself; the second send is unwrapped, so a doubly-failed
            // channel panics the worker — presumably intentional, confirm.
            let mut tx = block_on(self.tx.connect()).unwrap();
            let tx_result = block_on(tx.send(response));
            if let Err(e) = tx_result {
                block_on(tx.send(PeekResponse::Error(e.to_string()))).unwrap();
            }
            true
        }

        /// Collects data for a known-complete peek.
        fn collect_finished_data(&mut self) -> Result<Vec<Row>, String> {
            // Check if there exist any errors and, if so, return whatever one we
            // find first.
            let (mut cursor, storage) = self.trace_bundle.errs_mut().cursor();
            while cursor.key_valid(&storage) {
                // Accumulate the multiplicity of this error as of `self.timestamp`.
                let mut copies = 0;
                cursor.map_times(&storage, |time, diff| {
                    if time.less_equal(&self.timestamp) {
                        copies += diff;
                    }
                });
                if copies < 0 {
                    return Err(format!(
                        "Negative multiplicity: {} for {}",
                        copies,
                        cursor.key(&storage),
                    ));
                }
                if copies > 0 {
                    return Err(cursor.key(&storage).to_string());
                }
                cursor.step_key(&storage);
            }

            let (mut cursor, storage) = self.trace_bundle.oks_mut().cursor();
            let mut results = Vec::new();

            // We can limit the record enumeration if i. there is a limit set,
            // and ii. if the specified ordering is empty (specifies no order).
            let limit = if self.finishing.order_by.is_empty() {
                self.finishing.limit.map(|l| l + self.finishing.offset)
            } else {
                None
            };

            let mut datums = Vec::new();
            while cursor.key_valid(&storage) && limit.map(|l| results.len() < l).unwrap_or(true) {
                while cursor.val_valid(&storage) && limit.map(|l| results.len() < l).unwrap_or(true) {
                    let row = cursor.val(&storage);
                    let mut retain = true;
                    if !self.filter.is_empty() {
                        datums.clear();
                        datums.extend(row.iter());
                        // Before (expensively) determining how many copies of a row
                        // we have, let's eliminate rows that we don't care about.
                        let temp_storage = RowArena::new();
                        for predicate in &self.filter {
                            let d = predicate
                                .eval(&datums, &temp_storage)
                                .map_err(|e| e.to_string())?;
                            if d != Datum::True {
                                retain = false;
                                break;
                            }
                        }
                    }
                    if retain {
                        // Differential dataflow represents collections with binary counts,
                        // but our output representation is unary (as many rows as reported
                        // by the count). We should determine this count, and especially if
                        // it is non-zero, before producing any output data.
                        let mut copies = 0;
                        cursor.map_times(&storage, |time, diff| {
                            if time.less_equal(&self.timestamp) {
                                copies += diff;
                            }
                        });
                        if copies < 0 {
                            return Err(format!(
                                "Negative multiplicity: {} for {:?}",
                                copies,
                                row.unpack(),
                            ));
                        }
                        // TODO: We could push a count here, as we create owned output later.
                        for _ in 0..copies {
                            results.push(row);
                        }
                    }
                    cursor.step_val(&storage);
                }
                cursor.step_key(&storage)
            }

            // If we have extracted a projection, we should re-write the order_by columns.
            if let Some(columns) = &self.project {
                for key in self.finishing.order_by.iter_mut() {
                    key.column = columns[key.column];
                }
            }

            // TODO: We could sort here in any case, as it allows a merge sort at the coordinator.
            if let Some(limit) = self.finishing.limit {
                let offset_plus_limit = limit + self.finishing.offset;
                if results.len() > offset_plus_limit {
                    // The `results` should be sorted by `Row`, which means we only
                    // need to re-order `results` when there is a non-empty order_by.
                    if !self.finishing.order_by.is_empty() {
                        pdqselect::select_by(&mut results, offset_plus_limit, |left, right| {
                            expr::compare_columns(
                                &self.finishing.order_by,
                                &left.unpack(),
                                &right.unpack(),
                                || left.cmp(right),
                            )
                        });
                    }
                    results.truncate(offset_plus_limit);
                }
            }

            Ok(if let Some(columns) = &self.project {
                // Apply the projection while producing owned output rows.
                let mut row_packer = repr::RowPacker::new();
                results
                    .iter()
                    .map({
                        move |row| {
                            let datums = row.unpack();
                            row_packer.pack(columns.iter().map(|i| datums[*i]))
                        }
                    })
                    .collect()
            } else {
                results.iter().map(|row| (*row).clone()).collect()
            })
        }
    }

    /// The presence of this function forces `rustc` to instantiate the
    /// slow-to-compile differential and timely templates while compiling this
    /// crate. This means that iterating on crates that depend upon this crate is
    /// much faster, because these templates don't need to be reinstantiated
    /// whenever a downstream dependency changes. And iterating on this crate
    /// doesn't really become slower, because you needed to instantiate these
    /// templates anyway to run tests.
    pub fn __explicit_instantiation__() {
        ore::hint::black_box(serve::<tokio::net::TcpStream> as fn(_, _, _, _, _, _) -> _);
    }
40.008316
100
0.549574
0e723f4100d5da865aae187284f513b8cc88def2
22,452
//! The Daily Byte solutions.

use std::collections::{HashMap, HashSet, LinkedList, VecDeque};
use std::iter::FromIterator;

/// Returns `s` with its characters in reverse order.
pub fn reverse_string(s: &str) -> String {
    s.chars().rev().collect()
}

// Given two binary strings (strings containing only 1s and 0s) return
// their sum (also as a binary string). Note: neither binary string
// will contain leading 0s unless the string itself is 0
//
// Ex: Given the following binary strings...
//
// "100" + "1", return "101"
// "11" + "1", return "100"
// "1" + "0", return "1"
pub fn add_bytes(a: &str, b: &str) -> String {
    // Parse a binary string into an integer, least significant bit last.
    fn to_int(s: &str) -> usize {
        s.chars().rev().enumerate().fold(0, |acc, (i, c)| {
            if c == '1' {
                acc + (1 << i)
            } else {
                acc
            }
        })
    }

    let mut z = to_int(a) + to_int(b);
    // "0" + "0" must yield "0", not an empty string.
    if z == 0 {
        return "0".to_string();
    }
    let mut digits = Vec::new();
    while z > 0 {
        digits.push(if z % 2 == 1 { '1' } else { '0' });
        z /= 2;
    }
    // Digits were produced least-significant first; the top bit is always 1,
    // so no leading-zero stripping is needed.
    digits.iter().rev().collect()
}

// Given a string, return whether or not it forms a palindrome
// ignoring case and non-alphabetical characters.
pub fn palindrome_special(s: &str) -> bool {
    let chars: Vec<char> = s.chars().collect();
    // An empty string is trivially a palindrome (and `len() - 1` below
    // would underflow).
    if chars.is_empty() {
        return true;
    }
    // Two pointers at the beginning and end of string.
    let mut left = 0;
    let mut right = chars.len() - 1;
    // Keep moving until we meet the middle.
    while left < right {
        // If either are non-alph, we skip.
        if !chars[left].is_alphabetic() {
            left += 1;
            continue;
        }
        if !chars[right].is_alphabetic() {
            right -= 1;
            continue;
        }
        // Compare lower case versions. In Unicode, this could
        // translate into multiple code points, thus the vector
        // comparison.
        if chars[left].to_lowercase().collect::<Vec<_>>()
            != chars[right].to_lowercase().collect::<Vec<_>>()
        {
            return false;
        }
        left += 1;
        right -= 1;
    }
    // If the two pointers have met, we are done.
    true
}

/// Returns whether the move sequence `s` (L/R/U/D) ends back at the origin.
pub fn vaccum_cleaner(s: &str) -> bool {
    let mut origin = (0, 0);
    for m in s.chars() {
        match m {
            'L' => origin.0 -= 1,
            'R' => origin.0 += 1,
            'U' => origin.1 += 1,
            'D' => origin.1 -= 1,
            _ => (),
        }
    }
    origin == (0, 0)
}

/// Returns whether `s` uses capitalization correctly: all upper, all lower,
/// or a single leading capital. The empty string is considered correct.
pub fn correctly_capitalized(s: &str) -> bool {
    if s.is_empty() {
        return true;
    }
    s.to_uppercase() == s
        || s.to_lowercase() == s
        || (s.chars().next().unwrap().is_uppercase() && {
            let rest: String = s.chars().skip(1).collect();
            rest.to_lowercase() == rest
        })
}

// Given an array of strings, return the longest common prefix that is
// shared amongst all strings. Note: you may assume all strings only
// contain lowercase alphabetical characters.
//
// Ex: Given the following arrays...
//
// ["colorado", "color", "cold"], return "col"
// ["a", "b", "c"], return ""
// ["spot", "spotty", "spotted"], return "spot"
pub fn longest_common_prefix<'a>(ss: &[&'a str]) -> &'a str {
    // An empty input has an empty common prefix (indexing ss[0] would panic).
    let (first, rest) = match ss.split_first() {
        Some((f, r)) => (*f, r),
        None => return "",
    };
    let mut longest = first;
    for s in rest {
        // Track the byte length of the matched prefix so slicing stays valid
        // even for multi-byte characters.
        let mut end = 0;
        for (a, b) in longest.chars().zip(s.chars()) {
            if a != b {
                break;
            }
            end += a.len_utf8();
        }
        longest = &longest[..end];
    }
    longest
}

// Given a string and the ability to delete at most one character,
// return whether or not it can form a palindrome. Note: a palindrome
// is a sequence of characters that reads the same forwards and
// backwards.
//
// Ex: Given the following strings...
//
// "abcba", return true
// "foobof", return true (remove the first 'o', the second 'o', or 'b')
// "abccab", return false
pub fn valid_palindrome_removal(s: &str) -> bool {
    valid_palindrome_removal_helper(&s.chars().collect::<Vec<char>>(), false)
}

// Recursive check: `skipped` records whether the one allowed deletion has
// already been spent.
fn valid_palindrome_removal_helper(c: &[char], skipped: bool) -> bool {
    // A zero or one length string is a palindrome.
    if c.len() < 2 {
        return true;
    }
    if c.first() == c.last() {
        // Matching ends: strip both and carry the skip budget forward
        // (it must NOT reset, or multiple deletions would be allowed).
        return valid_palindrome_removal_helper(&c[1..c.len() - 1], skipped);
    }
    if skipped {
        return false;
    }
    // Ends differ: try deleting either end character once. Deleting the
    // first leaves c[1..]; deleting the last leaves c[..len-1].
    valid_palindrome_removal_helper(&c[1..], true)
        || valid_palindrome_removal_helper(&c[..c.len() - 1], true)
}

// Given an array of integers, return whether or not two numbers sum
// to a given target, k. Note: you may not sum a number with itself.
//
// Ex: Given the following...
//
// [1, 3, 8, 2], k = 10, return true (8 + 2)
// [3, 9, 13, 7], k = 8, return false
// [4, 2, 6, 5, 2], k = 4, return true (2 + 2)
pub fn two_sum(ii: &[i64], k: i64) -> bool {
    // Use a map to store the values needed and when we come upon one,
    // return the solution.
    let mut needed = HashMap::new();
    for i in ii {
        // If this is one of the values needed, then we found a solution.
        if needed.contains_key(i) {
            return true;
        }
        needed.insert(k - i, i);
    }
    false
}

// Given a string representing your stones and another string
// representing a list of jewels, return the number of stones that you
// have that are also jewels.
//
// Ex: Given the following jewels and stones...
//
// jewels = "abc", stones = "ac", return 2
// jewels = "Af", stones = "AaaddfFf", return 3
// jewels = "AYOPD", stones = "ayopd", return 0
pub fn jewels_and_stones(jewels: &str, stones: &str) -> usize {
    // Use a hash set instead of "contains" because contains is O(n)
    // and a hash lookup is O(1). Note, this is only useful for larger
    // jewel/stone values. If they are small, there really isn't a
    // difference and setting up the set may be more expensive.
    let jj: HashSet<char> = jewels.chars().collect();
    stones.chars().filter(|c| jj.contains(c)).count()
}

// Given two strings s and t return whether or not s is an anagram of
// t. Note: An anagram is a word formed by reordering the letters of
// another word.
//
// Ex: Given the following strings...
//
// s = "cat", t = "tac", return true
// s = "listen", t = "silent", return true
// s = "program", t = "function", return false
pub fn valid_anagram(s: &str, t: &str) -> bool {
    // Count every character in `s`, then consume the counts with `t`.
    // A simple used-flag per character is not enough: anagrams may repeat
    // letters ("aab" vs "aba"), and the two strings must contain exactly
    // the same multiset of characters.
    let mut counts: HashMap<char, i64> = HashMap::new();
    for c in s.chars() {
        *counts.entry(c).or_insert(0) += 1;
    }
    for c in t.chars() {
        match counts.get_mut(&c) {
            Some(n) if *n > 0 => *n -= 1,
            // Either `c` never appeared in `s`, or `t` has too many of it.
            _ => return false,
        }
    }
    // Every count must be fully consumed, otherwise `s` had extra letters.
    counts.values().all(|&n| n == 0)
}

// Given a string, return the index of its first unique character. If
// a unique character does not exist, return -1.
//
// Ex: Given the following strings...
//
// "abcabd", return 2
// "thedailybyte", return 1
// "developer", return 0
pub fn first_unique_character(s: &str) -> i64 {
    // First pass: tally occurrences; second pass: find the first count of 1.
    let mut counts: HashMap<char, usize> = HashMap::new();
    for c in s.chars() {
        *counts.entry(c).or_insert(0) += 1;
    }
    for (i, c) in s.chars().enumerate() {
        if counts[&c] == 1 {
            return i as i64;
        }
    }
    -1
}

// You are given two strings, s and t which only consist of lowercase
// letters. t is generated by shuffling the letters in s as well as
// potentially adding an additional random character. Return the letter
// that was randomly added to t if it exists, otherwise, return None.
//
// Note: You may assume that at most one additional character can be
// added to t.
//
// Ex: Given the following strings...
// s = "foobar", t = "barfoot", return 't'
// s = "ide", t = "idea", return 'a'
// s = "coding", t = "ingcod", return None
pub fn spot_difference(s: &str, t: &str) -> Option<char> {
    // Count characters on both sides; any key in `t` that is missing from
    // `s` or has a different count is the added character.
    let ss = char_counts(s);
    let tt = char_counts(t);
    for (k, v) in tt {
        if ss.get(&k) != Some(&v) {
            return Some(k);
        }
    }
    None
}

// Tallies how many times each character occurs in `s`.
fn char_counts(s: &str) -> HashMap<char, usize> {
    let mut counts = HashMap::new();
    for c in s.chars() {
        *counts.entry(c).or_insert(0) += 1;
    }
    counts
}

// Given two integer arrays, return their intersection. Note: the
// intersection is the set of elements that are common to both arrays.
//
// Ex: Given the following arrays...
//
// nums1 = [2, 4, 4, 2], nums2 = [2, 4], return [2, 4]
// nums1 = [1, 2, 3, 3], nums2 = [3, 3], return [3]
// nums1 = [2, 4, 6, 8], nums2 = [1, 3, 5, 7], return []
pub fn intersection(ii: &[usize], jj: &[usize]) -> Vec<usize> {
    let s1: HashSet<usize> = ii.iter().cloned().collect();
    let s2: HashSet<usize> = jj.iter().cloned().collect();
    s1.intersection(&s2).copied().collect()
}

// Given two strings representing sentences, return the words that are
// not common to both strings (i.e. the words that only appear in one of
// the sentences). You may assume that each sentence is a sequence of
// words (without punctuation) correctly separated using space
// characters.
//
// Ex: given the following strings...
//
// sentence1 = "the quick", sentence2 = "brown fox", return ["the", "quick", "brown", "fox"]
// sentence1 = "the tortoise beat the haire", sentence2 = "the tortoise lost to the haire", return ["beat", "to", "lost"]
// sentence1 = "copper coffee pot", sentence2 = "hot coffee pot", return ["copper", "hot"]
pub fn uncommon_words<'a>(s: &'a str, t: &'a str) -> Vec<&'a str> {
    let ss: HashSet<&str> = s.split(' ').collect();
    let tt: HashSet<&str> = t.split(' ').collect();
    // The symmetric difference is exactly the words in one set but not both.
    ss.symmetric_difference(&tt).copied().collect()
}

// Given two sorted linked lists, merge them together in ascending order
// and return a reference to the merged list
//
// Ex: Given the following lists...
//
// list1 = 1->2->3, list2 = 4->5->6->null, return 1->2->3->4->5->6->null
// list1 = 1->3->5, list2 = 2->4->6->null, return 1->2->3->4->5->6->null
// list1 = 4->4->7, list2 = 1->5->6->null, return 1->4->4->5->6->7->null
pub fn merged_linked_lists(
    l1: LinkedList<usize>,
    l2: LinkedList<usize>,
) -> LinkedList<usize> {
    // The lists are consumed by value, so iterate by value too (the
    // previous version borrowed and cloned every element, and carried an
    // unused lifetime parameter).
    let mut merged = LinkedList::new();
    let mut i1 = l1.into_iter().peekable();
    let mut i2 = l2.into_iter().peekable();
    loop {
        // Decide which iterator to draw from; on a tie, take from `l2`
        // (matching the original strict `<` comparison).
        let take_first = match (i1.peek(), i2.peek()) {
            (None, None) => break,
            (Some(_), None) => true,
            (None, Some(_)) => false,
            (Some(a), Some(b)) => a < b,
        };
        if take_first {
            merged.push_back(i1.next().unwrap());
        } else {
            merged.push_back(i2.next().unwrap());
        }
    }
    merged
}

// Given a string only containing the following characters (, ), {, },
// [, and ] return whether or not the opening and closing characters are
// in a valid order.
//
// Ex: Given the following strings...
//
// "(){}[]", return true
// "(({[]}))", return true
// "{(})", return false
pub fn validate_characters(s: &str) -> bool {
    let mut stack = Vec::new();
    for c in s.chars() {
        match c {
            '(' | '[' | '{' => stack.push(c),
            ')' | ']' | '}' => {
                // The most recent unmatched opener must pair with `c`.
                let expected = match c {
                    ')' => '(',
                    ']' => '[',
                    _ => '{',
                };
                if stack.pop() != Some(expected) {
                    return false;
                }
            }
            // Any other character makes the input invalid.
            _ => return false,
        }
    }
    // Valid only if every opener was closed.
    stack.is_empty()
}

// Given two strings s and t, which represents a sequence of keystrokes,
// where # denotes a backspace, return whether or not the sequences
// produce the same result.
//
// Ex: Given the following strings...
//
// s = "ABC#", t = "CD##AB", return true
// s = "como#pur#ter", t = "computer", return true
// s = "cof#dim#ng", t = "code", return false
pub fn compare_keystrokes(s: &str, t: &str) -> bool {
    compare_keystrokes_helper(s) == compare_keystrokes_helper(t)
}

// Replays the keystroke sequence and returns the resulting characters.
pub fn compare_keystrokes_helper(s: &str) -> Vec<char> {
    // NOTE: It should be possible to do this backwards and just keep a
    // count of '#' and ignore characters. Not sure how much faster it
    // would be in comparison to this method. The push/pop does have a
    // cost but unless the '#' count is large, it may be negligible.
    // Both are still technically 2N.
    let mut ss = Vec::new();
    for c in s.chars() {
        match c {
            // Backspace: drop the last kept character, if any.
            '#' => {
                ss.pop();
            }
            _ => ss.push(c),
        }
    }
    ss
}

// Given a string s containing only lowercase letters, continuously
// remove adjacent characters that are the same and return the result.
//
// Ex: Given the following strings...
//
// s = "abccba", return ""
// s = "foobar", return "fbar"
// s = "abccbefggfe", return "a"
pub fn remove_adjacent_characters(s: &str) -> String {
    let mut stack: Vec<char> = Vec::new();
    for c in s.chars() {
        // A match with the top of the stack annihilates the pair; this
        // cascades naturally as outer pairs meet after inner ones vanish.
        if stack.last() == Some(&c) {
            stack.pop();
        } else {
            stack.push(c);
        }
    }
    stack.iter().collect()
}

// Given two arrays of numbers, where the first array is a subset of the
// second array, return an array containing all the next greater
// elements for each element in the first array, in the second array. If
// there is no greater element for any element, output -1 for that
// number.
//
// Ex: Given the following arrays…
//
// nums1 = [4,1,2], nums2 = [1,3,4,2], return [-1, 3, -1]
// nums1 = [2,4], nums2 = [1,2,3,4], return [3, -1]
pub fn greater_elements(nums1: &[i32], nums2: &[i32]) -> Vec<i32> {
    let mut next = HashMap::new();
    let mut stack: Vec<i32> = Vec::new();
    // We use a stack here to track the elements. This works a bit
    // backwards. For each number, we consult the stack and pop until
    // we've found one that's smaller. At that point we have found the
    // greater element.
    for &i in nums2 {
        while stack.last().map_or(false, |&top| i > top) {
            next.insert(stack.pop().unwrap(), i);
        }
        stack.push(i);
    }
    nums1
        .iter()
        .map(|i| next.get(i).copied().unwrap_or(-1))
        .collect()
}

// Tracks call timestamps so `ping` can report how many calls occurred
// in the trailing three-second window.
pub struct CallCounter {
    pings: VecDeque<usize>,
}

// Create a class CallCounter that tracks the number of calls a client
// has made within the last 3 seconds. Your class should contain one
// method, ping(int t) that receives the current timestamp (in
// milliseconds) of a new call being made and returns the number of
// calls made within the last 3 seconds.
//
// Note: you may assume that the time associated with each subsequent
// call to ping is strictly increasing.
//
// Ex: Given the following calls to ping…
//
// ping(1), return 1 (1 call within the last 3 seconds)
// ping(300), return 2 (2 calls within the last 3 seconds)
// ping(3000), return 3 (3 calls within the last 3 seconds)
// ping(3002), return 3 (3 calls within the last 3 seconds)
// ping(7000), return 1 (1 call within the last 3 seconds)
impl CallCounter {
    pub fn new() -> CallCounter {
        CallCounter {
            pings: VecDeque::new(),
        }
    }

    pub fn ping(&mut self, when: usize) -> usize {
        // Evicting from the front (rather than `retain`) only visits values
        // that actually need to be removed, which is faster for large N.
        self.pings.push_back(when);
        while let Some(&oldest) = self.pings.front() {
            if when - oldest <= 3000 {
                break;
            }
            self.pings.pop_front();
        }
        self.pings.len()
    }
}

// Design a class, MovingAverage, which contains a method, next that is
// responsible for returning the moving average from a stream of
// integers.
//
// Note: a moving average is the average of a subset of data at a given
// point in time.
//
// Ex: Given the following series of events...
//
// // i.e. the moving average has a capacity of 3.
// MovingAverage movingAverage = new MovingAverage(3);
// m.next(3) returns 3 because (3 / 1) = 3
// m.next(5) returns 4 because (3 + 5) / 2 = 4
// m.next(7) = returns 5 because (3 + 5 + 7) / 3 = 5
// m.next(6) = returns 6 because (5 + 7 + 6) / 3 = 6
pub struct MovingAverage {
    // Maximum number of recent samples contributing to the average.
    size: usize,
    // Most recent samples, oldest at the front.
    items: VecDeque<i64>,
}

impl MovingAverage {
    // Creates a moving average over a window of `size` samples.
    pub fn new(size: usize) -> MovingAverage {
        MovingAverage {
            size: size,
            items: VecDeque::new(),
        }
    }

    // Records `i` and returns the average of the last `size` samples.
    pub fn next(&mut self, i: i64) -> f64 {
        self.items.push_back(i);
        // Evict oldest samples so at most `size` remain.
        while self.items.len() > self.size {
            self.items.pop_front();
        }
        self.items.iter().sum::<i64>() as f64 / self.items.len() as f64
    }
}

#[cfg(test)]
mod tests {
    use crate::daily_byte::*;

    // Sorts both sides before comparing, for functions whose output
    // order is unspecified (hash-set based results).
    macro_rules! assert_eq_sorted {
        ($l:expr, $r:expr) => {
            let mut left = $l;
            left.sort();
            let mut right = $r;
            right.sort();
            assert_eq!(left, right);
        };
    }

    #[test]
    fn test_moving_average() {
        let mut m = MovingAverage::new(3);
        assert_eq!(m.next(3), 3.);
        assert_eq!(m.next(5), 4.);
        assert_eq!(m.next(7), 5.);
        assert_eq!(m.next(6), 6.);
    }

    #[test]
    fn test_call_counter() {
        let mut cc = CallCounter::new();
        assert_eq!(cc.ping(1), 1);
        assert_eq!(cc.ping(300), 2);
        assert_eq!(cc.ping(3000), 3);
        assert_eq!(cc.ping(3002), 3);
        assert_eq!(cc.ping(7000), 1);
    }

    #[test]
    fn test_greater_elements() {
        assert_eq!(greater_elements(&[4, 1, 2], &[1, 3, 4, 2]), [-1, 3, -1]);
        assert_eq!(greater_elements(&[2, 4], &[1, 2, 3, 4]), [3, -1]);
    }

    #[test]
    fn test_remove_adjacent_characters() {
        assert_eq!(remove_adjacent_characters("abccba"), "".to_string());
        assert_eq!(remove_adjacent_characters("foobar"), "fbar".to_string());
        assert_eq!(remove_adjacent_characters("abccbefggfe"), "a".to_string());
    }

    #[test]
    fn test_compare_keystrokes() {
        assert_eq!(compare_keystrokes("ABC#", "CD##AB"), true);
        assert_eq!(compare_keystrokes("como#pur#ter", "computer"), true);
        assert_eq!(compare_keystrokes("cof#dim#ng", "code"), false);
    }

    #[test]
    fn test_validate_characters() {
        assert_eq!(validate_characters("(){}[]"), true);
        assert_eq!(validate_characters("(({[]}))"), true);
        assert_eq!(
            validate_characters("((()()(){[]}{}{}({}{}{})[()[]]))"),
            true
        );
        assert_eq!(validate_characters("{(})"), false);
    }

    #[test]
    fn test_merged_linked_lists() {
        assert_eq!(
            merged_linked_lists(
                LinkedList::<usize>::from_iter(vec![1, 2, 3].iter().cloned()),
                LinkedList::<usize>::from_iter(vec![4, 5, 6].iter().cloned())
            ),
            LinkedList::<usize>::from_iter(
                vec![1, 2, 3, 4, 5, 6].iter().cloned()
            )
        );
        assert_eq!(
            merged_linked_lists(
                LinkedList::<usize>::from_iter(vec![1, 3, 5].iter().cloned()),
                LinkedList::<usize>::from_iter(vec![2, 4, 6].iter().cloned())
            ),
            LinkedList::<usize>::from_iter(
                vec![1, 2, 3, 4, 5, 6].iter().cloned()
            )
        );
        assert_eq!(
            merged_linked_lists(
                LinkedList::<usize>::from_iter(vec![4, 4, 7].iter().cloned()),
                LinkedList::<usize>::from_iter(vec![1, 5, 6].iter().cloned())
            ),
            LinkedList::<usize>::from_iter(
                vec![1, 4, 4, 5, 6, 7].iter().cloned()
            )
        );
    }

    #[test]
    fn test_uncommon_words() {
        assert_eq_sorted!(
            uncommon_words("the quick", "brown fox"),
            vec!["quick", "the", "brown", "fox"]
        );
        assert_eq_sorted!(
            uncommon_words(
                "the tortoise beat the haire",
                "the tortoise lost to the haire"
            ),
            vec!["beat", "to", "lost"]
        );
        assert_eq_sorted!(
            uncommon_words("copper coffee pot", "hot coffee pot"),
            vec!["copper", "hot"]
        );
    }

    #[test]
    fn test_intersection() {
        assert_eq_sorted!(intersection(&[2, 4, 4, 2], &[2, 4]), [2, 4]);
        assert_eq_sorted!(intersection(&[1, 2, 3, 3], &[3, 3]), [3]);
        assert_eq_sorted!(intersection(&[2, 4, 6, 8], &[1, 3, 5, 7]), []);
    }

    #[test]
    fn test_spot_difference() {
        assert_eq!(spot_difference("foobar", "barfoot"), Some('t'));
        assert_eq!(spot_difference("ide", "idea"), Some('a'));
        assert_eq!(spot_difference("coding", "ingcod"), None);
    }

    #[test]
    fn test_first_unique_character() {
        assert_eq!(first_unique_character("abcabd"), 2);
        assert_eq!(first_unique_character("thedailybyte"), 1);
        assert_eq!(first_unique_character("developer"), 0);
    }

    #[test]
    fn test_valid_anagram() {
        assert_eq!(valid_anagram("cat", "tac"), true);
        assert_eq!(valid_anagram("listen", "silent"), true);
        assert_eq!(valid_anagram("program", "function"), false);
    }

    #[test]
    fn test_jewels_and_stones() {
        assert_eq!(jewels_and_stones("abc", "ac"), 2);
        assert_eq!(jewels_and_stones("Af", "AaaddfFf"), 3);
        assert_eq!(jewels_and_stones("AYOPD", "ayopd"), 0);
    }

    #[test]
    fn test_two_sum() {
        assert_eq!(two_sum(&[1, 3, 8, 2], 10), true);
        assert_eq!(two_sum(&[3, 9, 13, 7], 8), false);
        assert_eq!(two_sum(&[4, 2, 6, 5, 2], 4), true);
    }

    #[test]
    fn test_valid_palindrome_removal() {
        assert_eq!(valid_palindrome_removal("abcba"), true);
        assert_eq!(valid_palindrome_removal("foobof"), true);
        assert_eq!(valid_palindrome_removal("abccab"), false);
    }

    #[test]
    fn test_longest_common_prefix() {
        assert_eq!(
            longest_common_prefix(&["colorado", "color", "cold"]),
            "col"
        );
        assert_eq!(longest_common_prefix(&["a", "b", "c"]), "");
        assert_eq!(
            longest_common_prefix(&["spot", "spotty", "spotted"]),
            "spot"
        );
    }

    #[test]
    fn test_add_bytes() {
        assert_eq!(add_bytes("100", "1"), "101");
        assert_eq!(add_bytes("1", "0"), "1");
        assert_eq!(add_bytes("10", "1"), "11");
        assert_eq!(add_bytes("11", "1"), "100");
    }

    #[test]
    fn test_correctly_capitalized() {
        assert_eq!(correctly_capitalized("USA"), true);
        assert_eq!(correctly_capitalized("Calvin"), true);
        assert_eq!(correctly_capitalized("compUter"), false);
        assert_eq!(correctly_capitalized("coding"), true);
    }

    #[test]
    fn test_reverse_string() {
        assert_eq!(reverse_string("Cat"), "taC".to_string());
        assert_eq!(
            reverse_string("The Daily Byte"),
            "etyB yliaD ehT".to_string()
        );
        assert_eq!(reverse_string("civic"), "civic".to_string());
    }

    #[test]
    fn test_palindrome_special() {
        assert_eq!(palindrome_special("level"), true);
        assert_eq!(palindrome_special("algorithm"), false);
        assert_eq!(palindrome_special("A man, a plan, a canal: Panama."), true);
    }

    #[test]
    fn test_vacuum_cleaner() {
        assert_eq!(vaccum_cleaner("LR"), true);
        assert_eq!(vaccum_cleaner("URURD"), false);
        assert_eq!(vaccum_cleaner("RUULLDRD"), true);
    }
}
27.214545
121
0.614823