hexsha
stringlengths 40
40
| size
int64 2
1.05M
| content
stringlengths 2
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
ebb0ff52cdef11c2d534e290031c645c97290eba | 9,429 | #[allow(unused_imports)]
use test_env_log::test;
#[allow(unused_imports)]
use pact_models::PactSpecification;
#[allow(unused_imports)]
use serde_json;
#[allow(unused_imports)]
use expectest::prelude::*;
#[allow(unused_imports)]
use pact_matching::models::{Interaction, http_interaction_from_json};
#[allow(unused_imports)]
use pact_matching::{match_interaction_request, match_interaction_response};
#[test]
fn different_param_order() {
    // Pact specification test case: V1 request query matching.
    let path = "tests/spec_testcases/v1/request/query/different param order.json";
    println!("FILE: {}", path);
    #[allow(unused_mut)]
    let mut pact: serde_json::Value = serde_json::from_str(r#"
      {
        "match": true,
        "comment": "Query strings are matched using basic string equality, these are not equal. (not supported by pact-jvm)",
        "expected" : {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John",
          "headers": {}
        },
        "actual": {
          "method": "GET",
          "path": "/path",
          "query": "hippo=John&alligator=Mary",
          "headers": {}
        }
      }
    "#).unwrap();
    // Wrap the "expected"/"actual" request fragments in a synchronous HTTP interaction.
    let interaction_for = |key: &str| {
        serde_json::json!({"type": "Synchronous/HTTP", "request": pact.get(key).unwrap()})
    };
    let expected =
        http_interaction_from_json(path, &interaction_for("expected"), &PactSpecification::V1).unwrap();
    println!("EXPECTED: {:?}", expected);
    println!("BODY: {}", expected.contents().str_value());
    let actual =
        http_interaction_from_json(path, &interaction_for("actual"), &PactSpecification::V1).unwrap();
    println!("ACTUAL: {:?}", actual);
    println!("BODY: {}", actual.contents().str_value());
    let should_match = pact.get("match").unwrap().as_bool().unwrap();
    let mismatches = match_interaction_request(expected, actual, &PactSpecification::V1)
        .unwrap()
        .mismatches();
    println!("RESULT: {:?}", mismatches);
    if should_match {
        expect!(mismatches.iter()).to(be_empty());
    } else {
        expect!(mismatches.iter()).to_not(be_empty());
    }
}
#[test]
fn different_param_values() {
    // Pact specification test case: V1 request query matching.
    let path = "tests/spec_testcases/v1/request/query/different param values.json";
    println!("FILE: {}", path);
    #[allow(unused_mut)]
    let mut pact: serde_json::Value = serde_json::from_str(r#"
      {
        "match": false,
        "comment": "Queries are not the same - hippo is Fred instead of John",
        "expected" : {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John",
          "headers": {}
        },
        "actual": {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=Fred",
          "headers": {}
        }
      }
    "#).unwrap();
    // Wrap the "expected"/"actual" request fragments in a synchronous HTTP interaction.
    let interaction_for = |key: &str| {
        serde_json::json!({"type": "Synchronous/HTTP", "request": pact.get(key).unwrap()})
    };
    let expected =
        http_interaction_from_json(path, &interaction_for("expected"), &PactSpecification::V1).unwrap();
    println!("EXPECTED: {:?}", expected);
    println!("BODY: {}", expected.contents().str_value());
    let actual =
        http_interaction_from_json(path, &interaction_for("actual"), &PactSpecification::V1).unwrap();
    println!("ACTUAL: {:?}", actual);
    println!("BODY: {}", actual.contents().str_value());
    let should_match = pact.get("match").unwrap().as_bool().unwrap();
    let mismatches = match_interaction_request(expected, actual, &PactSpecification::V1)
        .unwrap()
        .mismatches();
    println!("RESULT: {:?}", mismatches);
    if should_match {
        expect!(mismatches.iter()).to(be_empty());
    } else {
        expect!(mismatches.iter()).to_not(be_empty());
    }
}
#[test]
fn matches_with_equals_in_the_query_value() {
    // Pact specification test case: V1 request query matching.
    let path = "tests/spec_testcases/v1/request/query/matches with equals in the query value.json";
    println!("FILE: {}", path);
    #[allow(unused_mut)]
    let mut pact: serde_json::Value = serde_json::from_str(r#"
      {
        "match": true,
        "comment": "Queries are equivalent",
        "expected" : {
          "method": "GET",
          "path": "/path",
          "query": "options=delete.topic.enable=true&broker=1",
          "headers": {}
        },
        "actual": {
          "method": "GET",
          "path": "/path",
          "query": "options=delete.topic.enable%3Dtrue&broker=1",
          "headers": {}
        }
      }
    "#).unwrap();
    // Wrap the "expected"/"actual" request fragments in a synchronous HTTP interaction.
    let interaction_for = |key: &str| {
        serde_json::json!({"type": "Synchronous/HTTP", "request": pact.get(key).unwrap()})
    };
    let expected =
        http_interaction_from_json(path, &interaction_for("expected"), &PactSpecification::V1).unwrap();
    println!("EXPECTED: {:?}", expected);
    println!("BODY: {}", expected.contents().str_value());
    let actual =
        http_interaction_from_json(path, &interaction_for("actual"), &PactSpecification::V1).unwrap();
    println!("ACTUAL: {:?}", actual);
    println!("BODY: {}", actual.contents().str_value());
    let should_match = pact.get("match").unwrap().as_bool().unwrap();
    let mismatches = match_interaction_request(expected, actual, &PactSpecification::V1)
        .unwrap()
        .mismatches();
    println!("RESULT: {:?}", mismatches);
    if should_match {
        expect!(mismatches.iter()).to(be_empty());
    } else {
        expect!(mismatches.iter()).to_not(be_empty());
    }
}
#[test]
fn trailing_amperand() {
    // Pact specification test case: V1 request query matching.
    // NOTE: "amperand" (sic) matches the on-disk test-case file name, so it
    // must not be "corrected" here.
    let path = "tests/spec_testcases/v1/request/query/trailing amperand.json";
    println!("FILE: {}", path);
    #[allow(unused_mut)]
    let mut pact: serde_json::Value = serde_json::from_str(r#"
      {
        "match": true,
        "comment": "Query strings are matched using basic string equality, these are not equal. (not supported by pact-jvm)",
        "expected" : {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John",
          "headers": {}
        },
        "actual": {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John&",
          "headers": {}
        }
      }
    "#).unwrap();
    // Wrap the "expected"/"actual" request fragments in a synchronous HTTP interaction.
    let interaction_for = |key: &str| {
        serde_json::json!({"type": "Synchronous/HTTP", "request": pact.get(key).unwrap()})
    };
    let expected =
        http_interaction_from_json(path, &interaction_for("expected"), &PactSpecification::V1).unwrap();
    println!("EXPECTED: {:?}", expected);
    println!("BODY: {}", expected.contents().str_value());
    let actual =
        http_interaction_from_json(path, &interaction_for("actual"), &PactSpecification::V1).unwrap();
    println!("ACTUAL: {:?}", actual);
    println!("BODY: {}", actual.contents().str_value());
    let should_match = pact.get("match").unwrap().as_bool().unwrap();
    let mismatches = match_interaction_request(expected, actual, &PactSpecification::V1)
        .unwrap()
        .mismatches();
    println!("RESULT: {:?}", mismatches);
    if should_match {
        expect!(mismatches.iter()).to(be_empty());
    } else {
        expect!(mismatches.iter()).to_not(be_empty());
    }
}
#[test]
fn matches() {
    // Pact specification test case: V1 request query matching.
    let path = "tests/spec_testcases/v1/request/query/matches.json";
    println!("FILE: {}", path);
    #[allow(unused_mut)]
    let mut pact: serde_json::Value = serde_json::from_str(r#"
      {
        "match": true,
        "comment": "Queries are the same",
        "expected" : {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John",
          "headers": {}
        },
        "actual": {
          "method": "GET",
          "path": "/path",
          "query": "alligator=Mary&hippo=John",
          "headers": {}
        }
      }
    "#).unwrap();
    // Wrap the "expected"/"actual" request fragments in a synchronous HTTP interaction.
    let interaction_for = |key: &str| {
        serde_json::json!({"type": "Synchronous/HTTP", "request": pact.get(key).unwrap()})
    };
    let expected =
        http_interaction_from_json(path, &interaction_for("expected"), &PactSpecification::V1).unwrap();
    println!("EXPECTED: {:?}", expected);
    println!("BODY: {}", expected.contents().str_value());
    let actual =
        http_interaction_from_json(path, &interaction_for("actual"), &PactSpecification::V1).unwrap();
    println!("ACTUAL: {:?}", actual);
    println!("BODY: {}", actual.contents().str_value());
    let should_match = pact.get("match").unwrap().as_bool().unwrap();
    let mismatches = match_interaction_request(expected, actual, &PactSpecification::V1)
        .unwrap()
        .mismatches();
    println!("RESULT: {:?}", mismatches);
    if should_match {
        expect!(mismatches.iter()).to(be_empty());
    } else {
        expect!(mismatches.iter()).to_not(be_empty());
    }
}
| 41.355263 | 183 | 0.617245 |
387f62dde2e4355570cea509ab51af35e9a5d77a | 5,075 | use crate::format::problem::*;
use crate::format::solution::*;
use crate::helpers::*;
/// End-to-end check that a Strict relation (fixed order starting at
/// departure) and an Any relation (jobs must be on the vehicle, order free)
/// can be combined on the same vehicle, and that the solver reproduces the
/// exact expected tour. The expected stop data below is value-exact
/// (times, loads, distances), so it must not be altered.
#[test]
fn can_use_strict_and_any_relation_for_one_vehicle() {
    let problem = Problem {
        plan: Plan {
            // Seven deliveries laid out on the x-axis at x = 1..=7.
            jobs: vec![
                create_delivery_job("job1", vec![1., 0.]),
                create_delivery_job("job2", vec![2., 0.]),
                create_delivery_job("job3", vec![3., 0.]),
                create_delivery_job("job4", vec![4., 0.]),
                create_delivery_job("job5", vec![5., 0.]),
                create_delivery_job("job6", vec![6., 0.]),
                create_delivery_job("job7", vec![7., 0.]),
            ],
            relations: Some(vec![
                // Must visit job4 -> job2 -> job6 in this exact order right
                // after departure.
                Relation {
                    type_field: RelationType::Strict,
                    jobs: to_strings(vec!["departure", "job4", "job2", "job6"]),
                    vehicle_id: "my_vehicle_1".to_string(),
                    shift_index: None,
                },
                // job1 and job3 must be served by this vehicle, in any order.
                Relation {
                    type_field: RelationType::Any,
                    jobs: to_strings(vec!["job1", "job3"]),
                    vehicle_id: "my_vehicle_1".to_string(),
                    shift_index: None,
                },
            ]),
            ..create_empty_plan()
        },
        fleet: Fleet { vehicles: vec![create_default_vehicle_type()], profiles: create_default_matrix_profiles() },
        ..create_empty_problem()
    };
    let matrix = create_matrix_from_problem(&problem);

    let solution = solve_with_metaheuristic(problem, Some(vec![matrix]));

    // The solution is compared field-by-field against the known optimum for
    // this layout; stops list load, time window and travelled distance.
    assert_eq!(
        solution,
        Solution {
            statistic: Statistic {
                cost: 53.,
                distance: 18,
                duration: 25,
                times: Timing { driving: 18, serving: 7, ..Timing::default() },
            },
            tours: vec![Tour {
                vehicle_id: "my_vehicle_1".to_string(),
                type_id: "my_vehicle".to_string(),
                shift_index: 0,
                stops: vec![
                    create_stop_with_activity(
                        "departure",
                        "departure",
                        (0., 0.),
                        7,
                        ("1970-01-01T00:00:00Z", "1970-01-01T00:00:00Z"),
                        0
                    ),
                    // Strict relation enforces job4 -> job2 -> job6 first.
                    create_stop_with_activity(
                        "job4",
                        "delivery",
                        (4., 0.),
                        6,
                        ("1970-01-01T00:00:04Z", "1970-01-01T00:00:05Z"),
                        4
                    ),
                    create_stop_with_activity(
                        "job2",
                        "delivery",
                        (2., 0.),
                        5,
                        ("1970-01-01T00:00:07Z", "1970-01-01T00:00:08Z"),
                        6
                    ),
                    create_stop_with_activity(
                        "job6",
                        "delivery",
                        (6., 0.),
                        4,
                        ("1970-01-01T00:00:12Z", "1970-01-01T00:00:13Z"),
                        10
                    ),
                    create_stop_with_activity(
                        "job7",
                        "delivery",
                        (7., 0.),
                        3,
                        ("1970-01-01T00:00:14Z", "1970-01-01T00:00:15Z"),
                        11
                    ),
                    create_stop_with_activity(
                        "job5",
                        "delivery",
                        (5., 0.),
                        2,
                        ("1970-01-01T00:00:17Z", "1970-01-01T00:00:18Z"),
                        13
                    ),
                    create_stop_with_activity(
                        "job3",
                        "delivery",
                        (3., 0.),
                        1,
                        ("1970-01-01T00:00:20Z", "1970-01-01T00:00:21Z"),
                        15
                    ),
                    create_stop_with_activity(
                        "job1",
                        "delivery",
                        (1., 0.),
                        0,
                        ("1970-01-01T00:00:23Z", "1970-01-01T00:00:24Z"),
                        17
                    ),
                    create_stop_with_activity(
                        "arrival",
                        "arrival",
                        (0., 0.),
                        0,
                        ("1970-01-01T00:00:25Z", "1970-01-01T00:00:25Z"),
                        18
                    )
                ],
                statistic: Statistic {
                    cost: 53.,
                    distance: 18,
                    duration: 25,
                    times: Timing { driving: 18, serving: 7, ..Timing::default() },
                },
            }],
            ..create_empty_solution()
        }
    );
}
| 36.510791 | 115 | 0.349754 |
625d97336e75bf97a4988ffee216f1b8c93cd8a0 | 2,142 | use crate::errors::*;
use crate::types::*;
use uuid::Uuid;
/// Deletes a supergroup or channel along with all messages in the corresponding chat. This will release the supergroup or channel username and remove all members; requires owner privileges in the supergroup or channel. Chats with more than 1000 members can't be deleted using this method
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DeleteSupergroup {
    // Correlation token echoed back by TDLib (serialized as "@extra").
    #[doc(hidden)]
    #[serde(rename(serialize = "@extra", deserialize = "@extra"))]
    extra: Option<String>,
    // Client instance identifier (serialized as "@client_id").
    #[serde(rename(serialize = "@client_id", deserialize = "@client_id"))]
    client_id: Option<i32>,
    /// Identifier of the supergroup or channel
    supergroup_id: i32,
    // TDLib type tag; always "deleteSupergroup" for this request.
    #[serde(rename(serialize = "@type"))]
    td_type: String,
}
impl RObject for DeleteSupergroup {
    // Borrowed view of the "@extra" correlation token, if one was set.
    #[doc(hidden)]
    fn extra(&self) -> Option<&str> {
        self.extra.as_deref()
    }
    #[doc(hidden)]
    fn client_id(&self) -> Option<i32> {
        self.client_id
    }
}

// Marker impl: this type can be sent to TDLib as a request/function.
impl RFunction for DeleteSupergroup {}
impl DeleteSupergroup {
pub fn from_json<S: AsRef<str>>(json: S) -> RTDResult<Self> {
Ok(serde_json::from_str(json.as_ref())?)
}
pub fn builder() -> RTDDeleteSupergroupBuilder {
let mut inner = DeleteSupergroup::default();
inner.extra = Some(Uuid::new_v4().to_string());
inner.td_type = "deleteSupergroup".to_string();
RTDDeleteSupergroupBuilder { inner }
}
pub fn supergroup_id(&self) -> i32 {
self.supergroup_id
}
}
// Builder for `DeleteSupergroup`; obtained via `DeleteSupergroup::builder()`.
#[doc(hidden)]
pub struct RTDDeleteSupergroupBuilder {
    inner: DeleteSupergroup,
}
impl RTDDeleteSupergroupBuilder {
    // Finalizes the builder; clones so the builder remains usable.
    pub fn build(&self) -> DeleteSupergroup {
        self.inner.clone()
    }
    // Sets the identifier of the supergroup or channel to delete.
    pub fn supergroup_id(&mut self, supergroup_id: i32) -> &mut Self {
        self.inner.supergroup_id = supergroup_id;
        self
    }
}
// Allow APIs to accept either the request or its builder interchangeably.
impl AsRef<DeleteSupergroup> for DeleteSupergroup {
    fn as_ref(&self) -> &DeleteSupergroup {
        self
    }
}

impl AsRef<DeleteSupergroup> for RTDDeleteSupergroupBuilder {
    fn as_ref(&self) -> &DeleteSupergroup {
        &self.inner
    }
}
| 27.461538 | 288 | 0.661064 |
8703d4132dc3aae523c16aee53d82921ceb21578 | 20,090 | mod binary;
mod boolean;
mod fixed_len_bytes;
mod levels;
mod primitive;
mod record_batch;
mod schema;
mod utf8;
mod utils;
pub mod stream;
use crate::array::*;
use crate::bitmap::Bitmap;
use crate::buffer::{Buffer, MutableBuffer};
use crate::datatypes::*;
use crate::error::{ArrowError, Result};
use crate::io::parquet::read::is_type_nullable;
use crate::io::parquet::write::levels::NestedInfo;
use crate::types::days_ms;
use crate::types::NativeType;
use parquet2::metadata::ColumnDescriptor;
pub use parquet2::{
compression::CompressionCodec,
read::CompressedPage,
schema::types::ParquetType,
write::{DynIter, RowGroupIter},
write::{Version, WriteOptions},
};
use parquet2::{
metadata::SchemaDescriptor, schema::KeyValue, write::write_file as parquet_write_file,
};
pub use record_batch::RowGroupIterator;
use schema::schema_to_metadata_key;
pub use schema::to_parquet_type;
/// Returns the minimum number of bytes needed for a signed two's-complement
/// integer to represent any decimal value with `precision` digits.
pub(self) fn decimal_length_from_precision(precision: usize) -> usize {
    // We need the smallest n with 2^(8n - 1) - 1 >= 10^precision - 1, i.e.
    //   n >= (log2(10^precision + 1) + 1) / 8
    let max_magnitude = 10.0_f64.powi(precision as i32) + 1.0;
    let required_bits = max_magnitude.log2() + 1.0;
    (required_bits / 8.0).ceil() as usize
}
/// Creates a parquet [`SchemaDescriptor`] from a [`Schema`].
pub fn to_parquet_schema(schema: &Schema) -> Result<SchemaDescriptor> {
let parquet_types = schema
.fields()
.iter()
.map(to_parquet_type)
.collect::<Result<Vec<_>>>()?;
Ok(SchemaDescriptor::new("root".to_string(), parquet_types))
}
/// Writes an iterator of row groups to `writer` as a parquet file, embedding
/// the Arrow schema in the file's key/value metadata.
pub fn write_file<'a, W, I>(
    writer: &mut W,
    row_groups: I,
    schema: &Schema,
    parquet_schema: SchemaDescriptor,
    options: WriteOptions,
    key_value_metadata: Option<Vec<KeyValue>>,
) -> Result<()>
where
    W: std::io::Write + std::io::Seek,
    I: Iterator<Item = Result<RowGroupIter<'a, ArrowError>>>,
{
    // The Arrow schema entry is always appended to whatever metadata the
    // caller provided (or to an empty list).
    let mut metadata = key_value_metadata.unwrap_or_default();
    metadata.push(schema_to_metadata_key(schema));

    let created_by = Some("Arrow2 - Native Rust implementation of Arrow".to_string());

    Ok(parquet_write_file(
        writer,
        row_groups,
        parquet_schema,
        options,
        created_by,
        Some(metadata),
    )?)
}
/// Converts a single [`Array`] into one compressed parquet data page,
/// dispatching on the array's logical [`DataType`] to pick the physical
/// parquet type and encoder.
pub fn array_to_page(
    array: &dyn Array,
    descriptor: ColumnDescriptor,
    options: WriteOptions,
) -> Result<CompressedPage> {
    // using plain encoding format
    match array.data_type() {
        DataType::Boolean => {
            boolean::array_to_page(array.as_any().downcast_ref().unwrap(), options, descriptor)
        }
        // casts below MUST match the casts done at the metadata (field -> parquet type).
        DataType::UInt8 => primitive::array_to_page::<u8, i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::UInt16 => primitive::array_to_page::<u16, i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::UInt32 => primitive::array_to_page::<u32, i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::UInt64 => primitive::array_to_page::<u64, i64>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Int8 => primitive::array_to_page::<i8, i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Int16 => primitive::array_to_page::<i16, i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Int32 | DataType::Date32 | DataType::Time32(_) => {
            primitive::array_to_page::<i32, i32>(
                array.as_any().downcast_ref().unwrap(),
                options,
                descriptor,
            )
        }
        DataType::Int64
        | DataType::Date64
        | DataType::Time64(_)
        | DataType::Timestamp(_, _)
        | DataType::Duration(_) => primitive::array_to_page::<i64, i64>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Float32 => primitive::array_to_page::<f32, f32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Float64 => primitive::array_to_page::<f64, f64>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::Utf8 => {
            utf8::array_to_page::<i32>(array.as_any().downcast_ref().unwrap(), options, descriptor)
        }
        DataType::LargeUtf8 => {
            utf8::array_to_page::<i64>(array.as_any().downcast_ref().unwrap(), options, descriptor)
        }
        DataType::Binary => binary::array_to_page::<i32>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        DataType::LargeBinary => binary::array_to_page::<i64>(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        // Null arrays have no physical parquet type of their own: write them
        // as an all-null Int32 column of the same length.
        DataType::Null => {
            let array = Int32Array::new_null(DataType::Int32, array.len());
            primitive::array_to_page::<i32, i32>(&array, options, descriptor)
        }
        // Interval(YearMonth) is written as a 12-byte fixed-size binary:
        // 4 little-endian bytes of months followed by 8 zero bytes.
        DataType::Interval(IntervalUnit::YearMonth) => {
            let array = array
                .as_any()
                .downcast_ref::<PrimitiveArray<i32>>()
                .unwrap();
            let mut values = MutableBuffer::<u8>::with_capacity(12 * array.len());
            array.values().iter().for_each(|x| {
                let bytes = &x.to_le_bytes();
                values.extend_from_slice(bytes);
                values.extend_constant(8, 0);
            });
            let array = FixedSizeBinaryArray::from_data(
                DataType::FixedSizeBinary(12),
                values.into(),
                array.validity().clone(),
            );
            fixed_len_bytes::array_to_page(&array, options, descriptor)
        }
        // Interval(DayTime) uses the same 12-byte layout with the months
        // field zeroed and days/milliseconds taken from the value.
        DataType::Interval(IntervalUnit::DayTime) => {
            let array = array
                .as_any()
                .downcast_ref::<PrimitiveArray<days_ms>>()
                .unwrap();
            let mut values = MutableBuffer::<u8>::with_capacity(12 * array.len());
            array.values().iter().for_each(|x| {
                let bytes = &x.to_le_bytes();
                values.extend_constant(4, 0); // months
                values.extend_from_slice(bytes); // days and seconds
            });
            let array = FixedSizeBinaryArray::from_data(
                DataType::FixedSizeBinary(12),
                values.into(),
                array.validity().clone(),
            );
            fixed_len_bytes::array_to_page(&array, options, descriptor)
        }
        DataType::FixedSizeBinary(_) => fixed_len_bytes::array_to_page(
            array.as_any().downcast_ref().unwrap(),
            options,
            descriptor,
        ),
        // Decimals are stored in the smallest physical type that fits the
        // precision: Int32 (<= 9 digits), Int64 (<= 18 digits), otherwise a
        // big-endian fixed-size binary sized by decimal_length_from_precision.
        DataType::Decimal(precision, _) => {
            let precision = *precision;
            if precision <= 9 {
                let array = array
                    .as_any()
                    .downcast_ref::<PrimitiveArray<i128>>()
                    .unwrap();
                let values = array.values().iter().map(|x| *x as i32);
                let values = Buffer::from_trusted_len_iter(values);
                let array = PrimitiveArray::<i32>::from_data(
                    DataType::Int32,
                    values,
                    array.validity().clone(),
                );
                primitive::array_to_page::<i32, i32>(&array, options, descriptor)
            } else if precision <= 18 {
                let array = array
                    .as_any()
                    .downcast_ref::<PrimitiveArray<i128>>()
                    .unwrap();
                let values = array.values().iter().map(|x| *x as i64);
                let values = Buffer::from_trusted_len_iter(values);
                let array = PrimitiveArray::<i64>::from_data(
                    DataType::Int64,
                    values,
                    array.validity().clone(),
                );
                primitive::array_to_page::<i64, i64>(&array, options, descriptor)
            } else {
                let array = array
                    .as_any()
                    .downcast_ref::<PrimitiveArray<i128>>()
                    .unwrap();
                let size = decimal_length_from_precision(precision);
                let mut values = MutableBuffer::<u8>::new(); // todo: this can be estimated
                array.values().iter().for_each(|x| {
                    // keep only the `size` least-significant big-endian bytes
                    let bytes = &x.to_be_bytes()[16 - size..];
                    values.extend_from_slice(bytes)
                });
                let array = FixedSizeBinaryArray::from_data(
                    DataType::FixedSizeBinary(size as i32),
                    values.into(),
                    array.validity().clone(),
                );
                fixed_len_bytes::array_to_page(&array, options, descriptor)
            }
        }
        // Nested types are handled by the repetition/definition-level path.
        DataType::FixedSizeList(_, _) | DataType::List(_) | DataType::LargeList(_) => {
            nested_array_to_page(array, descriptor, options)
        }
        other => Err(ArrowError::NotYetImplemented(format!(
            "Writing parquet V1 pages for data type {:?}",
            other
        ))),
    }
}
// Downcasts `$values` to a PrimitiveArray of `$from` and writes it as a
// nested page with parquet physical type `$to` and offset type `$offset`.
// Used to keep the per-DataType arms in list_array_to_page one-liners.
macro_rules! dyn_nested_prim {
    ($from:ty, $to:ty, $offset:ty, $values:expr, $nested:expr,$descriptor:expr, $options:expr) => {{
        let values = $values.as_any().downcast_ref().unwrap();
        primitive::nested_array_to_page::<$from, $to, $offset>(
            values,
            $options,
            $descriptor,
            $nested,
        )
    }};
}
// Writes the flattened values of a list-like array as a single nested page,
// carrying the list structure via `offsets`/`validity` (encoded as
// repetition/definition levels in NestedInfo).
fn list_array_to_page<O: Offset>(
    offsets: &[O],
    validity: &Option<Bitmap>,
    values: &dyn Array,
    descriptor: ColumnDescriptor,
    options: WriteOptions,
) -> Result<CompressedPage> {
    use DataType::*;
    // Nullability comes from the parquet column descriptor, not the array.
    let is_optional = is_type_nullable(descriptor.type_());
    let nested = NestedInfo::new(offsets, validity, is_optional);
    match values.data_type() {
        Boolean => {
            let values = values.as_any().downcast_ref().unwrap();
            boolean::nested_array_to_page::<O>(values, options, descriptor, nested)
        }
        // Primitive inner types: macro expands to downcast + nested write.
        UInt8 => dyn_nested_prim!(u8, i32, O, values, nested, descriptor, options),
        UInt16 => dyn_nested_prim!(u16, i32, O, values, nested, descriptor, options),
        UInt32 => dyn_nested_prim!(u32, i32, O, values, nested, descriptor, options),
        UInt64 => dyn_nested_prim!(u64, i64, O, values, nested, descriptor, options),
        Int8 => dyn_nested_prim!(i8, i32, O, values, nested, descriptor, options),
        Int16 => dyn_nested_prim!(i16, i32, O, values, nested, descriptor, options),
        Int32 | Date32 | Time32(_) => {
            dyn_nested_prim!(i32, i32, O, values, nested, descriptor, options)
        }
        Int64 | Date64 | Time64(_) | Timestamp(_, _) | Duration(_) => {
            dyn_nested_prim!(i64, i64, O, values, nested, descriptor, options)
        }
        Float32 => dyn_nested_prim!(f32, f32, O, values, nested, descriptor, options),
        Float64 => dyn_nested_prim!(f64, f64, O, values, nested, descriptor, options),
        // Variable-size inner types rebuild NestedInfo inline (same inputs).
        Utf8 => {
            let values = values.as_any().downcast_ref().unwrap();
            let is_optional = is_type_nullable(descriptor.type_());
            utf8::nested_array_to_page::<i32, O>(
                values,
                options,
                descriptor,
                NestedInfo::new(offsets, validity, is_optional),
            )
        }
        LargeUtf8 => {
            let values = values.as_any().downcast_ref().unwrap();
            let is_optional = is_type_nullable(descriptor.type_());
            utf8::nested_array_to_page::<i64, O>(
                values,
                options,
                descriptor,
                NestedInfo::new(offsets, validity, is_optional),
            )
        }
        Binary => {
            let values = values.as_any().downcast_ref().unwrap();
            let is_optional = is_type_nullable(descriptor.type_());
            binary::nested_array_to_page::<i32, O>(
                values,
                options,
                descriptor,
                NestedInfo::new(offsets, validity, is_optional),
            )
        }
        LargeBinary => {
            let values = values.as_any().downcast_ref().unwrap();
            let is_optional = is_type_nullable(descriptor.type_());
            binary::nested_array_to_page::<i64, O>(
                values,
                options,
                descriptor,
                NestedInfo::new(offsets, validity, is_optional),
            )
        }
        // Other inner types (e.g. doubly-nested lists) are not supported yet.
        _ => todo!(),
    }
}
// Dispatches a list-like array to list_array_to_page, extracting the
// offsets/validity for each concrete list flavor.
fn nested_array_to_page(
    array: &dyn Array,
    descriptor: ColumnDescriptor,
    options: WriteOptions,
) -> Result<CompressedPage> {
    match array.data_type() {
        DataType::List(_) => {
            let array = array.as_any().downcast_ref::<ListArray<i32>>().unwrap();
            list_array_to_page(
                array.offsets(),
                array.validity(),
                array.values().as_ref(),
                descriptor,
                options,
            )
        }
        DataType::LargeList(_) => {
            let array = array.as_any().downcast_ref::<ListArray<i64>>().unwrap();
            list_array_to_page(
                array.offsets(),
                array.validity(),
                array.values().as_ref(),
                descriptor,
                options,
            )
        }
        DataType::FixedSizeList(_, size) => {
            // FixedSizeList stores no offsets; synthesize evenly spaced ones
            // (0, size, 2*size, ...) so the list path can be reused.
            let array = array.as_any().downcast_ref::<FixedSizeListArray>().unwrap();
            let offsets = (0..array.len())
                .map(|x| size * x as i32)
                .collect::<Vec<_>>();
            list_array_to_page(
                &offsets,
                array.validity(),
                array.values().as_ref(),
                descriptor,
                options,
            )
        }
        // Callers only reach here for list-like types; anything else is a bug.
        _ => todo!(),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::error::Result;
    use std::io::Cursor;

    use super::super::tests::*;

    /// Round-trips a single pyarrow fixture column through write + read and
    /// checks that both the values and the written statistics survive.
    ///
    /// * `column` - index of the fixture column to use
    /// * `nullable`/`nested` - which fixture set (required/nullable/nested)
    /// * `version`/`compression` - parquet writer settings under test
    fn round_trip(
        column: usize,
        nullable: bool,
        nested: bool,
        version: Version,
        compression: CompressionCodec,
    ) -> Result<()> {
        let (array, statistics) = if nested {
            (
                pyarrow_nested_nullable(column),
                pyarrow_nested_nullable_statistics(column),
            )
        } else if nullable {
            (
                pyarrow_nullable(column),
                pyarrow_nullable_statistics(column),
            )
        } else {
            (
                pyarrow_required(column),
                pyarrow_required_statistics(column),
            )
        };

        let field = Field::new("a1", array.data_type().clone(), nullable);
        let schema = Schema::new(vec![field]);

        let options = WriteOptions {
            write_statistics: true,
            compression,
            version,
        };

        let parquet_schema = to_parquet_schema(&schema)?;

        // one row group
        // one column chunk
        // one page
        let row_groups =
            std::iter::once(Result::Ok(DynIter::new(std::iter::once(Ok(DynIter::new(
                std::iter::once(array.as_ref())
                    .zip(parquet_schema.columns().to_vec().into_iter())
                    .map(|(array, descriptor)| array_to_page(array, descriptor, options)),
            ))))));

        // Write to an in-memory buffer, then read the first column back.
        let mut writer = Cursor::new(vec![]);
        write_file(
            &mut writer,
            row_groups,
            &schema,
            parquet_schema,
            options,
            None,
        )?;

        let data = writer.into_inner();

        let (result, stats) = read_column(&mut Cursor::new(data), 0, 0)?;
        assert_eq!(array.as_ref(), result.as_ref());
        assert_eq!(statistics.as_ref(), stats.unwrap().as_ref());
        Ok(())
    }

    // The tests below sweep (type, nullability, nesting) x (version,
    // compression) combinations through the same round_trip harness.
    #[test]
    fn test_int64_optional_v1() -> Result<()> {
        round_trip(0, true, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_int64_required_v1() -> Result<()> {
        round_trip(0, false, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_int64_optional_v2() -> Result<()> {
        round_trip(0, true, false, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_int64_optional_v2_compressed() -> Result<()> {
        round_trip(0, true, false, Version::V2, CompressionCodec::Snappy)
    }

    #[test]
    fn test_utf8_optional_v1() -> Result<()> {
        round_trip(2, true, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_utf8_required_v1() -> Result<()> {
        round_trip(2, false, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_utf8_optional_v2() -> Result<()> {
        round_trip(2, true, false, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_utf8_required_v2() -> Result<()> {
        round_trip(2, false, false, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_utf8_optional_v2_compressed() -> Result<()> {
        round_trip(2, true, false, Version::V2, CompressionCodec::Snappy)
    }

    #[test]
    fn test_utf8_required_v2_compressed() -> Result<()> {
        round_trip(2, false, false, Version::V2, CompressionCodec::Snappy)
    }

    #[test]
    fn test_bool_optional_v1() -> Result<()> {
        round_trip(3, true, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_bool_required_v1() -> Result<()> {
        round_trip(3, false, false, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_bool_optional_v2_uncompressed() -> Result<()> {
        round_trip(3, true, false, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_bool_required_v2_uncompressed() -> Result<()> {
        round_trip(3, false, false, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_bool_required_v2_compressed() -> Result<()> {
        round_trip(3, false, false, Version::V2, CompressionCodec::Snappy)
    }

    #[test]
    fn test_list_int64_optional_v2() -> Result<()> {
        round_trip(0, true, true, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_int64_optional_v1() -> Result<()> {
        round_trip(0, true, true, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_bool_optional_v2() -> Result<()> {
        round_trip(4, true, true, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_bool_optional_v1() -> Result<()> {
        round_trip(4, true, true, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_utf8_optional_v2() -> Result<()> {
        round_trip(5, true, true, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_utf8_optional_v1() -> Result<()> {
        round_trip(5, true, true, Version::V1, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_large_binary_optional_v2() -> Result<()> {
        round_trip(6, true, true, Version::V2, CompressionCodec::Uncompressed)
    }

    #[test]
    fn test_list_large_binary_optional_v1() -> Result<()> {
        round_trip(6, true, true, Version::V1, CompressionCodec::Uncompressed)
    }
}
| 33.427621 | 100 | 0.54674 |
5b216f0aea2b35a74656d1c940c21931342d8ddc | 1,904 | use std::{env, io, path::PathBuf, str};
use which::{self, which_in};
use crate::utils::process::{command_output, command_spawn_wait};
const INSTALL_DIRS: &[&str] = &["/usr/local", "/home/linuxbrew/.linuxbrew", "~/.linuxbrew"];
/// Returns true when the `brew` executable can be located and responds to
/// `brew --version`.
pub fn has_brew() -> bool {
    brew_exe().map_or(false, |exe| command_output(exe, &["--version"]).is_ok())
}
pub fn brew<S>(args: &[S]) -> io::Result<()>
where
S: AsRef<str>,
{
match brew_exe() {
Some(exe) => command_spawn_wait(exe, args).map(|_| ()),
None => Err(io::Error::new(io::ErrorKind::NotFound, "brew")),
}
}
pub fn brew_output<S>(args: &[S]) -> io::Result<String>
where
S: AsRef<str>,
{
match brew_exe() {
Some(exe) => {
let output = command_output(exe, args)?;
Ok(format!(
"{}\n{}",
String::from_utf8_lossy(&output.stdout).trim(),
String::from_utf8_lossy(&output.stderr).trim(),
))
}
None => Err(io::Error::new(io::ErrorKind::NotFound, "brew")),
}
}
/// Returns the first Homebrew install prefix (from `INSTALL_DIRS`) whose
/// `bin` directory contains a `brew` executable, or `None` if none does.
///
/// Panics if the home or current directory cannot be determined (same as
/// the rest of this module's helpers).
pub fn brew_prefix() -> Option<PathBuf> {
    let home_dir = dirs::home_dir().expect("brew: no $HOME");
    // Hoisted out of the loop: the cwd does not change between candidates.
    let cwd = env::current_dir().expect("brew: no $PWD");
    for prefix in INSTALL_DIRS {
        // Expand only a *leading* `~/` to the home directory. The previous
        // `str::replace("~", ...)` would have substituted every tilde in the
        // path, and `to_owned().to_string()` was a redundant double
        // conversion.
        let dir = match prefix.strip_prefix("~/") {
            Some(rest) => home_dir.join(rest),
            None => PathBuf::from(*prefix),
        };
        if which_in("brew", Some(dir.join("bin")), &cwd).is_ok() {
            return Some(dir);
        }
    }
    None
}
/// Path to the `brew` executable under the detected prefix, if any.
fn brew_exe() -> Option<PathBuf> {
    // clippy::manual_map: `match Some/None` folded into `.map()`.
    brew_prefix().map(|p| p.join("bin").join("brew"))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: resolving the brew executable path must not panic in a
    // normal environment, regardless of whether Homebrew is installed.
    #[test]
    fn brew_exe_does_not_panic() {
        brew_exe();
    }
}
| 23.8 | 92 | 0.515231 |
d78d65574475df088dab76778da9e4e5eae6bc87 | 1,438 | use std::io;
use unicode_width::UnicodeWidthStr;
/// Renders a table as an ASCII grid (`+---+` borders, `| cell |` rows)
/// to the wrapped writer.
pub struct AsciiWriter<T: io::Write> {
    // Underlying sink that receives the rendered output.
    writer: T,
}

impl<T: io::Write> AsciiWriter<T> {
    /// Wraps `writer`; all rendered output is forwarded to it.
    pub fn new(writer: T) -> Self {
        AsciiWriter { writer }
    }
}
impl<T: io::Write> crate::Write for AsciiWriter<T> {
    /// Renders `table` as an ASCII grid: a border line, an optional header
    /// row followed by another border, the data rows, and a closing border.
    ///
    /// Cells are left-aligned and padded to the column width using display
    /// width (via `unicode_width`), so wide characters line up correctly.
    fn write(&mut self, table: crate::Table) -> crate::Result<()> {
        let column_widths = table.column_widths();

        // `+-...-+` separator sized to the widest cell of each column.
        let mut border = String::new();
        for width in column_widths.iter() {
            // `*width` instead of `width.clone()`: usize is Copy.
            border += &format!("+-{}-", "-".repeat(*width));
        }
        border += "+\n";

        // Bug fix: `Write::write` may perform a *partial* write and its byte
        // count was discarded; `write_all` guarantees the whole buffer is
        // written (or errors).
        self.writer.write_all(border.as_bytes())?;
        if let Some(headers) = table.headers {
            for (j, value) in headers.iter().enumerate() {
                let spaces = " ".repeat(column_widths[j] - UnicodeWidthStr::width(value as &str));
                let cell = format!("| {}{} ", value, spaces);
                self.writer.write_all(cell.as_bytes())?;
            }
            self.writer.write_all(b"|\n")?;
            self.writer.write_all(border.as_bytes())?;
        }
        for row in table.rows.iter() {
            for (j, value) in row.iter().enumerate() {
                let spaces = " ".repeat(column_widths[j] - UnicodeWidthStr::width(value as &str));
                let cell = format!("| {}{} ", value, spaces);
                self.writer.write_all(cell.as_bytes())?;
            }
            self.writer.write_all(b"|\n")?;
        }
        self.writer.write_all(border.as_bytes())?;
        Ok(())
    }

    /// Flushes the underlying writer.
    fn flush(&mut self) -> crate::Result<()> {
        self.writer.flush()?;
        Ok(())
    }
}
| 25.678571 | 90 | 0.570236 |
87d35cbf73c54fc9a378545f25e54c3ca0873476 | 9,080 | use crate::{error::{Error,
Result},
protocol::{self,
ShutdownMethod},
service::Service};
use core::{os::{process::{handle_from_pid,
windows_child::{ExitStatus,
Handle}},
users::get_current_username},
util};
use std::{collections::HashMap,
env,
io,
mem,
time::{Duration,
Instant}};
use winapi::{shared::{minwindef::{DWORD,
LPDWORD,
MAX_PATH},
winerror::WAIT_TIMEOUT},
um::{handleapi::{self,
INVALID_HANDLE_VALUE},
processthreadsapi,
synchapi,
tlhelp32::{self,
LPPROCESSENTRY32W,
PROCESSENTRY32W,
TH32CS_SNAPPROCESS},
winbase::{INFINITE,
WAIT_OBJECT_0},
wincon}};
const PROCESS_ACTIVE: u32 = 259;
type ProcessTable = HashMap<DWORD, Vec<DWORD>>;
/// A handle to a spawned Windows child process, together with the last
/// exit status observed for it.
pub struct Process {
    // Win32 process handle owned by this struct.
    handle: Handle,
    // Cached exit status; `None` while the process is still running.
    last_status: Option<ExitStatus>,
}
impl Process {
    // Wraps a raw process handle; no exit status has been observed yet.
    fn new(handle: Handle) -> Self {
        Process { handle,
                  last_status: None }
    }
    /// Returns the Windows process id (PID) for the underlying handle.
    pub fn id(&self) -> u32 { unsafe { processthreadsapi::GetProcessId(self.handle.raw()) as u32 } }
    /// Attempt to gracefully terminate a process and then forcefully kill it after
    /// 8 seconds if it has not terminated.
    ///
    /// Graceful termination is requested by sending a ctrl-break event to the
    /// process; if that request fails or the timeout elapses, the process and
    /// all of its descendants are forcibly terminated.
    pub fn kill(&mut self) -> ShutdownMethod {
        if self.status().is_some() {
            return ShutdownMethod::AlreadyExited;
        }
        // Ask the process to shut down politely; a zero return means the
        // ctrl-break request itself failed.
        let ret = unsafe { wincon::GenerateConsoleCtrlEvent(1, self.id()) };
        if ret == 0 {
            debug!("Failed to send ctrl-break to pid {}: {}",
                   self.id(),
                   io::Error::last_os_error());
        }
        let shutdown_timeout = Duration::from_secs(8);
        let start_time = Instant::now();
        // NOTE(review): this polls `status()` in a tight busy-wait with no
        // sleep between iterations, pinning a core for up to 8 seconds.
        loop {
            if ret == 0 || start_time.elapsed() > shutdown_timeout {
                let proc_table = build_proc_table();
                terminate_process_descendants(&proc_table, self.id());
                return ShutdownMethod::Killed;
            }
            if self.status().is_some() {
                return ShutdownMethod::GracefulTermination;
            }
        }
    }
    /// Blocks until the process exits and returns its exit status.
    pub fn wait(&mut self) -> io::Result<ExitStatus> {
        unsafe {
            let res = synchapi::WaitForSingleObject(self.handle.raw(), INFINITE);
            if res != WAIT_OBJECT_0 {
                return Err(io::Error::last_os_error());
            }
            let mut status = 0;
            cvt(processthreadsapi::GetExitCodeProcess(self.handle.raw(), &mut status))?;
            Ok(ExitStatus::from(status))
        }
    }
    /// Non-blocking exit check: `Ok(None)` while still running,
    /// `Ok(Some(status))` once the process has exited.
    pub fn try_wait(&mut self) -> io::Result<Option<ExitStatus>> {
        unsafe {
            // Zero-millisecond wait: WAIT_TIMEOUT means "still running".
            match synchapi::WaitForSingleObject(self.handle.raw(), 0) {
                WAIT_OBJECT_0 => {}
                WAIT_TIMEOUT => return Ok(None),
                _ => return Err(io::Error::last_os_error()),
            }
            let mut status = 0;
            cvt(processthreadsapi::GetExitCodeProcess(self.handle.raw(), &mut status))?;
            Ok(Some(ExitStatus::from(status)))
        }
    }
    // Returns the cached exit status, querying the OS when nothing is cached.
    // A running process reports PROCESS_ACTIVE (259, i.e. STILL_ACTIVE),
    // which is mapped to `None` here.
    fn status(&mut self) -> Option<ExitStatus> {
        if self.last_status.is_some() {
            return self.last_status;
        }
        match exit_code(&self.handle) {
            Some(PROCESS_ACTIVE) => None,
            Some(code) => {
                self.last_status = Some(ExitStatus::from(code));
                self.last_status
            }
            None => None,
        }
    }
}
/// Spawns the service described by `msg` through PowerShell and wraps the
/// resulting child in a [`Service`].
///
/// `msg.svc_user` is required; `Err(UserNotFound)` is returned when absent.
/// The binary is executed by reading it and piping it through `iex` so it
/// runs inside the PowerShell session.
pub fn run(msg: protocol::Spawn) -> Result<Service> {
    debug!("launcher is spawning {}", msg.binary);
    let ps_cmd = format!("iex $(gc {} | out-string)", &msg.binary);
    let password = msg.svc_password.clone();
    let user = match msg.svc_user.as_ref() {
        Some(u) => {
            // In the case where we are spawning on behalf of an older Supervisor
            // we will need to revert to older 'get_current_username' behavior. When
            // running as the Local System account, the former behavior would return
            // the host name followed by a dollar sign. The new behavior returns
            // 'system'. Both the supervisor and the launcher behavior must match.
            // Otherwise if we are running under system, we will interpret the user
            // to spawn as different from ourselves and thus attempt to logon as ourselves
            // which will fail since you cannot simply logon as system. One day we can
            // remove this when we are confident everyone is on a recent supervisor
            // and launcher.
            let mut username = u.to_string();
            if get_current_username()? == Some("system".to_string()) {
                if let Ok(cn) = env::var("COMPUTERNAME") {
                    if u == &(cn.to_lowercase() + "$") {
                        username = "system".to_string();
                    }
                }
            }
            username
        }
        None => {
            return Err(Error::UserNotFound(String::from("")));
        }
    };
    let new_env = msg.env.clone().into_iter().collect();
    match util::spawn_pwsh(&ps_cmd, &new_env, &user, password) {
        Ok(child) => {
            let process = Process::new(child.handle);
            Ok(Service::new(msg, process, child.stdout, child.stderr))
        }
        // NOTE(review): the original spawn error is discarded here;
        // last_os_error() may not describe the actual failure.
        Err(_) => Err(Error::Spawn(io::Error::last_os_error())),
    }
}
/// Builds a map from parent PID to the list of its direct child PIDs by
/// walking a full process snapshot (CreateToolhelp32Snapshot).
///
/// Returns an empty table (after logging) when the snapshot cannot be
/// created or contains no entries.
fn build_proc_table() -> ProcessTable {
    let processes_snap_handle =
        unsafe { tlhelp32::CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) };
    if processes_snap_handle == INVALID_HANDLE_VALUE {
        error!("Failed to call CreateToolhelp32Snapshot: {}",
               io::Error::last_os_error());
        return ProcessTable::new();
    }
    let mut table = ProcessTable::new();
    let mut process_entry = PROCESSENTRY32W { dwSize: mem::size_of::<PROCESSENTRY32W>()
                                                      as u32,
                                              cntUsage: 0,
                                              th32ProcessID: 0,
                                              th32DefaultHeapID: 0,
                                              th32ModuleID: 0,
                                              cntThreads: 0,
                                              th32ParentProcessID: 0,
                                              pcPriClassBase: 0,
                                              dwFlags: 0,
                                              szExeFile: [0; MAX_PATH], };
    // Get the first process from the snapshot.
    match unsafe {
        tlhelp32::Process32FirstW(processes_snap_handle,
                                  &mut process_entry as LPPROCESSENTRY32W)
    } {
        1 => {
            // First entry read successfully; walk every remaining entry,
            // recording each process id under its parent's id.
            let mut process_success: i32 = 1;
            while process_success == 1 {
                let children = table.entry(process_entry.th32ParentProcessID)
                                    .or_insert_with(Vec::new);
                (*children).push(process_entry.th32ProcessID);
                process_success =
                    unsafe { tlhelp32::Process32NextW(processes_snap_handle, &mut process_entry) };
            }
            unsafe { handleapi::CloseHandle(processes_snap_handle) };
        }
        _ => unsafe {
            handleapi::CloseHandle(processes_snap_handle);
        },
    }
    table
}
/// Converts a Win32 BOOL-style return value into an `io::Result`: zero
/// signals failure (mapped to the thread's last OS error); any other value
/// is passed through unchanged.
fn cvt(i: i32) -> io::Result<i32> {
    match i {
        0 => Err(io::Error::last_os_error()),
        ok => Ok(ok),
    }
}
/// Queries the exit code for `handle` via GetExitCodeProcess.
///
/// Returns `None` (after logging) when the call fails. A still-running
/// process yields `Some(259)` (STILL_ACTIVE / PROCESS_ACTIVE); callers must
/// treat that value specially.
fn exit_code(handle: &Handle) -> Option<u32> {
    let mut exit_code: u32 = 0;
    unsafe {
        let ret = processthreadsapi::GetExitCodeProcess(handle.raw(), &mut exit_code as LPDWORD);
        if ret == 0 {
            error!("Failed to retrieve Exit Code: {}",
                   io::Error::last_os_error());
            return None;
        }
    }
    Some(exit_code)
}
/// Recursively terminates `pid` and every descendant recorded in `table`,
/// depth-first (children before their parent).
fn terminate_process_descendants(table: &ProcessTable, pid: DWORD) {
    if let Some(children) = table.get(&pid) {
        for child in children {
            terminate_process_descendants(table, *child);
        }
    }
    unsafe {
        // A process we cannot get a handle for (e.g. already gone) is
        // silently skipped; a failed TerminateProcess is only logged.
        if let Some(h) = handle_from_pid(pid) {
            if processthreadsapi::TerminateProcess(h, 1) == 0 {
                error!("Failed to call TerminateProcess on pid {}: {}",
                       pid,
                       io::Error::last_os_error());
            }
        }
    }
}
| 37.366255 | 100 | 0.49978 |
e2d90b7191fe104b8ee5383a429eb3ae5729cb77 | 1,307 | mod position;
mod vent;
#[cfg(test)]
mod tests;
use std::collections::HashMap;
use position::Position;
use vent::Vent;
type Input = Vec<Vent>;
/// Parses one [`Vent`] per input line, panicking on any malformed line.
pub fn parse_input(text: &str) -> Input {
    text.lines()
        .map(|line| line.parse().unwrap())
        .collect()
}
/// Counts grid positions covered by at least two horizontal or vertical
/// vents; diagonal vents are excluded.
pub fn solve_part1(input: Input) -> usize {
    let mut position_count: HashMap<Position, isize> = HashMap::new();
    for pos in input
        .iter()
        .filter(|vent| !vent.is_diag())
        .flat_map(Vent::positions)
    {
        // Entry API: a single hash lookup replaces the previous
        // get_mut-or-insert sequence inside a fold.
        *position_count.entry(pos).or_insert(0) += 1;
    }
    position_count.into_values().filter(|&v| v > 1).count()
}
/// Counts grid positions covered by at least two vents of any orientation
/// (diagonals included).
pub fn solve_part2(input: Input) -> usize {
    let mut position_count: HashMap<Position, isize> = HashMap::new();
    for pos in input.iter().flat_map(Vent::positions) {
        // Entry API: a single hash lookup replaces the previous
        // get_mut-or-insert sequence inside a fold.
        *position_count.entry(pos).or_insert(0) += 1;
    }
    position_count.into_values().filter(|&v| v > 1).count()
}
| 25.134615 | 63 | 0.557001 |
0a363c28e6e31f90b8cc8f5de4c5faeb14f4eb1a | 2,782 | '''
Problem Challenge 1
K Pairs with Largest Sums (Hard)
Given two sorted arrays in descending order, find ‘K’ pairs with the largest sum where each pair consists of numbers from both the arrays.
Example 1:
Input: L1=[9, 8, 2], L2=[6, 3, 1], K=3
Output: [9, 3], [9, 6], [8, 6]
Explanation: These 3 pairs have the largest sum. No other pair has a sum larger than any of these.
Example 2:
Input: L1=[5, 2, 1], L2=[2, -1], K=3
Output: [5, 2], [5, -1], [2, 2]
'''
#mycode
from heapq import *
def find_k_largest_pairs(nums1, nums2, k):
    """Return the k pairs (one element from each array) with the largest sums.

    Both arrays must be sorted in descending order.  A min-heap of at most k
    (sum, pair) entries keeps the best candidates seen so far; pairs are
    returned in ascending order of their sums.
    """
    result = []
    heap = []
    for i in range(min(k, len(nums1))):
        for j in range(min(k, len(nums2))):
            pair_sum = nums1[i] + nums2[j]
            if len(heap) < k:
                heappush(heap, (pair_sum, [nums1[i], nums2[j]]))
            elif pair_sum > heap[0][0]:
                # Strictly larger than the current minimum: replace it.
                heappop(heap)
                heappush(heap, (pair_sum, [nums1[i], nums2[j]]))
            else:
                # Arrays are sorted descending, so every later j in this row
                # yields an even smaller sum: nothing else here can qualify.
                break
    while heap:
        _, pair = heappop(heap)
        result.append(pair)
    return result
def main():
    """Print the k largest-sum pairs for the sample input."""
    pairs = find_k_largest_pairs([9, 8, 2], [6, 3, 1], 3)
    print("Pairs with largest sum are: " + str(pairs))
main()
#answer
from __future__ import print_function
from heapq import *
def find_k_largest_pairs(nums1, nums2, k):
    """Return the k pairs (one element from each array) with the largest sums.

    Both arrays are assumed sorted in descending order, which lets the inner
    loop stop as soon as a candidate sum drops below the smallest sum kept
    in the min-heap.
    """
    min_heap = []
    for i in range(min(k, len(nums1))):
        for j in range(min(k, len(nums2))):
            pair_sum = nums1[i] + nums2[j]
            if len(min_heap) < k:
                heappush(min_heap, (pair_sum, i, j))
            elif pair_sum < min_heap[0][0]:
                # Descending order means every later j only shrinks the sum,
                # so the rest of this row can never beat the heap minimum.
                break
            else:
                # A sum at least as large as the minimum: evict the minimum.
                heappop(min_heap)
                heappush(min_heap, (pair_sum, i, j))
    return [[nums1[i], nums2[j]] for (_, i, j) in min_heap]
def main():
    """Print the k largest-sum pairs for the sample input."""
    answer = find_k_largest_pairs([9, 8, 2], [6, 3, 1], 3)
    print("Pairs with largest sum are: " + str(answer))
main()
'''
Time complexity
Since, at most, we’ll be going through all the elements of both arrays and we will add/remove one element in the heap in each step,
the time complexity of the above algorithm will be O(N*M*logK) where ‘N’ and ‘M’ are the total number of elements in both arrays, respectively.
If we assume that both arrays have at least ‘K’ elements then the time complexity can be simplified to O(K^2logK),
because we are not iterating more than ‘K’ elements in both arrays.
Space complexity
The space complexity will be O(K) because, at any time, our Min Heap will be storing ‘K’ largest pairs.
''' | 27.544554 | 143 | 0.637311 |
01e3628ca47718a5dfcf9c00d2b4bb6bda93189e | 5,403 | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
use aws_auth::Credentials;
use aws_sigv4_poc::{PayloadChecksumKind, SignableBody, SigningSettings, UriEncoding};
use aws_types::region::SigningRegion;
use aws_types::SigningService;
use http::header::HeaderName;
use smithy_http::body::SdkBody;
use std::error::Error;
use std::fmt;
use std::time::SystemTime;
#[derive(Eq, PartialEq, Clone, Copy)]
pub enum SigningAlgorithm {
SigV4,
}
#[derive(Eq, PartialEq, Clone, Copy)]
pub enum HttpSignatureType {
/// A signature for a full http request should be computed, with header updates applied to the signing result.
HttpRequestHeaders,
/* Currently Unsupported
/// A signature for a full http request should be computed, with query param updates applied to the signing result.
///
/// This is typically used for presigned URLs & is currently unsupported.
HttpRequestQueryParams,
*/
}
/// Signing Configuration for an Operation
///
/// Although these fields MAY be customized on a per request basis, they are generally static
/// for a given operation
#[derive(Clone, PartialEq, Eq)]
pub struct OperationSigningConfig {
pub algorithm: SigningAlgorithm,
pub signature_type: HttpSignatureType,
pub signing_options: SigningOptions,
}
impl OperationSigningConfig {
    /// Placeholder method to provide the signing configuration used for most operations
    ///
    /// In the future, we will code-generate a default configuration for each service
    pub fn default_config() -> Self {
        OperationSigningConfig {
            algorithm: SigningAlgorithm::SigV4,
            signature_type: HttpSignatureType::HttpRequestHeaders,
            signing_options: SigningOptions {
                double_uri_encode: true,
                content_sha256_header: false,
            },
        }
    }
}
#[derive(Clone, Eq, PartialEq)]
#[non_exhaustive]
pub struct SigningOptions {
pub double_uri_encode: bool,
pub content_sha256_header: bool,
/*
Currently unsupported:
pub normalize_uri_path: bool,
pub omit_session_token: bool,
*/
}
/// Signing Configuration for an individual Request
///
/// These fields may vary on a per-request basis
#[derive(Clone, PartialEq, Eq)]
pub struct RequestConfig<'a> {
pub request_ts: SystemTime,
pub region: &'a SigningRegion,
pub service: &'a SigningService,
}
#[derive(Clone, Default)]
pub struct SigV4Signer {
// In the future, the SigV4Signer will use the CRT signer. This will require constructing
// and holding an instance of the signer, so prevent people from constructing a SigV4Signer without
// going through the constructor.
_private: (),
}
impl fmt::Debug for SigV4Signer {
    // Manual impl so the private `_private` marker field never appears in
    // debug output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SigV4Signer").finish()
    }
}
/// Boxed, thread-safe error type returned by signing operations.
pub type SigningError = Box<dyn Error + Send + Sync>;
impl SigV4Signer {
    /// Creates a new signer.
    pub fn new() -> Self {
        SigV4Signer { _private: () }
    }
    /// Sign a request using the SigV4 Protocol
    ///
    /// Although the direct signing implementation MAY be used directly. End users will not typically
    /// interact with this code. It is generally used via middleware in the request pipeline. See [`SigV4SigningStage`](crate::middleware::SigV4SigningStage).
    pub fn sign(
        &self,
        operation_config: &OperationSigningConfig,
        request_config: &RequestConfig<'_>,
        credentials: &Credentials,
        request: &mut http::Request<SdkBody>,
    ) -> Result<(), SigningError> {
        // Translate the operation-level options into sigv4 signing settings.
        let mut settings = SigningSettings::default();
        settings.uri_encoding = if operation_config.signing_options.double_uri_encode {
            UriEncoding::Double
        } else {
            UriEncoding::Single
        };
        settings.payload_checksum_kind = if operation_config.signing_options.content_sha256_header {
            PayloadChecksumKind::XAmzSha256
        } else {
            PayloadChecksumKind::NoHeader
        };
        let sigv4_config = aws_sigv4_poc::Config {
            access_key: credentials.access_key_id(),
            secret_key: credentials.secret_access_key(),
            security_token: credentials.session_token(),
            region: request_config.region.as_ref(),
            svc: request_config.service.as_ref(),
            date: request_config.request_ts,
            settings,
        };
        // A body that is already in memory can be signed directly. A body that is not in memory
        // (any sort of streaming body) will be signed via UNSIGNED-PAYLOAD.
        // The final enhancement that will come a bit later is writing a `SignableBody::Precomputed`
        // into the property bag when we have a sha 256 middleware that can compute a streaming checksum
        // for replayable streams but currently even replayable streams will result in `UNSIGNED-PAYLOAD`
        let signable_body = request
            .body()
            .bytes()
            .map(SignableBody::Bytes)
            .unwrap_or(SignableBody::UnsignedPayload);
        // Append every header produced by the signer onto the outgoing request.
        for (key, value) in aws_sigv4_poc::sign_core(request, signable_body, &sigv4_config)? {
            request
                .headers_mut()
                .append(HeaderName::from_static(key), value);
        }
        Ok(())
    }
}
| 34.858065 | 158 | 0.671109 |
6147b10c0af2c78a6d67f63fd54fb907f86b2632 | 9,101 | // Copyright © 2021 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
use crate::GuestMemoryMmap;
use std::fs::File;
use std::io::{Read, Seek, SeekFrom};
use thiserror::Error;
use vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemoryError};
#[derive(Error, Debug)]
pub enum TdvfError {
#[error("Failed read TDVF descriptor: {0}")]
ReadDescriptor(#[source] std::io::Error),
#[error("Failed read TDVF descriptor offset: {0}")]
ReadDescriptorOffset(#[source] std::io::Error),
#[error("Invalid descriptor signature")]
InvalidDescriptorSignature,
#[error("Invalid descriptor size")]
InvalidDescriptorSize,
#[error("Invalid descriptor version")]
InvalidDescriptorVersion,
#[error("Failed to write HOB details to guest memory: {0}")]
GuestMemoryWriteHob(#[source] GuestMemoryError),
}
// TDVF_DESCRIPTOR
#[repr(packed)]
pub struct TdvfDescriptor {
signature: [u8; 4],
length: u32,
version: u32,
num_sections: u32, // NumberOfSectionEntry
}
// TDVF_SECTION
#[repr(packed)]
#[derive(Clone, Copy, Default, Debug)]
pub struct TdvfSection {
pub data_offset: u32,
pub data_size: u32, // RawDataSize
pub address: u64, // MemoryAddress
pub size: u64, // MemoryDataSize
pub r#type: TdvfSectionType,
pub attributes: u32,
}
#[repr(u32)]
#[derive(Clone, Copy, Debug)]
pub enum TdvfSectionType {
Bfv,
Cfv,
TdHob,
TempMem,
Reserved = 0xffffffff,
}
impl Default for TdvfSectionType {
fn default() -> Self {
TdvfSectionType::Reserved
}
}
/// Locates and parses the TDVF metadata in `file`, returning its section
/// table.
///
/// Validates the descriptor signature (`"TDVF"`), the advertised length
/// against the declared section count, and the descriptor version (must
/// be 1).
pub fn parse_tdvf_sections(file: &mut File) -> Result<Vec<TdvfSection>, TdvfError> {
    // The 32-bit offset to the TDVF metadata is located 32 bytes from
    // the end of the file.
    // See "TDVF Metadata Pointer" in "TDX Virtual Firmware Design Guide".
    file.seek(SeekFrom::End(-0x20))
        .map_err(TdvfError::ReadDescriptorOffset)?;
    let mut descriptor_offset: [u8; 4] = [0; 4];
    file.read_exact(&mut descriptor_offset)
        .map_err(TdvfError::ReadDescriptorOffset)?;
    let descriptor_offset = u32::from_le_bytes(descriptor_offset) as u64;
    file.seek(SeekFrom::Start(descriptor_offset))
        .map_err(TdvfError::ReadDescriptor)?;
    // An all-zero TdvfDescriptor is a valid bit pattern (plain integers/bytes).
    let mut descriptor: TdvfDescriptor = unsafe { std::mem::zeroed() };
    // Safe as we read exactly the size of the descriptor header
    file.read_exact(unsafe {
        std::slice::from_raw_parts_mut(
            &mut descriptor as *mut _ as *mut u8,
            std::mem::size_of::<TdvfDescriptor>(),
        )
    })
    .map_err(TdvfError::ReadDescriptor)?;
    if &descriptor.signature != b"TDVF" {
        return Err(TdvfError::InvalidDescriptorSignature);
    }
    // The declared length must cover the header plus every section entry.
    if descriptor.length as usize
        != std::mem::size_of::<TdvfDescriptor>()
            + std::mem::size_of::<TdvfSection>() * descriptor.num_sections as usize
    {
        return Err(TdvfError::InvalidDescriptorSize);
    }
    if descriptor.version != 1 {
        return Err(TdvfError::InvalidDescriptorVersion);
    }
    let mut sections = Vec::new();
    sections.resize_with(descriptor.num_sections as usize, TdvfSection::default);
    // Safe as we read exactly the advertised sections
    file.read_exact(unsafe {
        std::slice::from_raw_parts_mut(
            sections.as_mut_ptr() as *mut u8,
            descriptor.num_sections as usize * std::mem::size_of::<TdvfSection>(),
        )
    })
    .map_err(TdvfError::ReadDescriptor)?;
    Ok(sections)
}
#[repr(u16)]
#[derive(Copy, Clone, Debug)]
enum HobType {
Handoff = 0x1,
ResourceDescriptor = 0x3,
Unused = 0xfffe,
EndOfHobList = 0xffff,
}
impl Default for HobType {
fn default() -> Self {
HobType::Unused
}
}
#[repr(C)]
#[derive(Copy, Clone, Default, Debug)]
struct HobHeader {
r#type: HobType,
length: u16,
reserved: u32,
}
unsafe impl ByteValued for HobHeader {}
#[repr(C)]
#[derive(Copy, Clone, Default, Debug)]
struct HobHandoffInfoTable {
header: HobHeader,
version: u32,
efi_memory_top: u64,
efi_memory_bottom: u64,
efi_free_memory_top: u64,
efi_free_memory_bottom: u64,
efi_end_of_hob_list: u64,
}
unsafe impl ByteValued for HobHandoffInfoTable {}
#[repr(C)]
#[derive(Copy, Clone, Default, Debug)]
struct EfiGuid {
data1: u32,
data2: u16,
data3: u16,
data4: [u8; 8],
}
#[repr(C)]
#[derive(Copy, Clone, Default, Debug)]
struct HobResourceDescriptor {
header: HobHeader,
owner: EfiGuid,
resource_type: u32,
resource_attribute: u32,
physical_start: u64,
resource_length: u64,
}
unsafe impl ByteValued for HobResourceDescriptor {}
pub struct TdHob {
start_offset: u64,
current_offset: u64,
}
/// Rounds `v` up to the next multiple of 8 (HOB entries are 8-byte aligned).
fn align_hob(v: u64) -> u64 {
    // Equivalent to (v + 7) / 8 * 8: add 7, then clear the low 3 bits.
    (v + 7) & !7
}
impl TdHob {
    // Advances the write cursor past a value of type `T`, keeping the
    // cursor 8-byte aligned as HOB entries require.
    fn update_offset<T>(&mut self) {
        self.current_offset = align_hob(self.current_offset + std::mem::size_of::<T>() as u64)
    }
    /// Begins a HOB list at `offset`.
    pub fn start(offset: u64) -> TdHob {
        // Leave a gap to place the HandoffTable at the start as it can only be filled in later
        let mut hob = TdHob {
            start_offset: offset,
            current_offset: offset,
        };
        hob.update_offset::<HobHandoffInfoTable>();
        hob
    }
    /// Terminates the HOB list: writes the end-of-list marker, then goes back
    /// and fills in the handoff info table (which needs to know where the
    /// list ends).
    pub fn finish(&mut self, mem: &GuestMemoryMmap) -> Result<(), TdvfError> {
        // Write end
        let end = HobHeader {
            r#type: HobType::EndOfHobList,
            length: std::mem::size_of::<HobHeader>() as u16,
            reserved: 0,
        };
        info!("Writing HOB end {:x} {:x?}", self.current_offset, end);
        mem.write_obj(end, GuestAddress(self.current_offset))
            .map_err(TdvfError::GuestMemoryWriteHob)?;
        self.update_offset::<HobHeader>();
        // Write handoff, delayed as it needs end of HOB list
        let efi_end_of_hob_list = self.current_offset;
        let handoff = HobHandoffInfoTable {
            header: HobHeader {
                r#type: HobType::Handoff,
                length: std::mem::size_of::<HobHandoffInfoTable>() as u16,
                reserved: 0,
            },
            version: 0x9,
            efi_memory_top: 0,
            efi_memory_bottom: 0,
            efi_free_memory_top: 0,
            efi_free_memory_bottom: 0,
            efi_end_of_hob_list,
        };
        info!("Writing HOB start {:x} {:x?}", self.start_offset, handoff);
        mem.write_obj(handoff, GuestAddress(self.start_offset))
            .map_err(TdvfError::GuestMemoryWriteHob)
    }
    /// Appends a generic resource-descriptor HOB and advances the cursor.
    pub fn add_resource(
        &mut self,
        mem: &GuestMemoryMmap,
        physical_start: u64,
        resource_length: u64,
        resource_type: u32,
        resource_attribute: u32,
    ) -> Result<(), TdvfError> {
        let resource_descriptor = HobResourceDescriptor {
            header: HobHeader {
                r#type: HobType::ResourceDescriptor,
                length: std::mem::size_of::<HobResourceDescriptor>() as u16,
                reserved: 0,
            },
            owner: EfiGuid::default(),
            resource_type,
            resource_attribute,
            physical_start,
            resource_length,
        };
        info!(
            "Writing HOB resource {:x} {:x?}",
            self.current_offset, resource_descriptor
        );
        mem.write_obj(resource_descriptor, GuestAddress(self.current_offset))
            .map_err(TdvfError::GuestMemoryWriteHob)?;
        self.update_offset::<HobResourceDescriptor>();
        Ok(())
    }
    /// Appends a descriptor for a memory range: system RAM when `ram` is
    /// true, reserved memory otherwise.
    pub fn add_memory_resource(
        &mut self,
        mem: &GuestMemoryMmap,
        physical_start: u64,
        resource_length: u64,
        ram: bool,
    ) -> Result<(), TdvfError> {
        self.add_resource(
            mem,
            physical_start,
            resource_length,
            if ram {
                0 /* EFI_RESOURCE_SYSTEM_MEMORY */
            } else {
                0x5 /*EFI_RESOURCE_MEMORY_RESERVED */
            },
            /* TODO:
             * QEMU currently fills it in like this:
             * EFI_RESOURCE_ATTRIBUTE_PRESENT | EFI_RESOURCE_ATTRIBUTE_INITIALIZED | EFI_RESOURCE_ATTRIBUTE_TESTED
             * which differs from the spec (due to TDVF implementation issue?)
             */
            0x7,
        )
    }
    /// Appends a descriptor for a memory-mapped I/O range.
    pub fn add_mmio_resource(
        &mut self,
        mem: &GuestMemoryMmap,
        physical_start: u64,
        resource_length: u64,
    ) -> Result<(), TdvfError> {
        self.add_resource(
            mem,
            physical_start,
            resource_length,
            0x1, /* EFI_RESOURCE_MEMORY_MAPPED_IO */
            /*
             * EFI_RESOURCE_ATTRIBUTE_PRESENT | EFI_RESOURCE_ATTRIBUTE_INITIALIZED | EFI_RESOURCE_ATTRIBUTE_UNCACHEABLE
             */
            0x403,
        )
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Requires a local `tdvf.fd` firmware image, so it is #[ignore]d by
    // default; run manually with `cargo test -- --ignored`.
    #[test]
    #[ignore]
    fn test_parse_tdvf_sections() {
        let mut f = std::fs::File::open("tdvf.fd").unwrap();
        let sections = parse_tdvf_sections(&mut f).unwrap();
        for section in sections {
            eprintln!("{:x?}", section)
        }
    }
}
| 28.440625 | 119 | 0.607955 |
0e02928495f060aa008a26219b5c0952788d762d | 14,255 | //! `TcpStream` split support.
//!
//! A `TcpStream` can be split into a `ReadHalf` and a
//! `WriteHalf` with the `TcpStream::split` method. `ReadHalf`
//! implements `AsyncRead` while `WriteHalf` implements `AsyncWrite`.
//!
//! Compared to the generic split of `AsyncRead + AsyncWrite`, this specialized
//! split has no associated overhead and enforces all invariants at the type
//! level.
use crate::future::poll_fn;
use crate::io::{AsyncRead, AsyncWrite, Interest, ReadBuf, Ready};
use crate::net::TcpStream;
use std::io;
use std::net::{Shutdown, SocketAddr};
use std::pin::Pin;
use std::task::{Context, Poll};
cfg_io_util! {
use bytes::BufMut;
}
/// Borrowed read half of a [`TcpStream`], created by [`split`].
///
/// Reading from a `ReadHalf` is usually done using the convenience methods found on the
/// [`AsyncReadExt`] trait.
///
/// [`TcpStream`]: TcpStream
/// [`split`]: TcpStream::split()
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
#[derive(Debug)]
pub struct ReadHalf<'a>(&'a TcpStream);
/// Borrowed write half of a [`TcpStream`], created by [`split`].
///
/// Note that in the [`AsyncWrite`] implementation of this type, [`poll_shutdown`] will
/// shut down the TCP stream in the write direction.
///
/// Writing to an `WriteHalf` is usually done using the convenience methods found
/// on the [`AsyncWriteExt`] trait.
///
/// [`TcpStream`]: TcpStream
/// [`split`]: TcpStream::split()
/// [`AsyncWrite`]: trait@crate::io::AsyncWrite
/// [`poll_shutdown`]: fn@crate::io::AsyncWrite::poll_shutdown
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
#[derive(Debug)]
pub struct WriteHalf<'a>(&'a TcpStream);
/// Splits a `&mut TcpStream` into read and write halves that both hold a
/// shared borrow of the same stream.
pub(crate) fn split(stream: &mut TcpStream) -> (ReadHalf<'_>, WriteHalf<'_>) {
    let inner = &*stream;
    (ReadHalf(inner), WriteHalf(inner))
}
impl ReadHalf<'_> {
/// Attempts to receive data on the socket, without removing that data from
/// the queue, registering the current task for wakeup if data is not yet
/// available.
///
/// Note that on multiple calls to `poll_peek` or `poll_read`, only the
/// `Waker` from the `Context` passed to the most recent call is scheduled
/// to receive a wakeup.
///
/// See the [`TcpStream::poll_peek`] level documentation for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::io::{self, ReadBuf};
/// use tokio::net::TcpStream;
///
/// use futures::future::poll_fn;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut stream = TcpStream::connect("127.0.0.1:8000").await?;
/// let (mut read_half, _) = stream.split();
/// let mut buf = [0; 10];
/// let mut buf = ReadBuf::new(&mut buf);
///
/// poll_fn(|cx| {
/// read_half.poll_peek(cx, &mut buf)
/// }).await?;
///
/// Ok(())
/// }
/// ```
///
/// [`TcpStream::poll_peek`]: TcpStream::poll_peek
pub fn poll_peek(
&mut self,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<usize>> {
self.0.poll_peek(cx, buf)
}
/// Receives data on the socket from the remote address to which it is
/// connected, without removing that data from the queue. On success,
/// returns the number of bytes peeked.
///
/// See the [`TcpStream::peek`] level documentation for more details.
///
/// [`TcpStream::peek`]: TcpStream::peek
///
/// # Examples
///
/// ```no_run
/// use tokio::net::TcpStream;
/// use tokio::io::AsyncReadExt;
/// use std::error::Error;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn Error>> {
/// // Connect to a peer
/// let mut stream = TcpStream::connect("127.0.0.1:8080").await?;
/// let (mut read_half, _) = stream.split();
///
/// let mut b1 = [0; 10];
/// let mut b2 = [0; 10];
///
/// // Peek at the data
/// let n = read_half.peek(&mut b1).await?;
///
/// // Read the data
/// assert_eq!(n, read_half.read(&mut b2[..n]).await?);
/// assert_eq!(&b1[..n], &b2[..n]);
///
/// Ok(())
/// }
/// ```
///
/// The [`read`] method is defined on the [`AsyncReadExt`] trait.
///
/// [`read`]: fn@crate::io::AsyncReadExt::read
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
pub async fn peek(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let mut buf = ReadBuf::new(buf);
poll_fn(|cx| self.poll_peek(cx, &mut buf)).await
}
/// Waits for any of the requested ready states.
///
/// This function is usually paired with `try_read()` or `try_write()`. It
/// can be used to concurrently read / write to the same socket on a single
/// task without splitting the socket.
///
/// This function is equivalent to [`TcpStream::ready`].
///
/// # Cancel safety
///
/// This method is cancel safe. Once a readiness event occurs, the method
/// will continue to return immediately until the readiness event is
/// consumed by an attempt to read or write that fails with `WouldBlock` or
/// `Poll::Pending`.
pub async fn ready(&self, interest: Interest) -> io::Result<Ready> {
self.0.ready(interest).await
}
/// Waits for the socket to become readable.
///
/// This function is equivalent to `ready(Interest::READABLE)` and is usually
/// paired with `try_read()`.
///
/// This function is also equivalent to [`TcpStream::ready`].
///
/// # Cancel safety
///
/// This method is cancel safe. Once a readiness event occurs, the method
/// will continue to return immediately until the readiness event is
/// consumed by an attempt to read that fails with `WouldBlock` or
/// `Poll::Pending`.
pub async fn readable(&self) -> io::Result<()> {
self.0.readable().await
}
/// Tries to read data from the stream into the provided buffer, returning how
/// many bytes were read.
///
/// Receives any pending data from the socket but does not wait for new data
/// to arrive. On success, returns the number of bytes read. Because
/// `try_read()` is non-blocking, the buffer does not have to be stored by
/// the async task and can exist entirely on the stack.
///
/// Usually, [`readable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: Self::readable()
/// [`ready()`]: Self::ready()
///
/// # Return
///
/// If data is successfully read, `Ok(n)` is returned, where `n` is the
/// number of bytes read. `Ok(0)` indicates the stream's read half is closed
/// and will no longer yield data. If the stream is not ready to read data
/// `Err(io::ErrorKind::WouldBlock)` is returned.
pub fn try_read(&self, buf: &mut [u8]) -> io::Result<usize> {
self.0.try_read(buf)
}
/// Tries to read data from the stream into the provided buffers, returning
/// how many bytes were read.
///
/// Data is copied to fill each buffer in order, with the final buffer
/// written to possibly being only partially filled. This method behaves
/// equivalently to a single call to [`try_read()`] with concatenated
/// buffers.
///
/// Receives any pending data from the socket but does not wait for new data
/// to arrive. On success, returns the number of bytes read. Because
/// `try_read_vectored()` is non-blocking, the buffer does not have to be
/// stored by the async task and can exist entirely on the stack.
///
/// Usually, [`readable()`] or [`ready()`] is used with this function.
///
/// [`try_read()`]: Self::try_read()
/// [`readable()`]: Self::readable()
/// [`ready()`]: Self::ready()
///
/// # Return
///
/// If data is successfully read, `Ok(n)` is returned, where `n` is the
/// number of bytes read. `Ok(0)` indicates the stream's read half is closed
/// and will no longer yield data. If the stream is not ready to read data
/// `Err(io::ErrorKind::WouldBlock)` is returned.
pub fn try_read_vectored(&self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
self.0.try_read_vectored(bufs)
}
cfg_io_util! {
/// Tries to read data from the stream into the provided buffer, advancing the
/// buffer's internal cursor, returning how many bytes were read.
///
/// Receives any pending data from the socket but does not wait for new data
/// to arrive. On success, returns the number of bytes read. Because
/// `try_read_buf()` is non-blocking, the buffer does not have to be stored by
/// the async task and can exist entirely on the stack.
///
/// Usually, [`readable()`] or [`ready()`] is used with this function.
///
/// [`readable()`]: Self::readable()
/// [`ready()`]: Self::ready()
///
/// # Return
///
/// If data is successfully read, `Ok(n)` is returned, where `n` is the
/// number of bytes read. `Ok(0)` indicates the stream's read half is closed
/// and will no longer yield data. If the stream is not ready to read data
/// `Err(io::ErrorKind::WouldBlock)` is returned.
pub fn try_read_buf<B: BufMut>(&self, buf: &mut B) -> io::Result<usize> {
self.0.try_read_buf(buf)
}
}
/// Returns the remote address that this stream is connected to.
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
self.0.peer_addr()
}
/// Returns the local address that this stream is bound to.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.0.local_addr()
}
}
// Write half of a split TcpStream; every method delegates to the shared
// underlying stream (`self.0`).
impl WriteHalf<'_> {
    /// Waits for any of the requested ready states.
    ///
    /// This function is usually paired with `try_read()` or `try_write()`. It
    /// can be used to concurrently read / write to the same socket on a single
    /// task without splitting the socket.
    ///
    /// This function is equivalent to [`TcpStream::ready`].
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe. Once a readiness event occurs, the method
    /// will continue to return immediately until the readiness event is
    /// consumed by an attempt to read or write that fails with `WouldBlock` or
    /// `Poll::Pending`.
    pub async fn ready(&self, interest: Interest) -> io::Result<Ready> {
        self.0.ready(interest).await
    }
    /// Waits for the socket to become writable.
    ///
    /// This function is equivalent to `ready(Interest::WRITABLE)` and is usually
    /// paired with `try_write()`.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe. Once a readiness event occurs, the method
    /// will continue to return immediately until the readiness event is
    /// consumed by an attempt to write that fails with `WouldBlock` or
    /// `Poll::Pending`.
    pub async fn writable(&self) -> io::Result<()> {
        self.0.writable().await
    }
    /// Tries to write a buffer to the stream, returning how many bytes were
    /// written.
    ///
    /// The function will attempt to write the entire contents of `buf`, but
    /// only part of the buffer may be written.
    ///
    /// This function is usually paired with `writable()`.
    ///
    /// # Return
    ///
    /// If data is successfully written, `Ok(n)` is returned, where `n` is the
    /// number of bytes written. If the stream is not ready to write data,
    /// `Err(io::ErrorKind::WouldBlock)` is returned.
    pub fn try_write(&self, buf: &[u8]) -> io::Result<usize> {
        self.0.try_write(buf)
    }
    /// Tries to write several buffers to the stream, returning how many bytes
    /// were written.
    ///
    /// Data is written from each buffer in order, with the final buffer read
    /// from possibly being only partially consumed. This method behaves
    /// equivalently to a single call to [`try_write()`] with concatenated
    /// buffers.
    ///
    /// This function is usually paired with `writable()`.
    ///
    /// [`try_write()`]: Self::try_write()
    ///
    /// # Return
    ///
    /// If data is successfully written, `Ok(n)` is returned, where `n` is the
    /// number of bytes written. If the stream is not ready to write data,
    /// `Err(io::ErrorKind::WouldBlock)` is returned.
    pub fn try_write_vectored(&self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
        self.0.try_write_vectored(bufs)
    }
    /// Returns the remote address that this stream is connected to.
    pub fn peer_addr(&self) -> io::Result<SocketAddr> {
        self.0.peer_addr()
    }
    /// Returns the local address that this stream is bound to.
    pub fn local_addr(&self) -> io::Result<SocketAddr> {
        self.0.local_addr()
    }
}
impl AsyncRead for ReadHalf<'_> {
    // Forwards to the shared stream's internal (private) poll_read.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        self.0.poll_read_priv(cx, buf)
    }
}
impl AsyncWrite for WriteHalf<'_> {
    // Write/vectored-write polls forward to the shared stream's private
    // implementations.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        self.0.poll_write_priv(cx, buf)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        self.0.poll_write_vectored_priv(cx, bufs)
    }
    fn is_write_vectored(&self) -> bool {
        self.0.is_write_vectored()
    }
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        // tcp flush is a no-op
        Poll::Ready(Ok(()))
    }
    // `poll_shutdown` on a write half shutdowns the stream in the "write" direction.
    fn poll_shutdown(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.0.shutdown_std(Shutdown::Write).into()
    }
}
// Expose the underlying stream for callers that need the full TcpStream API.
impl AsRef<TcpStream> for ReadHalf<'_> {
    fn as_ref(&self) -> &TcpStream {
        self.0
    }
}
// Expose the underlying stream for callers that need the full TcpStream API.
impl AsRef<TcpStream> for WriteHalf<'_> {
    fn as_ref(&self) -> &TcpStream {
        self.0
    }
}
| 35.460199 | 91 | 0.595931 |
76bcbc11be3dea43ec4d1e5ff56e22297c85ab54 | 5,101 | mod client;
mod config;
mod exchange;
mod message;
mod queue;
mod restapi;
use env_logger::Builder;
use hyper::service::{make_service_fn, service_fn};
use hyper::Server;
use log::{error, info};
use std::convert::Infallible;
use std::fmt;
use std::io::Write;
use tokio::net::TcpListener;
use tokio::signal;
pub type Result<T> = std::result::Result<T, Error>;
pub type Error = Box<dyn std::error::Error + Send + Sync>;
/// Handles to the global exchange and queue manager actors, cloned for every
/// client connection (see `start_amqp`).
#[derive(Clone)]
pub struct Context {
    pub exchange_manager: exchange::manager::ExchangeManagerSink,
    pub queue_manager: queue::manager::QueueManagerSink,
}
/// Whether an AMQP error tears down the whole connection or only one channel;
/// determines which close frame `From<RuntimeError>` produces.
#[derive(Debug, PartialEq)]
pub enum ErrorScope {
    Connection,
    Channel,
}
impl Default for ErrorScope {
fn default() -> Self {
ErrorScope::Connection
}
}
/// An AMQP protocol error, convertible into the `connection.close` or
/// `channel.close` frame that reports it to the client.
#[derive(Debug, Default)]
pub struct RuntimeError {
    // Connection- or channel-level error.
    pub scope: ErrorScope,
    // Channel on which the error occurred.
    pub channel: metalmq_codec::frame::Channel,
    // AMQP reply code sent in the close frame.
    pub code: u16,
    // Human-readable reply text sent in the close frame.
    pub text: String,
    // Class/method ids of the AMQP method that triggered the error.
    pub class_id: u16,
    pub method_id: u16,
}
impl From<RuntimeError> for metalmq_codec::frame::AMQPFrame {
fn from(err: RuntimeError) -> metalmq_codec::frame::AMQPFrame {
match err.scope {
ErrorScope::Connection => {
metalmq_codec::frame::connection_close(err.channel, err.code, &err.text, err.class_id, err.method_id)
}
ErrorScope::Channel => {
metalmq_codec::frame::channel_close(err.channel, err.code, &err.text, err.class_id, err.method_id)
}
}
}
}
impl std::fmt::Display for RuntimeError {
    // Display deliberately reuses the Debug representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
impl std::error::Error for RuntimeError {}
impl RuntimeError {
    /// Boxes this error and returns it as the `Err` variant of the crate-wide
    /// `Result`, for convenient early returns.
    fn to_err<T>(self) -> Result<T> {
        Err(Box::new(self))
    }
}
/// Passes a `Result` through unchanged, logging the error value (if any)
/// before returning it — useful for tracing failures without altering
/// control flow.
#[macro_export]
macro_rules! chk {
    ($val:expr) => {
        match $val {
            ok @ Ok(_) => ok,
            Err(e) => {
                log::error!("Error {:?}", e);
                Err(e)
            }
        }
    };
}
/// Logs the error value of a `Result` (if any) and discards the result.
/// Relies on an `error!` macro being in scope at the call site.
#[macro_export]
macro_rules! logerr {
    ($val:expr) => {
        if let Err(e) = $val {
            error!("Error {:?}", e);
        }
    };
}
/// Sends `$message` on `$channel` with a 1-second timeout. Expands to an
/// awaited expression, so it must be used inside an async context.
#[macro_export]
macro_rules! send {
    ($channel:expr, $message:expr) => {
        $channel
            .send_timeout($message, tokio::time::Duration::from_secs(1))
            .await
    };
}
fn setup_logger() {
let mut builder = Builder::from_default_env();
builder
.format_timestamp_millis()
.format(|buf, record| {
let mut lvl = buf.style();
lvl.set_bold(true);
match record.level() {
log::Level::Error => lvl.set_color(env_logger::fmt::Color::Red),
log::Level::Warn => lvl.set_color(env_logger::fmt::Color::Yellow),
log::Level::Info => lvl.set_color(env_logger::fmt::Color::Green),
log::Level::Debug => lvl.set_color(env_logger::fmt::Color::Rgb(160, 160, 160)),
log::Level::Trace => lvl.set_color(env_logger::fmt::Color::Rgb(96, 96, 96)),
};
writeln!(
buf,
"{} - [{:5}] {}:{} - {}",
buf.timestamp_millis(),
lvl.value(record.level()),
record.file().unwrap_or_default(),
record.line().unwrap_or_default(),
record.args()
)
})
.write_style(env_logger::WriteStyle::Always)
.init();
}
/// Starts the HTTP admin API on `url` in a background tokio task and returns
/// immediately. Errors from the running server are only printed to stderr;
/// only address-parse failures are returned to the caller.
async fn start_http(context: Context, url: &str) -> Result<()> {
    let http_addr = url.parse()?;
    info!("Start HTTP admin API on {}", url);
    // Each connection (and each request) gets its own clone of the shared
    // Context handles.
    let make_svc = make_service_fn(move |_conn| {
        let context = context.clone();
        async move { Ok::<_, Infallible>(service_fn(move |req| restapi::route(req, context.clone()))) }
    });
    let server = Server::bind(&http_addr).serve(make_svc);
    tokio::spawn(async move {
        if let Err(e) = server.await {
            eprintln!("HTTP error {}", e);
        }
    });
    Ok(())
}
/// Accept loop for AMQP clients: binds `url` and serves connections forever,
/// spawning one task per accepted socket. Returns only if bind or accept
/// fails.
async fn start_amqp(context: Context, url: &str) -> Result<()> {
    info!("Start AMQP listening on {}", url);
    let listener = TcpListener::bind(url).await?;
    loop {
        let (socket, _) = listener.accept().await?;
        // Context is Clone; every client task gets its own handle pair.
        let ctx = context.clone();
        tokio::spawn(async move {
            if let Err(e) = client::conn::handle_client(socket, ctx).await {
                error!("Error handling client {:?}", e)
            }
        });
    }
}
#[tokio::main]
pub async fn main() -> Result<()> {
    setup_logger();
    // The config file path comes from the CLI; the file supplies the listen
    // addresses used below.
    let cli_config = config::cli();
    let config = config::parse_config(&cli_config.config_file_path)?;
    // Start the two global manager actors shared by every connection.
    let context = Context {
        exchange_manager: exchange::manager::start(),
        queue_manager: queue::manager::start(),
    };
    start_http(context.clone(), &config.network.http_listen).await?;
    start_amqp(context, &config.network.amqp_listen).await?;
    // NOTE(review): start_amqp loops forever on accept, so this Ctrl-C wait
    // is reached only if the accept loop errors out — confirm intended.
    signal::ctrl_c().await?;
    Ok(())
}
| 25.252475 | 117 | 0.563615 |
01f290248fda4a0142227e2d7ed8803cd2c5674c | 7,361 | use crate::prelude::*;
use algebra::{Field, Group};
use r1cs_core::{ConstraintSystem, SynthesisError};
use std::{borrow::Borrow, fmt::Debug};
pub mod curves;
pub use self::curves::{
short_weierstrass::bls12,
twisted_edwards::{edwards_sw6, jubjub},
};
/// Constraint-system gadget exposing group arithmetic (addition, doubling,
/// negation, scalar multiplication) over a group `G`, with constraints over
/// the field `ConstraintF`.
pub trait GroupGadget<G: Group, ConstraintF: Field>:
    Sized
    + ToBytesGadget<ConstraintF>
    + NEqGadget<ConstraintF>
    + EqGadget<ConstraintF>
    + ToBitsGadget<ConstraintF>
    + CondSelectGadget<ConstraintF>
    + AllocGadget<G, ConstraintF>
    + Clone
    + Debug
{
    // Native value type represented by this gadget.
    type Value: Debug;
    // Underlying constraint-system variable type.
    type Variable;
    /// Returns the assigned native value, if one is known.
    fn get_value(&self) -> Option<Self::Value>;
    /// Returns the underlying constraint-system variable.
    fn get_variable(&self) -> Self::Variable;
    /// Allocates the group identity element.
    fn zero<CS: ConstraintSystem<ConstraintF>>(cs: CS) -> Result<Self, SynthesisError>;
    /// Adds two group elements.
    fn add<CS: ConstraintSystem<ConstraintF>>(
        &self,
        cs: CS,
        other: &Self,
    ) -> Result<Self, SynthesisError>;
    /// Subtracts `other` from `self`, implemented as `self + (-other)`.
    fn sub<CS: ConstraintSystem<ConstraintF>>(
        &self,
        mut cs: CS,
        other: &Self,
    ) -> Result<Self, SynthesisError> {
        let neg_other = other.negate(cs.ns(|| "Negate other"))?;
        self.add(cs.ns(|| "Self - other"), &neg_other)
    }
    /// Adds a native (constant) group element.
    fn add_constant<CS: ConstraintSystem<ConstraintF>>(
        &self,
        cs: CS,
        other: &G,
    ) -> Result<Self, SynthesisError>;
    /// Subtracts a native (constant) group element.
    fn sub_constant<CS: ConstraintSystem<ConstraintF>>(
        &self,
        mut cs: CS,
        other: &G,
    ) -> Result<Self, SynthesisError> {
        let neg_other = -(*other);
        self.add_constant(cs.ns(|| "Self - other"), &neg_other)
    }
    /// Doubles the element in place.
    fn double_in_place<CS: ConstraintSystem<ConstraintF>>(
        &mut self,
        cs: CS,
    ) -> Result<(), SynthesisError>;
    /// Negates the element.
    fn negate<CS: ConstraintSystem<ConstraintF>>(&self, cs: CS) -> Result<Self, SynthesisError>;
    /// Inputs must be specified in *little-endian* form.
    /// If the addition law is incomplete for the identity element,
    /// `result` must not be the identity element.
    fn mul_bits<'a, CS: ConstraintSystem<ConstraintF>>(
        &self,
        mut cs: CS,
        result: &Self,
        bits: impl Iterator<Item = &'a Boolean>,
    ) -> Result<Self, SynthesisError> {
        // Double-and-add: for each bit, conditionally accumulate the current
        // power of `self`, then double the power.
        let mut power = self.clone();
        let mut result = result.clone();
        for (i, bit) in bits.enumerate() {
            let new_encoded = result.add(&mut cs.ns(|| format!("Add {}-th power", i)), &power)?;
            result = Self::conditionally_select(
                &mut cs.ns(|| format!("Select {}", i)),
                bit.borrow(),
                &new_encoded,
                &result,
            )?;
            power.double_in_place(&mut cs.ns(|| format!("{}-th Doubling", i)))?;
        }
        Ok(result)
    }
    /// Fixed-base scalar multiplication: for each scalar bit, conditionally
    /// accumulates the paired precomputed base power into `self`.
    fn precomputed_base_scalar_mul<'a, CS, I, B>(
        &mut self,
        mut cs: CS,
        scalar_bits_with_base_powers: I,
    ) -> Result<(), SynthesisError>
    where
        CS: ConstraintSystem<ConstraintF>,
        I: Iterator<Item = (B, &'a G)>,
        B: Borrow<Boolean>,
        G: 'a,
    {
        for (i, (bit, base_power)) in scalar_bits_with_base_powers.enumerate() {
            let new_encoded = self.add_constant(
                &mut cs.ns(|| format!("Add {}-th base power", i)),
                &base_power,
            )?;
            *self = Self::conditionally_select(
                &mut cs.ns(|| format!("Conditional Select {}", i)),
                bit.borrow(),
                &new_encoded,
                &self,
            )?;
        }
        Ok(())
    }
    /// Optional 3-bit signed-digit fixed-base multiplication; the default
    /// implementation is unsupported and always errors.
    fn precomputed_base_3_bit_signed_digit_scalar_mul<'a, CS, I, J, B>(
        _: CS,
        _: &[B],
        _: &[J],
    ) -> Result<Self, SynthesisError>
    where
        CS: ConstraintSystem<ConstraintF>,
        I: Borrow<[Boolean]>,
        J: Borrow<[I]>,
        B: Borrow<[G]>,
    {
        Err(SynthesisError::AssignmentMissing)
    }
    /// Fixed-base multi-scalar multiplication over per-scalar tables of base
    /// powers, accumulating into a single result.
    fn precomputed_base_multiscalar_mul<'a, CS, T, I, B>(
        mut cs: CS,
        bases: &[B],
        scalars: I,
    ) -> Result<Self, SynthesisError>
    where
        CS: ConstraintSystem<ConstraintF>,
        T: 'a + ToBitsGadget<ConstraintF> + ?Sized,
        I: Iterator<Item = &'a T>,
        B: Borrow<[G]>,
    {
        let mut result = Self::zero(&mut cs.ns(|| "Declare Result"))?;
        // Compute ∏(h_i^{m_i}) for all i.
        for (i, (bits, base_powers)) in scalars.zip(bases).enumerate() {
            let base_powers = base_powers.borrow();
            let bits = bits.to_bits(&mut cs.ns(|| format!("Convert Scalar {} to bits", i)))?;
            result.precomputed_base_scalar_mul(
                cs.ns(|| format!("Chunk {}", i)),
                bits.iter().zip(base_powers),
            )?;
        }
        Ok(result)
    }
    // Cost estimates (used by implementors/callers; units defined elsewhere).
    fn cost_of_add() -> usize;
    fn cost_of_double() -> usize;
}
#[cfg(test)]
mod test {
    use algebra::Field;
    use r1cs_core::ConstraintSystem;
    use crate::{prelude::*, test_constraint_system::TestConstraintSystem};
    use algebra::groups::Group;
    use rand;
    /// Checks basic group laws (identity, self-equality, commutativity,
    /// associativity, doubling) and byte serialization for two gadget values.
    pub(crate) fn group_test<
        ConstraintF: Field,
        G: Group,
        GG: GroupGadget<G, ConstraintF>,
        CS: ConstraintSystem<ConstraintF>,
    >(
        cs: &mut CS,
        a: GG,
        b: GG,
    ) {
        let zero = GG::zero(cs.ns(|| "Zero")).unwrap();
        assert_eq!(zero, zero);
        // a == a
        assert_eq!(a, a);
        // a + 0 = a
        assert_eq!(a.add(cs.ns(|| "a_plus_zero"), &zero).unwrap(), a);
        // a - 0 = a
        assert_eq!(a.sub(cs.ns(|| "a_minus_zero"), &zero).unwrap(), a);
        // a - a = 0
        assert_eq!(a.sub(cs.ns(|| "a_minus_a"), &a).unwrap(), zero);
        // a + b = b + a
        let a_b = a.add(cs.ns(|| "a_plus_b"), &b).unwrap();
        let b_a = b.add(cs.ns(|| "b_plus_a"), &a).unwrap();
        assert_eq!(a_b, b_a);
        // (a + b) + a = a + (b + a)
        let ab_a = a_b.add(&mut cs.ns(|| "a_b_plus_a"), &a).unwrap();
        let a_ba = a.add(&mut cs.ns(|| "a_plus_b_a"), &b_a).unwrap();
        assert_eq!(ab_a, a_ba);
        // a.double() = a + a
        let a_a = a.add(cs.ns(|| "a + a"), &a).unwrap();
        let mut a2 = a.clone();
        a2.double_in_place(cs.ns(|| "2a")).unwrap();
        assert_eq!(a2, a_a);
        // b.double() = b + b
        let mut b2 = b.clone();
        b2.double_in_place(cs.ns(|| "2b")).unwrap();
        let b_b = b.add(cs.ns(|| "b + b"), &b).unwrap();
        assert_eq!(b2, b_b);
        // Byte serialization should succeed in both lenient and strict modes.
        let _ = a.to_bytes(&mut cs.ns(|| "ToBytes")).unwrap();
        let _ = a.to_bytes_strict(&mut cs.ns(|| "ToBytes Strict")).unwrap();
        let _ = b.to_bytes(&mut cs.ns(|| "b ToBytes")).unwrap();
        let _ = b
            .to_bytes_strict(&mut cs.ns(|| "b ToBytes Strict"))
            .unwrap();
    }
    #[test]
    fn jubjub_group_gadgets_test() {
        use crate::groups::jubjub::JubJubGadget;
        use algebra::{curves::jubjub::JubJubProjective, fields::jubjub::fq::Fq};
        let mut cs = TestConstraintSystem::<Fq>::new();
        // Random inputs for the generic group-law checks above.
        let a: JubJubProjective = rand::random();
        let b: JubJubProjective = rand::random();
        let a = JubJubGadget::alloc(&mut cs.ns(|| "generate_a"), || Ok(a)).unwrap();
        let b = JubJubGadget::alloc(&mut cs.ns(|| "generate_b"), || Ok(b)).unwrap();
        group_test::<_, JubJubProjective, _, _>(&mut cs.ns(|| "GroupTest(a, b)"), a, b);
    }
}
| 31.059072 | 96 | 0.532672 |
67b2c61ce02a215029d71ec169f1eb5241398dfd | 549 | extern crate chrono;
extern crate olin;
use self::chrono::TimeZone;
use olin::log;
/// This tests for https://github.com/CommonWA/cwa-spec/blob/master/ns/time.md
pub extern "C" fn test() -> Result<(), i32> {
    log::info("running ns::time tests");
    // Raw unix timestamp from the time namespace, round-tripped through chrono.
    let unix_secs: i64 = olin::time::ts();
    let as_datetime = chrono::Utc.timestamp(unix_secs, 0);
    log::info(&format!("ts: {}, dt: {}", unix_secs, as_datetime.to_rfc3339()));
    // The higher-level now() accessor already returns a datetime.
    let wall_clock = olin::time::now();
    log::info(&format!("time::now(): {}", wall_clock.to_rfc3339()));
    log::info("ns::time tests passed");
    Ok(())
}
| 24.954545 | 78 | 0.593807 |
8ff7c51c8685aa8f496fe1a093467684dbf5d830 | 155 | use proconio::input;
fn main() {
    input! {
        s: String,
    }
    // Toggle between the two contest names: "ABC" -> "ARC", anything else -> "ABC".
    let ans = match s.as_str() {
        "ABC" => "ARC",
        _ => "ABC",
    };
    println!("{}", ans);
}
| 12.916667 | 53 | 0.425806 |
29f92a389345fad0cd048f86d4269db99d809f86 | 8,315 | use ic_btc_types_internal::{
BitcoinAdapterRequest, BitcoinAdapterRequestWrapper, BitcoinAdapterResponse,
};
use ic_protobuf::{
bitcoin::v1 as pb_bitcoin,
proxy::{try_from_option_field, ProxyDecodeError},
};
use serde::{Deserialize, Serialize};
use std::{
collections::{BTreeMap, VecDeque},
convert::TryFrom,
};
/// Maximum number of requests to Bitcoin Adapter that can be present in the queue.
/// When the limit is reached, `push_request` returns `QueueFull`.
const REQUEST_QUEUE_CAPACITY: u32 = 500;
/// Errors that can be returned when handling the `BitcoinState`.
/// See the `Display` impl for the user-facing messages.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum BitcoinStateError {
    /// Bitcoin testnet feature not enabled.
    TestnetFeatureNotEnabled,
    /// No corresponding request found when trying to push a response.
    NonMatchingResponse { callback_id: u64 },
    /// Enqueueing a request failed due to full queue to the Bitcoin adapter.
    /// `capacity` echoes the configured queue limit.
    QueueFull { capacity: u32 },
}
impl std::error::Error for BitcoinStateError {}
impl std::fmt::Display for BitcoinStateError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
BitcoinStateError::TestnetFeatureNotEnabled => {
write!(f, "Bitcoin testnet feature not enabled.")
}
BitcoinStateError::NonMatchingResponse { callback_id } => {
write!(
f,
"Attempted to push a response for callback id {} without an in-flight corresponding request",
callback_id
)
}
BitcoinStateError::QueueFull { capacity } => {
write!(
f,
"Request can not be enqueued because the queue has reached its capacity of {}.",
capacity
)
}
}
}
}
/// Represents the queues for requests to and responses from the Bitcoin Adapter.
/// See `ic_protobuf::bitcoin::v1` for documentation of the fields.
#[derive(Clone, Debug, PartialEq)]
struct AdapterQueues {
    // Next callback id to assign to an enqueued request.
    next_callback_id: u64,
    // In-flight requests keyed by their callback id.
    requests: BTreeMap<u64, BitcoinAdapterRequest>,
    // Responses ready to be consumed, in arrival order.
    responses: VecDeque<BitcoinAdapterResponse>,
    // Upper bound on `requests.len()`, enforced by `push_request`.
    requests_queue_capacity: u32,
    // Count of in-flight GetSuccessors requests, kept in sync by
    // `push_request`/`push_response`.
    in_flight_get_successors_requests_num: u32,
}
/// Represents the bitcoin state of the subnet.
/// See `ic_protobuf::bitcoin::v1` for documentation of the fields.
#[derive(Clone, Debug, PartialEq)]
pub struct BitcoinState {
    // All request/response bookkeeping lives in the adapter queues.
    adapter_queues: AdapterQueues,
}
impl Default for BitcoinState {
    // Defaults to the standard queue capacity (500).
    fn default() -> Self {
        Self::new(REQUEST_QUEUE_CAPACITY)
    }
}
impl BitcoinState {
    /// Creates an empty state whose request queue holds at most
    /// `requests_queue_capacity` in-flight requests.
    pub fn new(requests_queue_capacity: u32) -> Self {
        Self {
            adapter_queues: AdapterQueues {
                next_callback_id: 0,
                requests: BTreeMap::new(),
                responses: VecDeque::new(),
                requests_queue_capacity,
                in_flight_get_successors_requests_num: 0,
            },
        }
    }
    /// Pushes a `BitcoinAdapterRequestWrapper` to the `BitcoinState`.
    ///
    /// Returns a `BitcoinStateError` if there's no room left in the queue for new requests.
    pub(crate) fn push_request(
        &mut self,
        request: BitcoinAdapterRequestWrapper,
    ) -> Result<(), BitcoinStateError> {
        if self.adapter_queues.requests.len() as u32 >= self.adapter_queues.requests_queue_capacity
        {
            return Err(BitcoinStateError::QueueFull {
                capacity: self.adapter_queues.requests_queue_capacity,
            });
        }
        // Track GetSuccessors requests separately so callers can cheaply ask
        // whether one is in flight.
        if let BitcoinAdapterRequestWrapper::GetSuccessorsRequest(_) = request {
            self.adapter_queues.in_flight_get_successors_requests_num += 1;
        }
        self.adapter_queues.requests.insert(
            self.adapter_queues.next_callback_id,
            BitcoinAdapterRequest {
                request,
                callback_id: self.adapter_queues.next_callback_id,
            },
        );
        self.adapter_queues.next_callback_id += 1;
        Ok(())
    }
    /// Returns true iff there's at least an in-flight `GetSuccessorsRequest`.
    pub fn has_in_flight_get_successors_requests(&self) -> bool {
        self.adapter_queues.in_flight_get_successors_requests_num > 0
    }
    /// Returns an iterator over the existing requests to the Bitcoin Adapter.
    pub fn adapter_requests_iter(
        &self,
    ) -> std::collections::btree_map::Iter<'_, u64, BitcoinAdapterRequest> {
        self.adapter_queues.requests.iter()
    }
    /// Pushes a `BitcoinAdapterResponse` onto the `BitcoinState`. It also clears
    /// the in-flight request that corresponds to this response.
    ///
    /// Returns a `BitcoinStateError::NonMatchingResponse` error if there is no
    /// corresponding in-flight request when the response is pushed.
    pub(crate) fn push_response(
        &mut self,
        response: BitcoinAdapterResponse,
    ) -> Result<(), BitcoinStateError> {
        match self.adapter_queues.requests.remove(&response.callback_id) {
            None => Err(BitcoinStateError::NonMatchingResponse {
                callback_id: response.callback_id,
            }),
            Some(r) => {
                // Keep the GetSuccessors counter in sync with the removal.
                if let BitcoinAdapterRequestWrapper::GetSuccessorsRequest(_) = r.request {
                    self.adapter_queues.in_flight_get_successors_requests_num -= 1;
                }
                self.adapter_queues.responses.push_back(response);
                Ok(())
            }
        }
    }
    /// Pops the next `BitcoinAdapterResponse` from the `BitcoinState`.
    pub fn pop_response(&mut self) -> Option<BitcoinAdapterResponse> {
        self.adapter_queues.responses.pop_front()
    }
    /// Returns the number of requests to the Bitcoin Adapter.
    pub fn num_adapter_requests(&self) -> usize {
        self.adapter_queues.requests.len()
    }
    /// Returns the number of responses from the Bitcoin Adapter.
    pub fn num_adapter_responses(&self) -> usize {
        self.adapter_queues.responses.len()
    }
}
impl From<&AdapterQueues> for pb_bitcoin::AdapterQueues {
    /// Serializes the in-memory queues into their protobuf counterpart.
    /// Note: the in-flight GetSuccessors counter is not persisted.
    fn from(queues: &AdapterQueues) -> pb_bitcoin::AdapterQueues {
        let requests = queues.requests.values().map(Into::into).collect();
        let responses = queues.responses.iter().map(Into::into).collect();
        pb_bitcoin::AdapterQueues {
            next_callback_id: queues.next_callback_id,
            requests,
            responses,
            requests_queue_capacity: queues.requests_queue_capacity,
        }
    }
}
impl TryFrom<pb_bitcoin::AdapterQueues> for AdapterQueues {
    type Error = ProxyDecodeError;
    fn try_from(queues: pb_bitcoin::AdapterQueues) -> Result<Self, Self::Error> {
        let mut requests = BTreeMap::new();
        // The in-flight GetSuccessors counter is not persisted in the
        // protobuf representation; recompute it while decoding.
        let mut in_flight_get_successors_requests_num = 0;
        for r in queues.requests.into_iter() {
            let bitcoin_adapter_request = BitcoinAdapterRequest::try_from(r)?;
            if let BitcoinAdapterRequestWrapper::GetSuccessorsRequest(_) =
                bitcoin_adapter_request.request
            {
                in_flight_get_successors_requests_num += 1;
            }
            requests.insert(bitcoin_adapter_request.callback_id, bitcoin_adapter_request);
        }
        let mut responses = VecDeque::new();
        for r in queues.responses.into_iter() {
            responses.push_back(BitcoinAdapterResponse::try_from(r)?);
        }
        Ok(AdapterQueues {
            next_callback_id: queues.next_callback_id,
            requests,
            responses,
            requests_queue_capacity: queues.requests_queue_capacity,
            in_flight_get_successors_requests_num,
        })
    }
}
impl From<&BitcoinState> for pb_bitcoin::BitcoinState {
    // Serialization wrapper: the whole state is just its adapter queues.
    fn from(bitcoin_state: &BitcoinState) -> pb_bitcoin::BitcoinState {
        pb_bitcoin::BitcoinState {
            adapter_queues: Some((&bitcoin_state.adapter_queues).into()),
        }
    }
}
impl TryFrom<pb_bitcoin::BitcoinState> for BitcoinState {
    type Error = ProxyDecodeError;
    // Fails if the adapter_queues field is absent or fails to decode.
    fn try_from(bitcoin_state: pb_bitcoin::BitcoinState) -> Result<Self, Self::Error> {
        let adapter_queues: AdapterQueues =
            try_from_option_field(bitcoin_state.adapter_queues, "BitcoinState::adapter_queues")?;
        Ok(BitcoinState { adapter_queues })
    }
}
#[cfg(test)]
mod tests;
| 35.840517 | 113 | 0.641491 |
9c6150cfbfa7bbdc99a2da73ac65f65b0829770b | 10,239 | use std::collections::BTreeSet;
use std::fs::create_dir_all;
use std::marker::PhantomData;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use fst::IntoStreamer;
use milli::heed::{EnvOpenOptions, RoTxn};
use milli::update::{IndexerConfig, Setting};
use milli::{obkv_to_json, FieldDistribution};
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use time::OffsetDateTime;
use uuid::Uuid;
use crate::EnvSizer;
use super::error::IndexError;
use super::error::Result;
use super::updates::{MinWordSizeTyposSetting, TypoSettings};
use super::{Checked, Settings};
pub type Document = Map<String, Value>;
/// Index metadata: creation/update timestamps and the optional primary key,
/// serialized in camelCase with RFC 3339 timestamps.
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct IndexMeta {
    #[serde(with = "time::serde::rfc3339")]
    pub created_at: OffsetDateTime,
    #[serde(with = "time::serde::rfc3339")]
    pub updated_at: OffsetDateTime,
    pub primary_key: Option<String>,
}
impl IndexMeta {
    /// Reads the index metadata using a fresh read transaction.
    pub fn new(index: &Index) -> Result<Self> {
        let txn = index.read_txn()?;
        Self::new_txn(index, &txn)
    }
    /// Reads the index metadata inside the caller-provided transaction.
    pub fn new_txn(index: &Index, txn: &milli::heed::RoTxn) -> Result<Self> {
        Ok(Self {
            created_at: index.created_at(txn)?,
            updated_at: index.updated_at(txn)?,
            primary_key: index.primary_key(txn)?.map(String::from),
        })
    }
}
/// Runtime statistics about an index, as produced by `Index::stats`.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct IndexStats {
    // On-disk size of the index environment; not serialized.
    #[serde(skip)]
    pub size: u64,
    pub number_of_documents: u64,
    /// Whether the current index is performing an update. It is initially `None` when the
    /// index returns it, since it is the `UpdateStore` that knows what index is currently indexing. It is
    /// later set to either true or false, when we retrieve the information from the `UpdateStore`
    pub is_indexing: Option<bool>,
    pub field_distribution: FieldDistribution,
}
/// Handle to a single index: the underlying milli index plus its uuid and the
/// shared indexer configuration. Cloning is cheap (Arc handles).
#[derive(Clone, derivative::Derivative)]
#[derivative(Debug)]
pub struct Index {
    pub uuid: Uuid,
    // Skipped in Debug output via derivative.
    #[derivative(Debug = "ignore")]
    pub inner: Arc<milli::Index>,
    #[derivative(Debug = "ignore")]
    pub indexer_config: Arc<IndexerConfig>,
}
// Allow calling milli::Index methods directly on this wrapper.
impl Deref for Index {
    type Target = milli::Index;
    fn deref(&self) -> &Self::Target {
        self.inner.as_ref()
    }
}
impl Index {
    /// Opens (creating directories as needed) the index environment at `path`
    /// with the given heed map size.
    pub fn open(
        path: impl AsRef<Path>,
        size: usize,
        uuid: Uuid,
        update_handler: Arc<IndexerConfig>,
    ) -> Result<Self> {
        log::debug!("opening index in {}", path.as_ref().display());
        create_dir_all(&path)?;
        let mut options = EnvOpenOptions::new();
        options.map_size(size);
        let inner = Arc::new(milli::Index::new(options, &path)?);
        Ok(Index {
            inner,
            uuid,
            indexer_config: update_handler,
        })
    }
    /// Asynchronously close the underlying index
    pub fn close(self) {
        self.inner.as_ref().clone().prepare_for_closing();
    }
    /// Gathers size, document count and field distribution for this index.
    /// `is_indexing` is left as `None`; the caller fills it in.
    pub fn stats(&self) -> Result<IndexStats> {
        let rtxn = self.read_txn()?;
        Ok(IndexStats {
            size: self.size(),
            number_of_documents: self.number_of_documents(&rtxn)?,
            is_indexing: None,
            field_distribution: self.field_distribution(&rtxn)?,
        })
    }
    /// Reads the index metadata (timestamps, primary key).
    pub fn meta(&self) -> Result<IndexMeta> {
        IndexMeta::new(self)
    }
    /// Reads the full settings using a fresh read transaction.
    pub fn settings(&self) -> Result<Settings<Checked>> {
        let txn = self.read_txn()?;
        self.settings_txn(&txn)
    }
    /// Returns this index's unique id.
    pub fn uuid(&self) -> Uuid {
        self.uuid
    }
    /// Builds the public `Settings` view from the underlying milli settings,
    /// inside the caller-provided transaction.
    pub fn settings_txn(&self, txn: &RoTxn) -> Result<Settings<Checked>> {
        let displayed_attributes = self
            .displayed_fields(txn)?
            .map(|fields| fields.into_iter().map(String::from).collect());
        let searchable_attributes = self
            .user_defined_searchable_fields(txn)?
            .map(|fields| fields.into_iter().map(String::from).collect());
        let filterable_attributes = self.filterable_fields(txn)?.into_iter().collect();
        let sortable_attributes = self.sortable_fields(txn)?.into_iter().collect();
        let criteria = self
            .criteria(txn)?
            .into_iter()
            .map(|c| c.to_string())
            .collect();
        // Stop words are stored as an fst set; stream it out into strings.
        let stop_words = self
            .stop_words(txn)?
            .map(|stop_words| -> Result<BTreeSet<_>> {
                Ok(stop_words.stream().into_strs()?.into_iter().collect())
            })
            .transpose()?
            .unwrap_or_default();
        let distinct_field = self.distinct_field(txn)?.map(String::from);
        // in milli each word in the synonyms map were split on their separator. Since we lost
        // this information we are going to put space between words.
        let synonyms = self
            .synonyms(txn)?
            .iter()
            .map(|(key, values)| {
                (
                    key.join(" "),
                    values.iter().map(|value| value.join(" ")).collect(),
                )
            })
            .collect();
        let min_typo_word_len = MinWordSizeTyposSetting {
            one_typo: Setting::Set(self.min_word_len_one_typo(txn)?),
            two_typos: Setting::Set(self.min_word_len_two_typos(txn)?),
        };
        let disabled_words = self
            .exact_words(txn)?
            .into_stream()
            .into_strs()?
            .into_iter()
            .collect();
        let disabled_attributes = self
            .exact_attributes(txn)?
            .into_iter()
            .map(String::from)
            .collect();
        let typo_tolerance = TypoSettings {
            enabled: Setting::Set(self.authorize_typos(txn)?),
            min_word_size_for_typos: Setting::Set(min_typo_word_len),
            disable_on_words: Setting::Set(disabled_words),
            disable_on_attributes: Setting::Set(disabled_attributes),
        };
        // Optional fields map to Reset when unset so callers can distinguish
        // "explicitly cleared" from "configured".
        Ok(Settings {
            displayed_attributes: match displayed_attributes {
                Some(attrs) => Setting::Set(attrs),
                None => Setting::Reset,
            },
            searchable_attributes: match searchable_attributes {
                Some(attrs) => Setting::Set(attrs),
                None => Setting::Reset,
            },
            filterable_attributes: Setting::Set(filterable_attributes),
            sortable_attributes: Setting::Set(sortable_attributes),
            ranking_rules: Setting::Set(criteria),
            stop_words: Setting::Set(stop_words),
            distinct_attribute: match distinct_field {
                Some(field) => Setting::Set(field),
                None => Setting::Reset,
            },
            synonyms: Setting::Set(synonyms),
            typo_tolerance: Setting::Set(typo_tolerance),
            _kind: PhantomData,
        })
    }
    /// Return the total number of documents contained in the index + the selected documents.
    pub fn retrieve_documents<S: AsRef<str>>(
        &self,
        offset: usize,
        limit: usize,
        attributes_to_retrieve: Option<Vec<S>>,
    ) -> Result<(u64, Vec<Document>)> {
        let txn = self.read_txn()?;
        let fields_ids_map = self.fields_ids_map(&txn)?;
        let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
        let iter = self.documents.range(&txn, &(..))?.skip(offset).take(limit);
        let mut documents = Vec::new();
        for entry in iter {
            let (_id, obkv) = entry?;
            let document = obkv_to_json(&all_fields, &fields_ids_map, obkv)?;
            // Optionally project the document onto the requested attributes.
            let document = match &attributes_to_retrieve {
                Some(attributes_to_retrieve) => permissive_json_pointer::select_values(
                    &document,
                    attributes_to_retrieve.iter().map(|s| s.as_ref()),
                ),
                None => document,
            };
            documents.push(document);
        }
        let number_of_documents = self.number_of_documents(&txn)?;
        Ok((number_of_documents, documents))
    }
    /// Fetches one document by its external id, optionally projected onto
    /// `attributes_to_retrieve`. Errors with `DocumentNotFound` if absent.
    pub fn retrieve_document<S: AsRef<str>>(
        &self,
        doc_id: String,
        attributes_to_retrieve: Option<Vec<S>>,
    ) -> Result<Document> {
        let txn = self.read_txn()?;
        let fields_ids_map = self.fields_ids_map(&txn)?;
        let all_fields: Vec<_> = fields_ids_map.iter().map(|(id, _)| id).collect();
        // Resolve the user-facing id to milli's internal document id.
        let internal_id = self
            .external_documents_ids(&txn)?
            .get(doc_id.as_bytes())
            .ok_or_else(|| IndexError::DocumentNotFound(doc_id.clone()))?;
        let document = self
            .documents(&txn, std::iter::once(internal_id))?
            .into_iter()
            .next()
            .map(|(_, d)| d)
            .ok_or(IndexError::DocumentNotFound(doc_id))?;
        let document = obkv_to_json(&all_fields, &fields_ids_map, document)?;
        let document = match &attributes_to_retrieve {
            Some(attributes_to_retrieve) => permissive_json_pointer::select_values(
                &document,
                attributes_to_retrieve.iter().map(|s| s.as_ref()),
            ),
            None => document,
        };
        Ok(document)
    }
    /// Returns the size of the index environment (see `EnvSizer`).
    pub fn size(&self) -> u64 {
        self.env.size()
    }
    /// Copies the index data into `<path>/indexes/<uuid>/data.mdb`, taking a
    /// write transaction (presumably to block writers during the copy).
    pub fn snapshot(&self, path: impl AsRef<Path>) -> Result<()> {
        let mut dst = path.as_ref().join(format!("indexes/{}/", self.uuid));
        create_dir_all(&dst)?;
        dst.push("data.mdb");
        let _txn = self.write_txn()?;
        self.inner
            .env
            .copy_to_path(dst, milli::heed::CompactionOption::Enabled)?;
        Ok(())
    }
}
/// When running tests, when a server instance is dropped, the environment is not actually closed,
/// leaving a lot of open file descriptors.
impl Drop for Index {
    fn drop(&mut self) {
        // When dropping the last instance of an index, we want to close the index
        // Note that the close is actually performed only if all the instances are effectively
        // dropped
        // NOTE(review): the strong_count check is not atomic with this drop;
        // a concurrent clone could race it — confirm drops are serialized.
        if Arc::strong_count(&self.inner) == 1 {
            self.inner.as_ref().clone().prepare_for_closing();
        }
    }
}
| 32.198113 | 106 | 0.580818 |
4bafbac85b82b48627cb5e5d752ebe33d43c9991 | 62,526 | use std::collections::HashMap;
use crate::{
protobufs::graplinc::grapl::api::graph::v1beta1::{
DecrementOnlyIntProp as DecrementOnlyIntPropProto,
DecrementOnlyUintProp as DecrementOnlyUintPropProto,
Edge as EdgeProto,
EdgeList as EdgeListProto,
GraphDescription as GraphDescriptionProto,
IdStrategy as IdStrategyProto,
IdentifiedGraph as IdentifiedGraphProto,
IdentifiedNode as IdentifiedNodeProto,
ImmutableIntProp as ImmutableIntPropProto,
ImmutableStrProp as ImmutableStrPropProto,
ImmutableUintProp as ImmutableUintPropProto,
IncrementOnlyIntProp as IncrementOnlyIntPropProto,
IncrementOnlyUintProp as IncrementOnlyUintPropProto,
MergedEdge as MergedEdgeProto,
MergedEdgeList as MergedEdgeListProto,
MergedGraph as MergedGraphProto,
MergedNode as MergedNodeProto,
NodeDescription as NodeDescriptionProto,
NodeProperty as NodePropertyProto,
Session as SessionProto,
Static as StaticProto,
},
serde_impl,
type_url,
SerDeError,
};
// A helper macro to generate `From` impl boilerplate.
macro_rules ! impl_from_for_unit {
($into_t:ty, $field:tt, $from_t:ty) => {
impl From<$from_t> for $into_t
{
fn from(p: $from_t) -> Self {
let p = p.to_owned().into();
Self {$field: p}
}
}
};
($into_t:ty, $field:tt, $head:ty, $($tail:ty),*) => {
impl_from_for_unit!($into_t, $field, $head);
impl_from_for_unit!($into_t, $field, $($tail),*);
};
}
//
// Session
//
/// Session-based identity strategy; field meanings mirror the `SessionProto`
/// protobuf message (see the conversions below).
#[derive(Debug, PartialEq, Clone)]
pub struct Session {
    pub primary_key_properties: Vec<String>,
    pub primary_key_requires_asset_id: bool,
    pub create_time: u64, // TODO: use Timestamp
    pub last_seen_time: u64, // TODO: use Timestamp
    pub terminate_time: u64, // TODO: use Timestamp
}
impl From<SessionProto> for Session {
    // Field-for-field copy from the protobuf message.
    fn from(session_proto: SessionProto) -> Self {
        Session {
            primary_key_properties: session_proto.primary_key_properties,
            primary_key_requires_asset_id: session_proto.primary_key_requires_asset_id,
            create_time: session_proto.create_time,
            last_seen_time: session_proto.last_seen_time,
            terminate_time: session_proto.terminate_time,
        }
    }
}
impl From<Session> for SessionProto {
fn from(session: Session) -> Self {
SessionProto {
primary_key_properties: session.primary_key_properties,
primary_key_requires_asset_id: session.primary_key_requires_asset_id,
create_time: session.create_time,
last_seen_time: session.last_seen_time,
terminate_time: session.terminate_time,
}
}
}
impl type_url::TypeUrl for Session {
const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.Session";
}
impl serde_impl::ProtobufSerializable for Session {
type ProtobufMessage = SessionProto;
}
//
// Static
//
/// Static identity strategy; field meanings mirror the `StaticProto`
/// protobuf message (see the conversions below).
#[derive(Debug, PartialEq, Clone)]
pub struct Static {
    pub primary_key_properties: Vec<String>,
    pub primary_key_requires_asset_id: bool,
}
impl From<StaticProto> for Static {
    // Field-for-field copy from the protobuf message.
    fn from(static_proto: StaticProto) -> Self {
        Static {
            primary_key_properties: static_proto.primary_key_properties,
            primary_key_requires_asset_id: static_proto.primary_key_requires_asset_id,
        }
    }
}
impl From<Static> for StaticProto {
fn from(static_: Static) -> Self {
StaticProto {
primary_key_properties: static_.primary_key_properties,
primary_key_requires_asset_id: static_.primary_key_requires_asset_id,
}
}
}
impl type_url::TypeUrl for Static {
const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.Static";
}
impl serde_impl::ProtobufSerializable for Static {
type ProtobufMessage = StaticProto;
}
//
// IdStrategy
//
#[derive(Debug, PartialEq, Clone)]
// The supported identification algorithms (see `Session` and `Static`).
pub enum Strategy {
    Session(Session),
    Static(Static),
}
#[derive(Debug, PartialEq, Clone)]
// Wrapper mirroring the proto `oneof`: exactly one strategy per value.
pub struct IdStrategy {
    pub strategy: Strategy,
}
impl TryFrom<IdStrategyProto> for IdStrategy {
    type Error = SerDeError;

    /// Converts the proto `oneof` into the domain enum.
    ///
    /// Fails with `SerDeError::MissingField("strategy")` when the `oneof`
    /// was not populated on the wire.
    fn try_from(id_strategy_proto: IdStrategyProto) -> Result<Self, Self::Error> {
        // Local alias keeps the generated-code path readable.
        use crate::protobufs::graplinc::grapl::api::graph::v1beta1::id_strategy::Strategy as ProtoStrategy;
        let strategy = match id_strategy_proto.strategy {
            Some(ProtoStrategy::Session(session_proto)) => Strategy::Session(session_proto.into()),
            Some(ProtoStrategy::Static(static_proto)) => Strategy::Static(static_proto.into()),
            None => return Err(SerDeError::MissingField("strategy")),
        };
        Ok(IdStrategy { strategy })
    }
}
impl From<IdStrategy> for IdStrategyProto {
    /// Converts the domain enum back into the proto `oneof` wrapper.
    fn from(id_strategy: IdStrategy) -> Self {
        // Local alias keeps the generated-code path readable.
        use crate::protobufs::graplinc::grapl::api::graph::v1beta1::id_strategy::Strategy as ProtoStrategy;
        let strategy = match id_strategy.strategy {
            Strategy::Session(session) => ProtoStrategy::Session(session.into()),
            Strategy::Static(static_) => ProtoStrategy::Static(static_.into()),
        };
        IdStrategyProto {
            strategy: Some(strategy),
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for IdStrategy {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.IdStrategy";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for IdStrategy {
    type ProtobufMessage = IdStrategyProto;
}
// Convenience lifts so call sites can pass a concrete strategy directly.
impl From<Static> for IdStrategy {
    fn from(strategy: Static) -> IdStrategy {
        IdStrategy {
            strategy: Strategy::Static(strategy),
        }
    }
}
impl From<Session> for IdStrategy {
    fn from(strategy: Session) -> IdStrategy {
        IdStrategy {
            strategy: Strategy::Session(strategy),
        }
    }
}
//
// IncrementOnlyUintProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Unsigned property that may only grow: merging keeps the maximum value.
pub struct IncrementOnlyUintProp {
    pub prop: u64,
}
impl IncrementOnlyUintProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> u64 {
        self.prop
    }
    /// Merges by keeping the larger of the two values (increment-only semantics).
    pub fn merge_property(&mut self, other_prop: &Self) {
        tracing::trace!(message="IncrementOnlyUintProp merge", self_prop=?self, other_prop=?other_prop);
        self.prop = std::cmp::max(self.prop, other_prop.prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<IncrementOnlyUintPropProto> for IncrementOnlyUintProp {
    fn from(increment_only_uint_prop_proto: IncrementOnlyUintPropProto) -> Self {
        IncrementOnlyUintProp {
            prop: increment_only_uint_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<IncrementOnlyUintProp> for IncrementOnlyUintPropProto {
    fn from(increment_only_uint_prop: IncrementOnlyUintProp) -> Self {
        IncrementOnlyUintPropProto {
            prop: increment_only_uint_prop.prop,
        }
    }
}
impl type_url::TypeUrl for IncrementOnlyUintProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.IncrementOnlyUintProp";
}
impl serde_impl::ProtobufSerializable for IncrementOnlyUintProp {
    type ProtobufMessage = IncrementOnlyUintPropProto;
}
/// Renders the inner value exactly as `u64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps every existing `.to_string()` call
/// working, and the type additionally becomes usable in `format!`/`write!`.
impl std::fmt::Display for IncrementOnlyUintProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// unsigned integer widths up to u64.
impl_from_for_unit!(
    IncrementOnlyUintProp,
    prop,
    u64,
    u32,
    u16,
    u8,
    &u64,
    &u32,
    &u16,
    &u8
);
//
// ImmutableUintProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Unsigned property that never changes once set: merging keeps `self` as-is.
pub struct ImmutableUintProp {
    pub prop: u64,
}
impl ImmutableUintProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> u64 {
        self.prop
    }
    /// Immutable semantics: the merge is a no-op (first value wins); only a
    /// trace event is emitted for observability.
    pub fn merge_property(&mut self, other_prop: &Self) {
        tracing::trace!(message="ImmutableUintProp merge", self_prop=?self, other_prop=?other_prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<ImmutableUintPropProto> for ImmutableUintProp {
    fn from(immutable_uint_prop_proto: ImmutableUintPropProto) -> Self {
        ImmutableUintProp {
            prop: immutable_uint_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<ImmutableUintProp> for ImmutableUintPropProto {
    fn from(immutable_uint_prop: ImmutableUintProp) -> Self {
        ImmutableUintPropProto {
            prop: immutable_uint_prop.prop,
        }
    }
}
impl type_url::TypeUrl for ImmutableUintProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.ImmutableUintProp";
}
impl serde_impl::ProtobufSerializable for ImmutableUintProp {
    type ProtobufMessage = ImmutableUintPropProto;
}
/// Renders the inner value exactly as `u64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for ImmutableUintProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// unsigned integer widths up to u64.
impl_from_for_unit!(
    ImmutableUintProp,
    prop,
    u64,
    u32,
    u16,
    u8,
    &u64,
    &u32,
    &u16,
    &u8
);
//
// DecrementOnlyUintProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Unsigned property that may only shrink: merging keeps the minimum value.
pub struct DecrementOnlyUintProp {
    pub prop: u64,
}
impl DecrementOnlyUintProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> u64 {
        self.prop
    }
    /// Merges by keeping the smaller of the two values (decrement-only semantics).
    // NOTE(review): unlike the increment-only variants this emits no trace event.
    pub fn merge_property(&mut self, other_prop: &Self) {
        self.prop = std::cmp::min(self.prop, other_prop.prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<DecrementOnlyUintPropProto> for DecrementOnlyUintProp {
    fn from(decrement_only_uint_prop_proto: DecrementOnlyUintPropProto) -> Self {
        DecrementOnlyUintProp {
            prop: decrement_only_uint_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<DecrementOnlyUintProp> for DecrementOnlyUintPropProto {
    fn from(decrement_only_uint_prop: DecrementOnlyUintProp) -> Self {
        DecrementOnlyUintPropProto {
            prop: decrement_only_uint_prop.prop,
        }
    }
}
impl type_url::TypeUrl for DecrementOnlyUintProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.DecrementOnlyUintProp";
}
impl serde_impl::ProtobufSerializable for DecrementOnlyUintProp {
    type ProtobufMessage = DecrementOnlyUintPropProto;
}
/// Renders the inner value exactly as `u64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for DecrementOnlyUintProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// unsigned integer widths up to u64.
impl_from_for_unit!(
    DecrementOnlyUintProp,
    prop,
    u64,
    u32,
    u16,
    u8,
    &u64,
    &u32,
    &u16,
    &u8
);
//
// IncrementOnlyIntProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Signed property that may only grow: merging keeps the maximum value.
pub struct IncrementOnlyIntProp {
    pub prop: i64,
}
impl IncrementOnlyIntProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> i64 {
        self.prop
    }
    /// Merges by keeping the larger of the two values (increment-only semantics).
    pub fn merge_property(&mut self, other_prop: &Self) {
        tracing::trace!(message="IncrementOnlyIntProp merge", self_prop=?self, other_prop=?other_prop);
        self.prop = std::cmp::max(self.prop, other_prop.prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<IncrementOnlyIntPropProto> for IncrementOnlyIntProp {
    fn from(increment_only_int_prop_proto: IncrementOnlyIntPropProto) -> Self {
        IncrementOnlyIntProp {
            prop: increment_only_int_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<IncrementOnlyIntProp> for IncrementOnlyIntPropProto {
    fn from(increment_only_int_prop: IncrementOnlyIntProp) -> Self {
        IncrementOnlyIntPropProto {
            prop: increment_only_int_prop.prop,
        }
    }
}
impl type_url::TypeUrl for IncrementOnlyIntProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.IncrementOnlyIntProp";
}
impl serde_impl::ProtobufSerializable for IncrementOnlyIntProp {
    type ProtobufMessage = IncrementOnlyIntPropProto;
}
/// Renders the inner value exactly as `i64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for IncrementOnlyIntProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// signed integer widths up to i64.
impl_from_for_unit!(
    IncrementOnlyIntProp,
    prop,
    i64,
    i32,
    i16,
    i8,
    &i64,
    &i32,
    &i16,
    &i8
);
//
// DecrementOnlyIntProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Signed property that may only shrink: merging keeps the minimum value.
pub struct DecrementOnlyIntProp {
    pub prop: i64,
}
impl DecrementOnlyIntProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> i64 {
        self.prop
    }
    /// Merges by keeping the smaller of the two values (decrement-only semantics).
    // NOTE(review): unlike the increment-only variants this emits no trace event.
    pub fn merge_property(&mut self, other_prop: &Self) {
        self.prop = std::cmp::min(self.prop, other_prop.prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<DecrementOnlyIntPropProto> for DecrementOnlyIntProp {
    fn from(decrement_only_int_prop_proto: DecrementOnlyIntPropProto) -> Self {
        DecrementOnlyIntProp {
            prop: decrement_only_int_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<DecrementOnlyIntProp> for DecrementOnlyIntPropProto {
    fn from(decrement_only_int_prop: DecrementOnlyIntProp) -> Self {
        DecrementOnlyIntPropProto {
            prop: decrement_only_int_prop.prop,
        }
    }
}
impl type_url::TypeUrl for DecrementOnlyIntProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.DecrementOnlyIntProp";
}
impl serde_impl::ProtobufSerializable for DecrementOnlyIntProp {
    type ProtobufMessage = DecrementOnlyIntPropProto;
}
/// Renders the inner value exactly as `i64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for DecrementOnlyIntProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// signed integer widths up to i64.
impl_from_for_unit!(
    DecrementOnlyIntProp,
    prop,
    i64,
    i32,
    i16,
    i8,
    &i64,
    &i32,
    &i16,
    &i8
);
//
// ImmutableIntProp
//
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
// Signed property that never changes once set: merging keeps `self` as-is.
pub struct ImmutableIntProp {
    pub prop: i64,
}
impl ImmutableIntProp {
    /// Returns the raw inner value.
    pub fn as_inner(&self) -> i64 {
        self.prop
    }
    /// Immutable semantics: the merge is a no-op (first value wins); only a
    /// trace event is emitted for observability.
    pub fn merge_property(&mut self, other_prop: &Self) {
        tracing::trace!(message="ImmutableIntProp merge", self_prop=?self, other_prop=?other_prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<ImmutableIntPropProto> for ImmutableIntProp {
    fn from(immutable_int_prop_proto: ImmutableIntPropProto) -> Self {
        ImmutableIntProp {
            prop: immutable_int_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<ImmutableIntProp> for ImmutableIntPropProto {
    fn from(immutable_int_prop: ImmutableIntProp) -> Self {
        ImmutableIntPropProto {
            prop: immutable_int_prop.prop,
        }
    }
}
impl type_url::TypeUrl for ImmutableIntProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.ImmutableIntProp";
}
impl serde_impl::ProtobufSerializable for ImmutableIntProp {
    type ProtobufMessage = ImmutableIntPropProto;
}
/// Renders the inner value exactly as `i64`'s `Display` does.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for ImmutableIntProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.prop)
    }
}
// Blanket `From` conversions into the unit struct for owned and borrowed
// signed integer widths up to i64.
impl_from_for_unit!(
    ImmutableIntProp,
    prop,
    i64,
    i32,
    i16,
    i8,
    &i64,
    &i32,
    &i16,
    &i8
);
//
// ImmutableStrProp
//
#[derive(Debug, PartialEq, Clone)]
// String property that never changes once set: merging keeps `self` as-is.
pub struct ImmutableStrProp {
    pub prop: String,
}
impl ImmutableStrProp {
    /// Borrows the inner string.
    pub fn as_inner(&self) -> &str {
        self.prop.as_str()
    }
    /// Immutable semantics: the merge is a no-op (first value wins); only a
    /// trace event is emitted for observability.
    pub fn merge_property(&mut self, other_prop: &Self) {
        tracing::trace!(message="ImmutableStrProp merge", self_prop=?self, other_prop=?other_prop);
    }
}
// Proto -> domain conversion (field-for-field).
impl From<ImmutableStrPropProto> for ImmutableStrProp {
    fn from(immutable_str_prop_proto: ImmutableStrPropProto) -> Self {
        ImmutableStrProp {
            prop: immutable_str_prop_proto.prop,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<ImmutableStrProp> for ImmutableStrPropProto {
    fn from(immutable_str_prop: ImmutableStrProp) -> Self {
        ImmutableStrPropProto {
            prop: immutable_str_prop.prop,
        }
    }
}
impl type_url::TypeUrl for ImmutableStrProp {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.ImmutableStrProp";
}
impl serde_impl::ProtobufSerializable for ImmutableStrProp {
    type ProtobufMessage = ImmutableStrPropProto;
}
/// Renders the inner string unchanged.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps `.to_string()` callers working.
impl std::fmt::Display for ImmutableStrProp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.prop)
    }
}
// Blanket `From` conversions into the unit struct for common owned and
// borrowed string types.
impl_from_for_unit!(
    ImmutableStrProp,
    prop,
    String,
    &String,
    &str,
    &std::borrow::Cow<'_, str>
);
//
// NodeProperty
//
#[derive(Debug, PartialEq, Clone)]
// Sum type over every supported property representation; mirrors the proto
// `oneof` in `NodePropertyProto`.
pub enum Property {
    IncrementOnlyUintProp(IncrementOnlyUintProp),
    DecrementOnlyUintProp(DecrementOnlyUintProp),
    ImmutableUintProp(ImmutableUintProp),
    IncrementOnlyIntProp(IncrementOnlyIntProp),
    DecrementOnlyIntProp(DecrementOnlyIntProp),
    ImmutableIntProp(ImmutableIntProp),
    ImmutableStrProp(ImmutableStrProp),
}
impl Property {
    /// Merges `other` into `self` using the semantics of the underlying kind:
    /// max for increment-only, min for decrement-only, first-value-wins for
    /// immutable (see each prop type's `merge_property`).
    ///
    /// Mismatched variants are a soft error: debug builds assert, release
    /// builds log a warning and leave `self` unchanged.
    pub fn merge_property(&mut self, other: &Self) {
        match (self, other) {
            (
                Property::IncrementOnlyUintProp(ref mut self_prop),
                Property::IncrementOnlyUintProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::ImmutableUintProp(ref mut self_prop),
                Property::ImmutableUintProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::DecrementOnlyUintProp(ref mut self_prop),
                Property::DecrementOnlyUintProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::DecrementOnlyIntProp(ref mut self_prop),
                Property::DecrementOnlyIntProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::IncrementOnlyIntProp(ref mut self_prop),
                Property::IncrementOnlyIntProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::ImmutableIntProp(ref mut self_prop),
                Property::ImmutableIntProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            (
                Property::ImmutableStrProp(ref mut self_prop),
                Property::ImmutableStrProp(ref other_prop),
            ) => self_prop.merge_property(other_prop),
            // technically we could improve type safety here by exhausting the combinations,
            // but I'm not going to type that all out right now
            // TODO: figure out what this comment means ^^
            (p, op) => {
                // Currently we don't guarantee that randomly generated nodes will have consistent
                // property types when they share a property name
                // TODO: figure out what this comment means ^^
                debug_assert!(false, "Invalid property merge: {:?} {:?}", p, op);
                tracing::warn!("Invalid property merge: {:?} {:?}", p, op);
            }
        }
    }
}
// Lift each concrete property type directly into the `Property` sum type.
impl From<ImmutableUintProp> for Property {
    fn from(p: ImmutableUintProp) -> Self {
        Self::ImmutableUintProp(p)
    }
}
impl From<IncrementOnlyUintProp> for Property {
    fn from(p: IncrementOnlyUintProp) -> Self {
        Self::IncrementOnlyUintProp(p)
    }
}
impl From<DecrementOnlyUintProp> for Property {
    fn from(p: DecrementOnlyUintProp) -> Self {
        Self::DecrementOnlyUintProp(p)
    }
}
impl From<ImmutableIntProp> for Property {
    fn from(p: ImmutableIntProp) -> Self {
        Self::ImmutableIntProp(p)
    }
}
impl From<IncrementOnlyIntProp> for Property {
    fn from(p: IncrementOnlyIntProp) -> Self {
        Self::IncrementOnlyIntProp(p)
    }
}
impl From<DecrementOnlyIntProp> for Property {
    fn from(p: DecrementOnlyIntProp) -> Self {
        Self::DecrementOnlyIntProp(p)
    }
}
impl From<ImmutableStrProp> for Property {
    fn from(p: ImmutableStrProp) -> Self {
        Self::ImmutableStrProp(p)
    }
}
/// Formats the wrapped value exactly as the underlying prop type renders it.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps every existing `.to_string()` call
/// working, and the type additionally becomes usable in `format!`/`write!`.
/// The arms delegate via `.to_string()` so this impl does not depend on the
/// leaf types implementing `Display` themselves.
impl std::fmt::Display for Property {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let rendered = match self {
            Property::IncrementOnlyUintProp(increment_only_uint_prop) => {
                increment_only_uint_prop.to_string()
            }
            Property::ImmutableUintProp(immutable_uint_prop) => immutable_uint_prop.to_string(),
            Property::DecrementOnlyUintProp(decrement_only_uint_prop) => {
                decrement_only_uint_prop.to_string()
            }
            Property::DecrementOnlyIntProp(decrement_only_int_prop) => {
                decrement_only_int_prop.to_string()
            }
            Property::IncrementOnlyIntProp(increment_only_int_prop) => {
                increment_only_int_prop.to_string()
            }
            Property::ImmutableIntProp(immutable_int_prop) => immutable_int_prop.to_string(),
            Property::ImmutableStrProp(immutable_str_prop) => immutable_str_prop.to_string(),
        };
        f.write_str(&rendered)
    }
}
#[derive(Debug, PartialEq, Clone)]
// Newtype wrapper around `Property`; the unit stored per key on a node.
pub struct NodeProperty {
    pub property: Property,
}
impl NodeProperty {
    /// Delegates to `Property::merge_property` for kind-aware merging.
    pub fn merge(&mut self, other: &Self) {
        self.property.merge_property(&other.property)
    }

    /// Returns a clone of the inner value if this is an increment-only uint.
    pub fn as_increment_only_uint(&self) -> Option<IncrementOnlyUintProp> {
        if let Property::IncrementOnlyUintProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Returns a clone of the inner value if this is an immutable uint.
    pub fn as_immutable_uint(&self) -> Option<ImmutableUintProp> {
        if let Property::ImmutableUintProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Returns a clone of the inner value if this is a decrement-only uint.
    pub fn as_decrement_only_uint(&self) -> Option<DecrementOnlyUintProp> {
        if let Property::DecrementOnlyUintProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Returns a clone of the inner value if this is a decrement-only int.
    pub fn as_decrement_only_int(&self) -> Option<DecrementOnlyIntProp> {
        if let Property::DecrementOnlyIntProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Returns a clone of the inner value if this is an increment-only int.
    pub fn as_increment_only_int(&self) -> Option<IncrementOnlyIntProp> {
        if let Property::IncrementOnlyIntProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Returns a clone of the inner value if this is an immutable int.
    pub fn as_immutable_int(&self) -> Option<ImmutableIntProp> {
        if let Property::ImmutableIntProp(prop) = &self.property {
            Some(prop.clone())
        } else {
            None
        }
    }

    /// Borrows the inner value if this is an immutable string.
    pub fn as_immutable_str(&self) -> Option<&ImmutableStrProp> {
        if let Property::ImmutableStrProp(prop) = &self.property {
            Some(prop)
        } else {
            None
        }
    }
}
impl TryFrom<NodePropertyProto> for NodeProperty {
    type Error = SerDeError;

    /// Converts the proto `oneof` into the domain `Property` wrapper.
    ///
    /// Fails with `SerDeError::MissingField("property")` when the `oneof`
    /// was not populated on the wire.
    fn try_from(node_property_proto: NodePropertyProto) -> Result<Self, Self::Error> {
        // Local alias keeps the generated-code path readable.
        use crate::protobufs::graplinc::grapl::api::graph::v1beta1::node_property::Property as ProtoProperty;
        let property = match node_property_proto.property {
            Some(ProtoProperty::IncrementOnlyUint(p)) => Property::IncrementOnlyUintProp(p.into()),
            Some(ProtoProperty::DecrementOnlyUint(p)) => Property::DecrementOnlyUintProp(p.into()),
            Some(ProtoProperty::ImmutableUint(p)) => Property::ImmutableUintProp(p.into()),
            Some(ProtoProperty::IncrementOnlyInt(p)) => Property::IncrementOnlyIntProp(p.into()),
            Some(ProtoProperty::DecrementOnlyInt(p)) => Property::DecrementOnlyIntProp(p.into()),
            Some(ProtoProperty::ImmutableInt(p)) => Property::ImmutableIntProp(p.into()),
            Some(ProtoProperty::ImmutableStr(p)) => Property::ImmutableStrProp(p.into()),
            None => return Err(SerDeError::MissingField("property")),
        };
        Ok(NodeProperty { property })
    }
}
impl From<NodeProperty> for NodePropertyProto {
    /// Converts the domain property into the proto `oneof` wrapper.
    fn from(node_property: NodeProperty) -> Self {
        // Local alias keeps the generated-code path readable.
        use crate::protobufs::graplinc::grapl::api::graph::v1beta1::node_property::Property as ProtoProperty;
        let property = match node_property.property {
            Property::IncrementOnlyUintProp(p) => ProtoProperty::IncrementOnlyUint(p.into()),
            Property::DecrementOnlyUintProp(p) => ProtoProperty::DecrementOnlyUint(p.into()),
            Property::ImmutableUintProp(p) => ProtoProperty::ImmutableUint(p.into()),
            Property::IncrementOnlyIntProp(p) => ProtoProperty::IncrementOnlyInt(p.into()),
            Property::DecrementOnlyIntProp(p) => ProtoProperty::DecrementOnlyInt(p.into()),
            Property::ImmutableIntProp(p) => ProtoProperty::ImmutableInt(p.into()),
            Property::ImmutableStrProp(p) => ProtoProperty::ImmutableStr(p.into()),
        };
        NodePropertyProto {
            property: Some(property),
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for NodeProperty {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.NodeProperty";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for NodeProperty {
    type ProtobufMessage = NodePropertyProto;
}
/// Formats the wrapped value exactly as the underlying prop type renders it.
///
/// Implemented as `Display` rather than a manual `ToString` impl: the blanket
/// `impl<T: Display> ToString for T` keeps every existing `.to_string()` call
/// working. The arms delegate via `.to_string()` so this impl does not depend
/// on the leaf types implementing `Display` themselves.
impl std::fmt::Display for NodeProperty {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let rendered = match &self.property {
            Property::IncrementOnlyUintProp(increment_only_uint_prop) => {
                increment_only_uint_prop.to_string()
            }
            Property::ImmutableUintProp(immutable_uint_prop) => immutable_uint_prop.to_string(),
            Property::DecrementOnlyUintProp(decrement_only_uint_prop) => {
                decrement_only_uint_prop.to_string()
            }
            Property::DecrementOnlyIntProp(decrement_only_int_prop) => {
                decrement_only_int_prop.to_string()
            }
            Property::IncrementOnlyIntProp(increment_only_int_prop) => {
                increment_only_int_prop.to_string()
            }
            Property::ImmutableIntProp(immutable_int_prop) => immutable_int_prop.to_string(),
            Property::ImmutableStrProp(immutable_str_prop) => immutable_str_prop.to_string(),
        };
        f.write_str(&rendered)
    }
}
// Blanket lift: anything convertible into `Property` (all concrete prop
// types, via the impls above) converts directly into a `NodeProperty`.
impl<T> From<T> for NodeProperty
where
    T: Into<Property>,
{
    fn from(t: T) -> Self {
        NodeProperty { property: t.into() }
    }
}
//
// NodeDescription
//
#[derive(Debug, PartialEq, Clone)]
// A not-yet-identified node: properties plus the strategies used to resolve
// its canonical identity downstream.
pub struct NodeDescription {
    pub properties: HashMap<String, NodeProperty>,
    pub node_key: String,
    pub node_type: String,
    pub id_strategy: Vec<IdStrategy>,
}
impl NodeDescription {
    /// Looks up a property by name.
    pub fn get_property(&self, name: impl AsRef<str>) -> Option<&NodeProperty> {
        let key = name.as_ref();
        self.properties.get(key)
    }

    /// Inserts or overwrites the property stored under `name`.
    pub fn set_property(&mut self, name: impl Into<String>, value: impl Into<NodeProperty>) {
        let (name, value) = (name.into(), value.into());
        self.properties.insert(name, value);
    }

    /// Replaces the node key.
    pub fn set_key(&mut self, key: String) {
        self.node_key = key;
    }
}
impl NodeDescription {
    /// Merges `other` into `self` property-by-property.
    ///
    /// Both values are expected to describe the same logical node; the
    /// key/type equality is only checked in debug builds.
    pub fn merge(&mut self, other: &Self) {
        debug_assert_eq!(self.node_type, other.node_type);
        debug_assert_eq!(self.node_key, other.node_key);
        for (prop_name, prop_value) in other.properties.iter() {
            match self.properties.get_mut(prop_name) {
                Some(self_prop) => self_prop.merge(prop_value),
                // Property only exists on `other`: copy it over.
                None => {
                    self.properties
                        .insert(prop_name.clone(), prop_value.clone());
                }
            }
        }
    }
    /// Borrows the node key.
    pub fn get_node_key(&self) -> &str {
        self.node_key.as_str()
    }
    /// Returns an owned copy of the node key.
    pub fn clone_node_key(&self) -> String {
        self.node_key.clone()
    }
}
impl TryFrom<NodeDescriptionProto> for NodeDescription {
    type Error = SerDeError;

    /// Converts the wire node description; fails if any property or
    /// id-strategy entry is malformed (first error wins).
    fn try_from(node_description_proto: NodeDescriptionProto) -> Result<Self, Self::Error> {
        let properties = node_description_proto
            .properties
            .into_iter()
            .map(|(key, property)| NodeProperty::try_from(property).map(|p| (key, p)))
            .collect::<Result<HashMap<_, _>, _>>()?;
        let id_strategy = node_description_proto
            .id_strategy
            .into_iter()
            .map(IdStrategy::try_from)
            .collect::<Result<Vec<_>, _>>()?;
        Ok(NodeDescription {
            properties,
            node_key: node_description_proto.node_key,
            node_type: node_description_proto.node_type,
            id_strategy,
        })
    }
}
impl From<NodeDescription> for NodeDescriptionProto {
fn from(node_description: NodeDescription) -> Self {
let mut properties = HashMap::with_capacity(node_description.properties.len());
for (key, property) in node_description.properties {
properties.insert(key, NodePropertyProto::from(property));
}
let mut id_strategy = Vec::with_capacity(node_description.id_strategy.len());
for strategy in node_description.id_strategy {
id_strategy.push(IdStrategyProto::from(strategy));
}
NodeDescriptionProto {
properties,
node_key: node_description.node_key,
node_type: node_description.node_type,
id_strategy,
}
}
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for NodeDescription {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.NodeDescription";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for NodeDescription {
    type ProtobufMessage = NodeDescriptionProto;
}
//
// IdentifiedNode
//
#[derive(Debug, PartialEq, Clone)]
// A node whose identity has been resolved: like `NodeDescription` but with
// the id strategies stripped (identification already happened).
pub struct IdentifiedNode {
    pub properties: HashMap<String, NodeProperty>,
    pub node_key: String,
    pub node_type: String,
}
impl IdentifiedNode {
    /// Merges `other` into `self` property-by-property (same contract as
    /// `NodeDescription::merge`); key/type equality is debug-checked only.
    pub fn merge(&mut self, other: &Self) {
        debug_assert_eq!(self.node_type, other.node_type);
        debug_assert_eq!(self.node_key, other.node_key);
        for (prop_name, prop_value) in other.properties.iter() {
            match self.properties.get_mut(prop_name) {
                Some(self_prop) => self_prop.merge(prop_value),
                None => {
                    self.properties
                        .insert(prop_name.clone(), prop_value.clone());
                }
            }
        }
    }
    /// Builds one cache key per property, each encoded as the UTF-8 bytes of
    /// `"<node_key>:<property_name>:<property_value>"`. Order follows the
    /// HashMap's iteration order (i.e. unspecified).
    pub fn get_cache_identities_for_predicates(&self) -> Vec<Vec<u8>> {
        let mut predicate_cache_identities = Vec::with_capacity(self.properties.len());
        for (key, prop) in &self.properties {
            let prop_value = prop.property.to_string();
            predicate_cache_identities.push(format!("{}:{}:{}", &self.node_key, key, prop_value));
        }
        predicate_cache_identities
            .into_iter()
            .map(|item| item.into_bytes())
            .collect()
    }
    /// Upgrades this node to a `MergedNode` by attaching its `uid`.
    // NOTE(review): the name shadows `Into::into`; a rename (e.g. `with_uid`)
    // would be clearer but would break existing callers.
    pub fn into(self, uid: u64) -> MergedNode {
        MergedNode {
            uid,
            properties: self.properties,
            node_key: self.node_key,
            node_type: self.node_type,
        }
    }
    /// Borrows the node key.
    pub fn get_node_key(&self) -> &str {
        self.node_key.as_str()
    }
    /// Returns an owned copy of the node key.
    pub fn clone_node_key(&self) -> String {
        self.node_key.clone()
    }
}
impl TryFrom<IdentifiedNodeProto> for IdentifiedNode {
    type Error = SerDeError;

    /// Converts the wire node; fails on the first malformed property.
    fn try_from(identified_node_proto: IdentifiedNodeProto) -> Result<Self, Self::Error> {
        let properties = identified_node_proto
            .properties
            .into_iter()
            .map(|(key, property)| NodeProperty::try_from(property).map(|p| (key, p)))
            .collect::<Result<HashMap<_, _>, _>>()?;
        Ok(IdentifiedNode {
            properties,
            node_key: identified_node_proto.node_key,
            node_type: identified_node_proto.node_type,
        })
    }
}
impl From<IdentifiedNode> for IdentifiedNodeProto {
fn from(identified_node: IdentifiedNode) -> Self {
let mut properties = HashMap::with_capacity(identified_node.properties.len());
for (key, property) in identified_node.properties {
properties.insert(key, NodePropertyProto::from(property));
}
IdentifiedNodeProto {
properties,
node_key: identified_node.node_key,
node_type: identified_node.node_type,
}
}
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for IdentifiedNode {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.IdentifiedNode";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for IdentifiedNode {
    type ProtobufMessage = IdentifiedNodeProto;
}
// Demotes a `NodeDescription` to an `IdentifiedNode`. Note: the description's
// `id_strategy` is discarded here — presumably intentional (identification is
// done at this point), but worth confirming.
impl From<NodeDescription> for IdentifiedNode {
    fn from(n: NodeDescription) -> Self {
        IdentifiedNode {
            properties: n.properties,
            node_key: n.node_key,
            node_type: n.node_type,
        }
    }
}
//
// MergedNode
//
#[derive(Debug, PartialEq, Clone)]
// An identified node that has additionally been assigned a numeric `uid`.
pub struct MergedNode {
    pub properties: HashMap<String, NodeProperty>,
    pub uid: u64,
    pub node_key: String,
    pub node_type: String,
}
impl MergedNode {
    /// Builds a `MergedNode` by pairing an identified node with its `uid`.
    // NOTE(review): inherent method named `from` (not the `From` trait,
    // which can't take the extra `uid` argument) — slightly confusing at
    // call sites but renaming would break callers.
    pub fn from(n: IdentifiedNode, uid: u64) -> Self {
        Self {
            uid,
            properties: n.properties,
            node_key: n.node_key,
            node_type: n.node_type,
        }
    }
    /// Merges `other` into `self` property-by-property (same contract as
    /// `NodeDescription::merge`); key/type equality is debug-checked only.
    pub fn merge(&mut self, other: &Self) {
        debug_assert_eq!(self.node_type, other.node_type);
        debug_assert_eq!(self.node_key, other.node_key);
        for (prop_name, prop_value) in other.properties.iter() {
            match self.properties.get_mut(prop_name) {
                Some(self_prop) => self_prop.merge(prop_value),
                None => {
                    self.properties
                        .insert(prop_name.clone(), prop_value.clone());
                }
            }
        }
    }
    /// Borrows the node key.
    pub fn get_node_key(&self) -> &str {
        self.node_key.as_str()
    }
    /// Returns an owned copy of the node key.
    pub fn clone_node_key(&self) -> String {
        self.node_key.clone()
    }
}
impl TryFrom<MergedNodeProto> for MergedNode {
    type Error = SerDeError;

    /// Converts the wire node; fails on the first malformed property.
    fn try_from(merged_node_proto: MergedNodeProto) -> Result<Self, Self::Error> {
        let properties = merged_node_proto
            .properties
            .into_iter()
            .map(|(key, property)| NodeProperty::try_from(property).map(|p| (key, p)))
            .collect::<Result<HashMap<_, _>, _>>()?;
        Ok(MergedNode {
            properties,
            uid: merged_node_proto.uid,
            node_key: merged_node_proto.node_key,
            node_type: merged_node_proto.node_type,
        })
    }
}
impl From<MergedNode> for MergedNodeProto {
fn from(merged_node: MergedNode) -> Self {
let mut properties = HashMap::with_capacity(merged_node.properties.len());
for (key, property) in merged_node.properties {
properties.insert(key, NodePropertyProto::from(property));
}
MergedNodeProto {
properties,
uid: merged_node.uid,
node_key: merged_node.node_key,
node_type: merged_node.node_type,
}
}
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for MergedNode {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.MergedNode";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for MergedNode {
    type ProtobufMessage = MergedNodeProto;
}
//
// Edge
//
#[derive(Debug, PartialEq, Clone)]
// A directed, named edge between two nodes, addressed by node key.
pub struct Edge {
    pub to_node_key: String,
    pub from_node_key: String,
    pub edge_name: String,
}
// Proto -> domain conversion (field-for-field).
impl From<EdgeProto> for Edge {
    fn from(edge_proto: EdgeProto) -> Self {
        Edge {
            to_node_key: edge_proto.to_node_key,
            from_node_key: edge_proto.from_node_key,
            edge_name: edge_proto.edge_name,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<Edge> for EdgeProto {
    fn from(edge: Edge) -> Self {
        EdgeProto {
            from_node_key: edge.from_node_key,
            to_node_key: edge.to_node_key,
            edge_name: edge.edge_name,
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for Edge {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.Edge";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for Edge {
    type ProtobufMessage = EdgeProto;
}
//
// MergedEdge
//
#[derive(Debug, PartialEq, Clone)]
// An `Edge` whose endpoints have been resolved to uids.
// NOTE(review): `from_uid`/`to_uid` are `String`s here (while `MergedNode.uid`
// is `u64`) — this mirrors the proto definition; confirm the asymmetry is
// intentional.
pub struct MergedEdge {
    pub from_uid: String,
    pub from_node_key: String,
    pub to_uid: String,
    pub to_node_key: String,
    pub edge_name: String,
}
// Proto -> domain conversion (field-for-field).
impl From<MergedEdgeProto> for MergedEdge {
    fn from(merged_edge_proto: MergedEdgeProto) -> Self {
        MergedEdge {
            from_uid: merged_edge_proto.from_uid,
            from_node_key: merged_edge_proto.from_node_key,
            to_uid: merged_edge_proto.to_uid,
            to_node_key: merged_edge_proto.to_node_key,
            edge_name: merged_edge_proto.edge_name,
        }
    }
}
// Domain -> proto conversion (field-for-field).
impl From<MergedEdge> for MergedEdgeProto {
    fn from(merged_edge: MergedEdge) -> Self {
        MergedEdgeProto {
            from_uid: merged_edge.from_uid,
            from_node_key: merged_edge.from_node_key,
            to_uid: merged_edge.to_uid,
            to_node_key: merged_edge.to_node_key,
            edge_name: merged_edge.edge_name,
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for MergedEdge {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.MergedEdge";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for MergedEdge {
    type ProtobufMessage = MergedEdgeProto;
}
//
// EdgeList
//
#[derive(Debug, PartialEq, Clone)]
// Collection of edges; used as the per-source-node bucket in `GraphDescription`.
pub struct EdgeList {
    pub edges: Vec<Edge>,
}
impl From<EdgeListProto> for EdgeList {
    /// Converts each wire edge into its domain counterpart.
    fn from(proto: EdgeListProto) -> Self {
        EdgeList {
            edges: proto.edges.into_iter().map(Edge::from).collect(),
        }
    }
}
impl From<EdgeList> for EdgeListProto {
    /// Converts each domain edge back into its wire counterpart.
    fn from(source: EdgeList) -> Self {
        EdgeListProto {
            edges: source.edges.into_iter().map(EdgeProto::from).collect(),
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for EdgeList {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.EdgeList";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for EdgeList {
    type ProtobufMessage = EdgeListProto;
}
//
// MergedEdgeList
//
#[derive(Debug, PartialEq, Clone)]
// Collection of uid-resolved edges.
pub struct MergedEdgeList {
    pub edges: Vec<MergedEdge>,
}
impl From<MergedEdgeListProto> for MergedEdgeList {
    /// Converts each wire edge into its domain counterpart.
    fn from(proto: MergedEdgeListProto) -> Self {
        MergedEdgeList {
            edges: proto.edges.into_iter().map(MergedEdge::from).collect(),
        }
    }
}
impl From<MergedEdgeList> for MergedEdgeListProto {
    /// Converts each domain edge back into its wire counterpart.
    fn from(source: MergedEdgeList) -> Self {
        MergedEdgeListProto {
            edges: source.edges.into_iter().map(MergedEdgeProto::from).collect(),
        }
    }
}
// Stable, fully-qualified protobuf type URL for this message.
impl type_url::TypeUrl for MergedEdgeList {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.MergedEdgeList";
}
// Ties the domain type to its protobuf message for (de)serialization.
impl serde_impl::ProtobufSerializable for MergedEdgeList {
    type ProtobufMessage = MergedEdgeListProto;
}
//
// GraphDescription
//
#[derive(Debug, Default, PartialEq, Clone)]
// A graph of not-yet-identified nodes. `edges` is keyed by the source
// node's key; each bucket holds all out-edges of that node.
pub struct GraphDescription {
    pub nodes: HashMap<String, NodeDescription>,
    pub edges: HashMap<String, EdgeList>,
}
impl GraphDescription {
    /// Creates an empty graph (no nodes, no edges).
    pub fn new() -> Self {
        Self {
            nodes: Default::default(),
            edges: Default::default(),
        }
    }
    /// Adds `node`, merging it into any existing node with the same key.
    pub fn add_node(&mut self, node: impl Into<NodeDescription>) {
        let node = node.into();
        match self.nodes.get_mut(&node.node_key) {
            Some(n) => n.merge(&node),
            None => {
                self.nodes.insert(node.clone_node_key(), node);
            }
        };
    }
    /// Records a directed edge `from -> to` under `edge_name`.
    ///
    /// Panics (in all build profiles, via `assert_ne!`) when
    /// `from_node_key == to_node_key`: self-edges are disallowed.
    /// Duplicate edges are appended, not deduplicated.
    pub fn add_edge(
        &mut self,
        edge_name: impl Into<String>,
        from_node_key: impl Into<String>,
        to_node_key: impl Into<String>,
    ) {
        let from_node_key = from_node_key.into();
        let to_node_key = to_node_key.into();
        let edge_name = edge_name.into();
        assert_ne!(from_node_key, to_node_key);
        let edge = Edge {
            from_node_key: from_node_key.clone(),
            to_node_key,
            edge_name,
        };
        // All out-edges for a source node live in one EdgeList bucket.
        let edge_list: &mut Vec<Edge> = &mut self
            .edges
            .entry(from_node_key)
            .or_insert_with(|| EdgeList {
                edges: Vec::with_capacity(1),
            })
            .edges;
        edge_list.push(edge);
    }
    /// Merges all nodes and edges from `other` into `self`. Edges are
    /// re-added through `add_edge`, so its self-edge assertion applies.
    pub fn merge(&mut self, other: &Self) {
        for (node_key, other_node) in other.nodes.iter() {
            match self.nodes.get_mut(node_key) {
                Some(n) => n.merge(other_node),
                None => {
                    self.nodes.insert(node_key.clone(), other_node.clone());
                }
            };
        }
        for edge_list in other.edges.values() {
            for edge in edge_list.edges.iter() {
                self.add_edge(
                    edge.edge_name.clone(),
                    edge.from_node_key.clone(),
                    edge.to_node_key.clone(),
                );
            }
        }
    }
    /// True when the graph holds no nodes and no edges.
    pub fn is_empty(&self) -> bool {
        self.nodes.is_empty() && self.edges.is_empty()
    }
}
impl TryFrom<GraphDescriptionProto> for GraphDescription {
type Error = SerDeError;
fn try_from(graph_description_proto: GraphDescriptionProto) -> Result<Self, Self::Error> {
let mut nodes = HashMap::with_capacity(graph_description_proto.nodes.len());
for (key, node_description) in graph_description_proto.nodes {
nodes.insert(key, NodeDescription::try_from(node_description)?);
}
let mut edges = HashMap::with_capacity(graph_description_proto.edges.len());
for (key, edge_list) in graph_description_proto.edges {
edges.insert(key, EdgeList::from(edge_list));
}
Ok(GraphDescription { nodes, edges })
}
}
impl From<GraphDescription> for GraphDescriptionProto {
fn from(graph_description: GraphDescription) -> Self {
let mut nodes = HashMap::with_capacity(graph_description.nodes.len());
for (key, node_description) in graph_description.nodes {
nodes.insert(key, NodeDescriptionProto::from(node_description));
}
let mut edges = HashMap::with_capacity(graph_description.edges.len());
for (key, edge_list) in graph_description.edges {
edges.insert(key, EdgeListProto::from(edge_list));
}
GraphDescriptionProto { nodes, edges }
}
}
// Stable type URL identifying the protobuf message backing `GraphDescription`.
impl type_url::TypeUrl for GraphDescription {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.GraphDescription";
}
// Ties `GraphDescription` to its protobuf wire type for (de)serialization.
impl serde_impl::ProtobufSerializable for GraphDescription {
    type ProtobufMessage = GraphDescriptionProto;
}
//
// IdentifiedGraph
//
/// A graph of [`IdentifiedNode`]s plus the edges between them.
#[derive(Debug, Default, PartialEq, Clone)]
pub struct IdentifiedGraph {
    // All nodes, keyed by their `node_key`.
    pub nodes: HashMap<String, IdentifiedNode>,
    // Outgoing edges, keyed by the source node's key (see `add_edge`).
    pub edges: HashMap<String, EdgeList>,
}
impl IdentifiedGraph {
pub fn new() -> Self {
Self {
nodes: Default::default(),
edges: Default::default(),
}
}
pub fn add_node(&mut self, node: IdentifiedNode) {
match self.nodes.get_mut(&node.node_key) {
Some(n) => n.merge(&node),
None => {
self.nodes.insert(node.clone_node_key(), node);
}
};
}
pub fn add_edge(
&mut self,
edge_name: impl Into<String>,
from_node_key: impl Into<String>,
to_node_key: impl Into<String>,
) {
let from_node_key = from_node_key.into();
let to_node_key = to_node_key.into();
assert_ne!(from_node_key, to_node_key);
let edge_name = edge_name.into();
let edge = Edge {
from_node_key: from_node_key.clone(),
to_node_key,
edge_name,
};
let edge_list: &mut Vec<Edge> = &mut self
.edges
.entry(from_node_key)
.or_insert_with(|| EdgeList {
edges: Vec::with_capacity(1),
})
.edges;
edge_list.push(edge);
}
pub fn merge(&mut self, other: &Self) {
for (node_key, other_node) in other.nodes.iter() {
match self.nodes.get_mut(node_key) {
Some(n) => n.merge(other_node),
None => {
self.nodes.insert(node_key.clone(), other_node.clone());
}
};
}
for edge_list in other.edges.values() {
for edge in edge_list.edges.iter() {
self.add_edge(
edge.edge_name.clone(),
edge.from_node_key.clone(),
edge.to_node_key.clone(),
);
}
}
}
pub fn is_empty(&self) -> bool {
self.nodes.is_empty() && self.edges.is_empty()
}
}
impl TryFrom<IdentifiedGraphProto> for IdentifiedGraph {
type Error = SerDeError;
fn try_from(identified_graph_proto: IdentifiedGraphProto) -> Result<Self, Self::Error> {
let mut nodes = HashMap::with_capacity(identified_graph_proto.nodes.len());
for (key, identified_node) in identified_graph_proto.nodes {
nodes.insert(key, IdentifiedNode::try_from(identified_node)?);
}
let mut edges = HashMap::with_capacity(identified_graph_proto.edges.len());
for (key, edge_list) in identified_graph_proto.edges {
edges.insert(key, EdgeList::from(edge_list));
}
Ok(IdentifiedGraph { nodes, edges })
}
}
impl From<IdentifiedGraph> for IdentifiedGraphProto {
fn from(identified_graph: IdentifiedGraph) -> Self {
let mut nodes = HashMap::with_capacity(identified_graph.nodes.len());
for (key, identified_node) in identified_graph.nodes {
nodes.insert(key, IdentifiedNodeProto::from(identified_node));
}
let mut edges = HashMap::with_capacity(identified_graph.edges.len());
for (key, edge_list) in identified_graph.edges {
edges.insert(key, EdgeListProto::from(edge_list));
}
IdentifiedGraphProto { nodes, edges }
}
}
// Stable type URL identifying the protobuf message backing `IdentifiedGraph`.
impl type_url::TypeUrl for IdentifiedGraph {
    const TYPE_URL: &'static str =
        "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.IdentifiedGraph";
}
// Ties `IdentifiedGraph` to its protobuf wire type for (de)serialization.
impl serde_impl::ProtobufSerializable for IdentifiedGraph {
    type ProtobufMessage = IdentifiedGraphProto;
}
//
// MergedGraph
//
/// A graph of [`MergedNode`]s plus the merged edges between them.
#[derive(Debug, Default, PartialEq, Clone)]
pub struct MergedGraph {
    // All nodes, keyed by their `node_key`.
    pub nodes: HashMap<String, MergedNode>,
    // Outgoing edges, keyed by the source node's key (see `add_edge`).
    pub edges: HashMap<String, MergedEdgeList>,
}
impl MergedGraph {
pub fn new() -> Self {
Self {
nodes: Default::default(),
edges: Default::default(),
}
}
pub fn add_node(&mut self, node: MergedNode) {
match self.nodes.get_mut(&node.node_key) {
Some(n) => n.merge(&node),
None => {
self.nodes.insert(node.clone_node_key(), node);
}
};
}
pub fn add_merged_edge(&mut self, edge: MergedEdge) {
let from_node_key = edge.from_node_key.clone();
let edge_list: &mut Vec<MergedEdge> = &mut self
.edges
.entry(from_node_key)
.or_insert_with(|| MergedEdgeList {
edges: Vec::with_capacity(1),
})
.edges;
edge_list.push(edge);
}
pub fn add_edge(
&mut self,
edge_name: impl Into<String>,
from_node_key: impl Into<String>,
from_uid: impl Into<String>,
to_node_key: impl Into<String>,
to_uid: impl Into<String>,
) {
let edge_name = edge_name.into();
let from_node_key = from_node_key.into();
let from_uid = from_uid.into();
let to_node_key = to_node_key.into();
let to_uid = to_uid.into();
assert_ne!(from_node_key, to_node_key);
assert_ne!(from_uid, to_uid);
let edge = MergedEdge {
from_node_key: from_node_key.clone(),
from_uid,
to_node_key,
to_uid,
edge_name,
};
let edge_list: &mut Vec<MergedEdge> = &mut self
.edges
.entry(from_node_key)
.or_insert_with(|| MergedEdgeList {
edges: Vec::with_capacity(1),
})
.edges;
edge_list.push(edge);
}
pub fn merge(&mut self, other: &Self) {
for (node_key, other_node) in other.nodes.iter() {
match self.nodes.get_mut(node_key) {
Some(n) => n.merge(other_node),
None => {
self.nodes.insert(node_key.clone(), other_node.clone());
}
};
}
for edge_list in other.edges.values() {
for edge in edge_list.edges.iter() {
self.add_edge(
edge.edge_name.clone(),
edge.from_node_key.clone(),
edge.from_uid.clone(),
edge.to_node_key.clone(),
edge.to_uid.clone(),
);
}
}
}
pub fn is_empty(&self) -> bool {
self.nodes.is_empty() && self.edges.is_empty()
}
}
impl TryFrom<MergedGraphProto> for MergedGraph {
type Error = SerDeError;
fn try_from(merged_graph_proto: MergedGraphProto) -> Result<Self, Self::Error> {
let mut nodes = HashMap::with_capacity(merged_graph_proto.nodes.len());
for (key, merged_node) in merged_graph_proto.nodes {
nodes.insert(key, MergedNode::try_from(merged_node)?);
}
let mut edges = HashMap::with_capacity(merged_graph_proto.edges.len());
for (key, merged_edge_list) in merged_graph_proto.edges {
edges.insert(key, MergedEdgeList::from(merged_edge_list));
}
Ok(MergedGraph { nodes, edges })
}
}
impl From<MergedGraph> for MergedGraphProto {
fn from(merged_graph: MergedGraph) -> Self {
let mut nodes = HashMap::with_capacity(merged_graph.nodes.len());
for (key, merged_node) in merged_graph.nodes {
nodes.insert(key, MergedNodeProto::from(merged_node));
}
let mut edges = HashMap::with_capacity(merged_graph.edges.len());
for (key, merged_edge_list) in merged_graph.edges {
edges.insert(key, MergedEdgeListProto::from(merged_edge_list));
}
MergedGraphProto { nodes, edges }
}
}
// Stable type URL identifying the protobuf message backing `MergedGraph`.
impl type_url::TypeUrl for MergedGraph {
    const TYPE_URL: &'static str = "graplsecurity.com/graplinc.grapl.api.graph.v1beta1.MergedGraph";
}
// Ties `MergedGraph` to its protobuf wire type for (de)serialization.
impl serde_impl::ProtobufSerializable for MergedGraph {
    type ProtobufMessage = MergedGraphProto;
}
#[cfg(test)]
pub mod test {
    // TODO: refactor these tests to use proptest instead of quickcheck
    use std::{
        collections::HashMap,
        hash::Hasher,
    };
    use quickcheck::{
        Arbitrary,
        Gen,
    };
    use quickcheck_macros::quickcheck;
    use tracing_subscriber::{
        EnvFilter,
        FmtSubscriber,
    };
    use super::*;
    // Trivial `Arbitrary` impls: each property type wraps a single primitive,
    // so generation simply delegates to the primitive's `Arbitrary` impl.
    impl Arbitrary for IncrementOnlyIntProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: i64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for DecrementOnlyIntProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: i64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for ImmutableIntProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: i64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for IncrementOnlyUintProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: u64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for DecrementOnlyUintProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: u64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for ImmutableUintProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: u64::arbitrary(g),
            }
        }
    }
    impl Arbitrary for ImmutableStrProp {
        fn arbitrary(g: &mut Gen) -> Self {
            Self {
                prop: String::arbitrary(g),
            }
        }
    }
    impl Arbitrary for Property {
        fn arbitrary(g: &mut Gen) -> Self {
            // Generate one candidate of every variant, then pick one at random.
            let props = &[
                Property::IncrementOnlyIntProp(IncrementOnlyIntProp::arbitrary(g)),
                Property::DecrementOnlyIntProp(DecrementOnlyIntProp::arbitrary(g)),
                Property::ImmutableIntProp(ImmutableIntProp::arbitrary(g)),
                Property::IncrementOnlyUintProp(IncrementOnlyUintProp::arbitrary(g)),
                Property::DecrementOnlyUintProp(DecrementOnlyUintProp::arbitrary(g)),
                Property::ImmutableUintProp(ImmutableUintProp::arbitrary(g)),
                Property::ImmutableStrProp(ImmutableStrProp::arbitrary(g)),
            ];
            g.choose(props).unwrap().clone()
        }
    }
    impl Arbitrary for NodeProperty {
        fn arbitrary(g: &mut Gen) -> Self {
            NodeProperty {
                property: Property::arbitrary(g),
            }
        }
    }
    // Deterministic helper hash over a list of byte-like values.
    fn hash(bytes: &[impl AsRef<[u8]>]) -> u64 {
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        for _bytes in bytes {
            hasher.write(_bytes.as_ref());
        }
        hasher.finish() as u64 // NOTE: `finish()` already returns u64; the cast is a no-op
    }
    // Deterministically picks one of `choices` based on a hash of `bytes`.
    fn choice<T: Clone>(bytes: impl AsRef<[u8]>, choices: &[T]) -> T {
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        hasher.write(bytes.as_ref());
        let choice_index = (hasher.finish() as usize) % choices.len();
        choices[choice_index].clone()
    }
    // Builds a property whose variant is chosen deterministically from
    // `node_key` (note: the choice is keyed on `node_key` alone, not the
    // property name), with immutable values also derived deterministically
    // so regenerating the same node/property pair yields the same value.
    fn choose_property(node_key: &str, property_name: &str, g: &mut Gen) -> NodeProperty {
        let s = format!("{}{}", node_key, property_name);
        let props = &[
            Property::IncrementOnlyIntProp(IncrementOnlyIntProp::arbitrary(g)),
            Property::DecrementOnlyIntProp(DecrementOnlyIntProp::arbitrary(g)),
            Property::IncrementOnlyUintProp(IncrementOnlyUintProp::arbitrary(g)),
            Property::DecrementOnlyUintProp(DecrementOnlyUintProp::arbitrary(g)),
            Property::ImmutableIntProp(ImmutableIntProp::from(
                hash(&[node_key, property_name]) as i64
            )),
            Property::ImmutableUintProp(ImmutableUintProp::from(hash(&[node_key, property_name]))),
            Property::ImmutableStrProp(ImmutableStrProp::from(s)),
        ];
        let p: Property = choice(node_key, props);
        p.into()
    }
    impl Arbitrary for IdentifiedNode {
        fn arbitrary(g: &mut Gen) -> Self {
            // Draw keys from a small fixed pool so that generated nodes
            // frequently collide on node_key.
            let node_keys = &[
                "c413e25e-9c50-4faf-8e61-f8bfb0e0d18e".to_string(),
                "0d5c9261-2b6e-4094-8de3-b349cb0aa310".to_string(),
                "ed1f73df-f38d-43c0-87b0-5aff06e1f68b".to_string(),
                "6328e956-117e-4f7f-8a5b-c56be1111f43".to_string(),
            ];
            let node_key = g.choose(node_keys).unwrap().clone();
            // Node type is a deterministic function of the key, so colliding
            // keys always agree on their type.
            let node_types = &["Process", "File", "IpAddress"];
            let node_type = choice(&node_key, node_types);
            let mut properties = HashMap::new();
            let property_names: Vec<String> = Vec::arbitrary(g);
            for property_name in property_names {
                let property = choose_property(&node_key, &property_name, g);
                properties.insert(property_name.to_owned(), property);
            }
            IdentifiedNode {
                node_key: node_key.to_owned(),
                node_type: node_type.to_owned(),
                properties,
            }
        }
    }
    // Installs a tracing subscriber; subsequent calls are no-ops because the
    // error from `set_global_default` is deliberately ignored.
    fn init_test_env() {
        let subscriber = FmtSubscriber::builder()
            .with_env_filter(EnvFilter::from_default_env())
            .finish();
        let _ = tracing::subscriber::set_global_default(subscriber);
    }
    // Immutable properties must be unchanged by a merge.
    #[quickcheck]
    fn test_merge_str(x: ImmutableStrProp, y: ImmutableStrProp) {
        init_test_env();
        let original = x;
        let mut x = original.clone();
        x.merge_property(&y);
        assert_eq!(original, x);
    }
    #[quickcheck]
    fn test_merge_immutable_int(mut x: ImmutableIntProp, y: ImmutableIntProp) {
        init_test_env();
        let original = x.clone();
        x.merge_property(&y);
        assert_eq!(x, original);
    }
    #[quickcheck]
    fn test_merge_immutable_uint(mut x: ImmutableUintProp, y: ImmutableUintProp) {
        init_test_env();
        let original = x.clone();
        x.merge_property(&y);
        assert_eq!(x, original);
    }
    // Increment-only properties must converge to the maximum of the two
    // values; decrement-only properties to the minimum.
    #[quickcheck]
    fn test_merge_uint_max(mut x: IncrementOnlyUintProp, y: IncrementOnlyUintProp) {
        init_test_env();
        x.merge_property(&y);
        assert_eq!(x.clone(), std::cmp::max(x, y));
    }
    #[quickcheck]
    fn test_merge_int_max(mut x: IncrementOnlyIntProp, y: IncrementOnlyIntProp) {
        init_test_env();
        x.merge_property(&y);
        assert_eq!(x.clone(), std::cmp::max(x, y));
    }
    #[quickcheck]
    fn test_merge_uint_min(mut x: DecrementOnlyUintProp, y: DecrementOnlyUintProp) {
        init_test_env();
        x.merge_property(&y);
        assert_eq!(x.clone(), std::cmp::min(x, y));
    }
    #[quickcheck]
    fn test_merge_int_min(mut x: DecrementOnlyIntProp, y: DecrementOnlyIntProp) {
        init_test_env();
        x.merge_property(&y);
        assert_eq!(x.clone(), std::cmp::min(x, y));
    }
    // Merging increment-only properties must be order-independent: folding
    // the list forwards and backwards must yield the same result (the max).
    #[quickcheck]
    fn test_merge_incr_uint_commutative(mut properties: Vec<IncrementOnlyUintProp>) {
        init_test_env();
        if properties.is_empty() {
            return;
        }
        properties.sort_unstable();
        let max_value = properties.iter().max().unwrap().to_owned();
        let mut first_x = properties[0].clone();
        for property in properties.iter() {
            first_x.merge_property(property)
        }
        let properties: Vec<_> = properties.into_iter().rev().collect();
        let mut first_y = properties[0].clone();
        for property in properties.iter() {
            first_y.merge_property(property)
        }
        assert_eq!(first_x, first_y);
        assert_eq!(first_x, max_value);
    }
    // Smoke test: merging two nodes that share a node_key must not panic.
    // (No value-level assertions yet — see the commented-out sketch below.)
    #[quickcheck]
    fn test_merge_identified_node(mut node_0: IdentifiedNode, node_1: IdentifiedNode) {
        if node_0.node_key != node_1.node_key {
            return;
        }
        // let original = node_0.clone();
        node_0.merge(&node_1);
        // for (_o_pred_name, o_pred_val) in original.iter() {
        //     let mut copy = o_pred_val.clone();
        // }
    }
}
| 29.507315 | 119 | 0.607315 |
500431f3b4db15d1537b7d4adf28125843bcc6dc | 19,875 | #[derive(Clone)]
/// Flags group `shared`.
pub struct Flags {
bytes: [u8; 8],
}
impl Flags {
    /// Create flags shared settings group.
    #[allow(unused_variables)]
    pub fn new(builder: Builder) -> Self {
        // Materialize the builder's state for this group into the fixed-size
        // byte array backing every accessor.
        let bvec = builder.state_for("shared");
        debug_assert_eq!(bvec.len(), 8);
        let mut bytes = [0u8; 8];
        bytes.copy_from_slice(&bvec);
        Self { bytes }
    }
}
/// Values for `shared.regalloc`.
///
/// Variant order matches the byte encoding decoded by `Flags::regalloc`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum Regalloc {
    /// `backtracking`.
    Backtracking,
    /// `backtracking_checked`.
    BacktrackingChecked,
    /// `experimental_linear_scan`.
    ExperimentalLinearScan,
    /// `experimental_linear_scan_checked`.
    ExperimentalLinearScanChecked,
}
impl fmt::Display for Regalloc {
    /// Formats the enumerator as its canonical setting-string name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            Self::Backtracking => "backtracking",
            Self::BacktrackingChecked => "backtracking_checked",
            Self::ExperimentalLinearScan => "experimental_linear_scan",
            Self::ExperimentalLinearScanChecked => "experimental_linear_scan_checked",
        };
        f.write_str(name)
    }
}
impl str::FromStr for Regalloc {
    type Err = ();

    /// Parses a setting string into the matching enumerator.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parsed = match s {
            "backtracking" => Self::Backtracking,
            "backtracking_checked" => Self::BacktrackingChecked,
            "experimental_linear_scan" => Self::ExperimentalLinearScan,
            "experimental_linear_scan_checked" => Self::ExperimentalLinearScanChecked,
            _ => return Err(()),
        };
        Ok(parsed)
    }
}
/// Values for `shared.opt_level`.
///
/// Variant order matches the byte encoding decoded by `Flags::opt_level`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum OptLevel {
    /// `none`.
    None,
    /// `speed`.
    Speed,
    /// `speed_and_size`.
    SpeedAndSize,
}
impl fmt::Display for OptLevel {
    /// Formats the enumerator as its canonical setting-string name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            Self::None => "none",
            Self::Speed => "speed",
            Self::SpeedAndSize => "speed_and_size",
        };
        f.write_str(name)
    }
}
impl str::FromStr for OptLevel {
    type Err = ();

    /// Parses a setting string into the matching enumerator.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parsed = match s {
            "none" => Self::None,
            "speed" => Self::Speed,
            "speed_and_size" => Self::SpeedAndSize,
            _ => return Err(()),
        };
        Ok(parsed)
    }
}
/// Values for `shared.tls_model`.
///
/// Variant order matches the byte encoding decoded by `Flags::tls_model`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TlsModel {
    /// `none`.
    None,
    /// `elf_gd`.
    ElfGd,
    /// `macho`.
    Macho,
    /// `coff`.
    Coff,
}
impl fmt::Display for TlsModel {
    /// Formats the enumerator as its canonical setting-string name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            Self::None => "none",
            Self::ElfGd => "elf_gd",
            Self::Macho => "macho",
            Self::Coff => "coff",
        };
        f.write_str(name)
    }
}
impl str::FromStr for TlsModel {
    type Err = ();

    /// Parses a setting string into the matching enumerator.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parsed = match s {
            "none" => Self::None,
            "elf_gd" => Self::ElfGd,
            "macho" => Self::Macho,
            "coff" => Self::Coff,
            _ => return Err(()),
        };
        Ok(parsed)
    }
}
/// Values for `shared.libcall_call_conv`.
///
/// Variant order matches the byte encoding decoded by
/// `Flags::libcall_call_conv`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum LibcallCallConv {
    /// `isa_default`.
    IsaDefault,
    /// `fast`.
    Fast,
    /// `cold`.
    Cold,
    /// `system_v`.
    SystemV,
    /// `windows_fastcall`.
    WindowsFastcall,
    /// `baldrdash_system_v`.
    BaldrdashSystemV,
    /// `baldrdash_windows`.
    BaldrdashWindows,
    /// `probestack`.
    Probestack,
}
impl fmt::Display for LibcallCallConv {
    /// Formats the enumerator as its canonical setting-string name.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match self {
            Self::IsaDefault => "isa_default",
            Self::Fast => "fast",
            Self::Cold => "cold",
            Self::SystemV => "system_v",
            Self::WindowsFastcall => "windows_fastcall",
            Self::BaldrdashSystemV => "baldrdash_system_v",
            Self::BaldrdashWindows => "baldrdash_windows",
            Self::Probestack => "probestack",
        };
        f.write_str(name)
    }
}
impl str::FromStr for LibcallCallConv {
    type Err = ();

    /// Parses a setting string into the matching enumerator.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let parsed = match s {
            "isa_default" => Self::IsaDefault,
            "fast" => Self::Fast,
            "cold" => Self::Cold,
            "system_v" => Self::SystemV,
            "windows_fastcall" => Self::WindowsFastcall,
            "baldrdash_system_v" => Self::BaldrdashSystemV,
            "baldrdash_windows" => Self::BaldrdashWindows,
            "probestack" => Self::Probestack,
            _ => return Err(()),
        };
        Ok(parsed)
    }
}
/// User-defined settings.
#[allow(dead_code)]
impl Flags {
    /// Get a view of the boolean predicates.
    ///
    /// Boolean settings live packed in bytes 6..8 of the flags array.
    pub fn predicate_view(&self) -> crate::settings::PredicateView {
        crate::settings::PredicateView::new(&self.bytes[6..])
    }
    /// Dynamic numbered predicate getter.
    ///
    /// Predicate `p` lives at byte `6 + p / 8`, bit `p % 8`.
    fn numbered_predicate(&self, p: usize) -> bool {
        self.bytes[6 + p / 8] & (1 << (p % 8)) != 0
    }
    /// Register allocator to use with the MachInst backend.
    ///
    /// This selects the register allocator as an option among those offered by the `regalloc.rs`
    /// crate. Please report register allocation bugs to the maintainers of this crate whenever
    /// possible.
    ///
    /// Note: this only applies to target that use the MachInst backend. As of 2020-04-17, this
    /// means the x86_64 backend doesn't use this yet.
    ///
    /// Possible values:
    ///
    /// - `backtracking` is a greedy, backtracking register allocator as implemented in
    /// Spidermonkey's optimizing tier IonMonkey. It may take more time to allocate registers, but
    /// it should generate better code in general, resulting in better throughput of generated
    /// code.
    /// - `backtracking_checked` is the backtracking allocator with additional self checks that may
    /// take some time to run, and thus these checks are disabled by default.
    /// - `experimental_linear_scan` is an experimental linear scan allocator. It may take less
    /// time to allocate registers, but generated code's quality may be inferior. As of
    /// 2020-04-17, it is still experimental and it should not be used in production settings.
    /// - `experimental_linear_scan_checked` is the linear scan allocator with additional self
    /// checks that may take some time to run, and thus these checks are disabled by default.
    pub fn regalloc(&self) -> Regalloc {
        // Byte 0 holds the enum discriminant in declaration order.
        match self.bytes[0] {
            0 => {
                Regalloc::Backtracking
            }
            1 => {
                Regalloc::BacktrackingChecked
            }
            2 => {
                Regalloc::ExperimentalLinearScan
            }
            3 => {
                Regalloc::ExperimentalLinearScanChecked
            }
            _ => {
                panic!("Invalid enum value")
            }
        }
    }
    /// Optimization level:
    ///
    /// - none: Minimise compile time by disabling most optimizations.
    /// - speed: Generate the fastest possible code
    /// - speed_and_size: like "speed", but also perform transformations
    /// aimed at reducing code size.
    pub fn opt_level(&self) -> OptLevel {
        // Byte 1 holds the enum discriminant in declaration order.
        match self.bytes[1] {
            0 => {
                OptLevel::None
            }
            1 => {
                OptLevel::Speed
            }
            2 => {
                OptLevel::SpeedAndSize
            }
            _ => {
                panic!("Invalid enum value")
            }
        }
    }
    /// Defines the model used to perform TLS accesses.
    pub fn tls_model(&self) -> TlsModel {
        // Byte 2; arms are listed alphabetically but map to declaration-order
        // discriminants (None=0, ElfGd=1, Macho=2, Coff=3).
        match self.bytes[2] {
            3 => {
                TlsModel::Coff
            }
            1 => {
                TlsModel::ElfGd
            }
            2 => {
                TlsModel::Macho
            }
            0 => {
                TlsModel::None
            }
            _ => {
                panic!("Invalid enum value")
            }
        }
    }
    /// Defines the calling convention to use for LibCalls call expansion,
    /// since it may be different from the ISA default calling convention.
    ///
    /// The default value is to use the same calling convention as the ISA
    /// default calling convention.
    ///
    /// This list should be kept in sync with the list of calling
    /// conventions available in isa/call_conv.rs.
    pub fn libcall_call_conv(&self) -> LibcallCallConv {
        // Byte 3; arms are listed alphabetically but map to declaration-order
        // discriminants (IsaDefault=0 .. Probestack=7).
        match self.bytes[3] {
            5 => {
                LibcallCallConv::BaldrdashSystemV
            }
            6 => {
                LibcallCallConv::BaldrdashWindows
            }
            2 => {
                LibcallCallConv::Cold
            }
            1 => {
                LibcallCallConv::Fast
            }
            0 => {
                LibcallCallConv::IsaDefault
            }
            7 => {
                LibcallCallConv::Probestack
            }
            3 => {
                LibcallCallConv::SystemV
            }
            4 => {
                LibcallCallConv::WindowsFastcall
            }
            _ => {
                panic!("Invalid enum value")
            }
        }
    }
    /// Number of pointer-sized words pushed by the baldrdash prologue.
    ///
    /// Functions with the `baldrdash` calling convention don't generate their
    /// own prologue and epilogue. They depend on externally generated code
    /// that pushes a fixed number of words in the prologue and restores them
    /// in the epilogue.
    ///
    /// This setting configures the number of pointer-sized words pushed on the
    /// stack when the Cranelift-generated code is entered. This includes the
    /// pushed return address on x86.
    pub fn baldrdash_prologue_words(&self) -> u8 {
        self.bytes[4]
    }
    /// The log2 of the size of the stack guard region.
    ///
    /// Stack frames larger than this size will have stack overflow checked
    /// by calling the probestack function.
    ///
    /// The default is 12, which translates to a size of 4096.
    pub fn probestack_size_log2(&self) -> u8 {
        self.bytes[5]
    }
    /// Run the Cranelift IR verifier at strategic times during compilation.
    ///
    /// This makes compilation slower but catches many bugs. The verifier is always enabled by
    /// default, which is useful during development.
    pub fn enable_verifier(&self) -> bool {
        self.numbered_predicate(0)
    }
    /// Enable Position-Independent Code generation
    pub fn is_pic(&self) -> bool {
        self.numbered_predicate(1)
    }
    /// Use colocated libcalls.
    ///
    /// Generate code that assumes that libcalls can be declared "colocated",
    /// meaning they will be defined along with the current function, such that
    /// they can use more efficient addressing.
    pub fn use_colocated_libcalls(&self) -> bool {
        self.numbered_predicate(2)
    }
    /// Generate explicit checks around native division instructions to avoid
    /// their trapping.
    ///
    /// This is primarily used by SpiderMonkey which doesn't install a signal
    /// handler for SIGFPE, but expects a SIGILL trap for division by zero.
    ///
    /// On ISAs like ARM where the native division instructions don't trap,
    /// this setting has no effect - explicit checks are always inserted.
    pub fn avoid_div_traps(&self) -> bool {
        self.numbered_predicate(3)
    }
    /// Enable the use of floating-point instructions
    ///
    /// Disabling use of floating-point instructions is not yet implemented.
    pub fn enable_float(&self) -> bool {
        self.numbered_predicate(4)
    }
    /// Enable NaN canonicalization
    ///
    /// This replaces NaNs with a single canonical value, for users requiring
    /// entirely deterministic WebAssembly computation. This is not required
    /// by the WebAssembly spec, so it is not enabled by default.
    pub fn enable_nan_canonicalization(&self) -> bool {
        self.numbered_predicate(5)
    }
    /// Enable the use of the pinned register.
    ///
    /// This register is excluded from register allocation, and is completely under the control of
    /// the end-user. It is possible to read it via the get_pinned_reg instruction, and to set it
    /// with the set_pinned_reg instruction.
    pub fn enable_pinned_reg(&self) -> bool {
        self.numbered_predicate(6)
    }
    /// Use the pinned register as the heap base.
    ///
    /// Enabling this requires the enable_pinned_reg setting to be set to true. It enables a custom
    /// legalization of the `heap_addr` instruction so it will use the pinned register as the heap
    /// base, instead of fetching it from a global value.
    ///
    /// Warning! Enabling this means that the pinned register *must* be maintained to contain the
    /// heap base address at all times, during the lifetime of a function. Using the pinned
    /// register for other purposes when this is set is very likely to cause crashes.
    pub fn use_pinned_reg_as_heap_base(&self) -> bool {
        self.numbered_predicate(7)
    }
    /// Enable the use of SIMD instructions.
    pub fn enable_simd(&self) -> bool {
        self.numbered_predicate(8)
    }
    /// Enable the use of atomic instructions
    pub fn enable_atomics(&self) -> bool {
        self.numbered_predicate(9)
    }
    /// Enable safepoint instruction insertions.
    ///
    /// This will allow the emit_stackmaps() function to insert the safepoint
    /// instruction on top of calls and interrupt traps in order to display the
    /// live reference values at that point in the program.
    pub fn enable_safepoints(&self) -> bool {
        self.numbered_predicate(10)
    }
    /// Emit not-yet-relocated function addresses as all-ones bit patterns.
    pub fn emit_all_ones_funcaddrs(&self) -> bool {
        self.numbered_predicate(11)
    }
    /// Enable the use of stack probes, for calling conventions which support this
    /// functionality.
    pub fn enable_probestack(&self) -> bool {
        self.numbered_predicate(12)
    }
    /// Set this to true of the stack probe function modifies the stack pointer
    /// itself.
    pub fn probestack_func_adjusts_sp(&self) -> bool {
        self.numbered_predicate(13)
    }
    /// Enable the use of jump tables in generated machine code.
    pub fn enable_jump_tables(&self) -> bool {
        self.numbered_predicate(14)
    }
    /// Enable Spectre mitigation on heap bounds checks.
    ///
    /// This is a no-op for any heap that needs no bounds checks; e.g.,
    /// if the limit is static and the guard region is large enough that
    /// the index cannot reach past it.
    ///
    /// This option is enabled by default because it is highly
    /// recommended for secure sandboxing. The embedder should consider
    /// the security implications carefully before disabling this option.
    pub fn enable_heap_access_spectre_mitigation(&self) -> bool {
        self.numbered_predicate(15)
    }
}
// Descriptor table for every setting in the "shared" group. `offset` is the
// byte index into `Flags::bytes`; enum details also store a starting index
// into `ENUMERATORS`, and bool details name the bit within their byte.
static DESCRIPTORS: [detail::Descriptor; 22] = [
    detail::Descriptor {
        name: "regalloc",
        offset: 0,
        detail: detail::Detail::Enum { last: 3, enumerators: 0 },
    },
    detail::Descriptor {
        name: "opt_level",
        offset: 1,
        detail: detail::Detail::Enum { last: 2, enumerators: 4 },
    },
    detail::Descriptor {
        name: "tls_model",
        offset: 2,
        detail: detail::Detail::Enum { last: 3, enumerators: 7 },
    },
    detail::Descriptor {
        name: "libcall_call_conv",
        offset: 3,
        detail: detail::Detail::Enum { last: 7, enumerators: 11 },
    },
    detail::Descriptor {
        name: "baldrdash_prologue_words",
        offset: 4,
        detail: detail::Detail::Num,
    },
    detail::Descriptor {
        name: "probestack_size_log2",
        offset: 5,
        detail: detail::Detail::Num,
    },
    detail::Descriptor {
        name: "enable_verifier",
        offset: 6,
        detail: detail::Detail::Bool { bit: 0 },
    },
    detail::Descriptor {
        name: "is_pic",
        offset: 6,
        detail: detail::Detail::Bool { bit: 1 },
    },
    detail::Descriptor {
        name: "use_colocated_libcalls",
        offset: 6,
        detail: detail::Detail::Bool { bit: 2 },
    },
    detail::Descriptor {
        name: "avoid_div_traps",
        offset: 6,
        detail: detail::Detail::Bool { bit: 3 },
    },
    detail::Descriptor {
        name: "enable_float",
        offset: 6,
        detail: detail::Detail::Bool { bit: 4 },
    },
    detail::Descriptor {
        name: "enable_nan_canonicalization",
        offset: 6,
        detail: detail::Detail::Bool { bit: 5 },
    },
    detail::Descriptor {
        name: "enable_pinned_reg",
        offset: 6,
        detail: detail::Detail::Bool { bit: 6 },
    },
    detail::Descriptor {
        name: "use_pinned_reg_as_heap_base",
        offset: 6,
        detail: detail::Detail::Bool { bit: 7 },
    },
    detail::Descriptor {
        name: "enable_simd",
        offset: 7,
        detail: detail::Detail::Bool { bit: 0 },
    },
    detail::Descriptor {
        name: "enable_atomics",
        offset: 7,
        detail: detail::Detail::Bool { bit: 1 },
    },
    detail::Descriptor {
        name: "enable_safepoints",
        offset: 7,
        detail: detail::Detail::Bool { bit: 2 },
    },
    detail::Descriptor {
        name: "emit_all_ones_funcaddrs",
        offset: 7,
        detail: detail::Detail::Bool { bit: 3 },
    },
    detail::Descriptor {
        name: "enable_probestack",
        offset: 7,
        detail: detail::Detail::Bool { bit: 4 },
    },
    detail::Descriptor {
        name: "probestack_func_adjusts_sp",
        offset: 7,
        detail: detail::Detail::Bool { bit: 5 },
    },
    detail::Descriptor {
        name: "enable_jump_tables",
        offset: 7,
        detail: detail::Detail::Bool { bit: 6 },
    },
    detail::Descriptor {
        name: "enable_heap_access_spectre_mitigation",
        offset: 7,
        detail: detail::Detail::Bool { bit: 7 },
    },
];
// Flattened name table for all enum settings; each enum descriptor in
// DESCRIPTORS stores its starting index into this array.
static ENUMERATORS: [&str; 19] = [
    "backtracking",
    "backtracking_checked",
    "experimental_linear_scan",
    "experimental_linear_scan_checked",
    "none",
    "speed",
    "speed_and_size",
    "none",
    "elf_gd",
    "macho",
    "coff",
    "isa_default",
    "fast",
    "cold",
    "system_v",
    "windows_fastcall",
    "baldrdash_system_v",
    "baldrdash_windows",
    "probestack",
];
// Lookup table used by the settings template to find a setting's descriptor
// by name; entries are indices into DESCRIPTORS, 0xffff marks an empty slot.
static HASH_TABLE: [u16; 32] = [
    0xffff,
    18,
    0xffff,
    2,
    11,
    6,
    15,
    13,
    14,
    0,
    19,
    21,
    5,
    0xffff,
    16,
    0xffff,
    12,
    0xffff,
    0xffff,
    9,
    17,
    8,
    20,
    3,
    0xffff,
    0xffff,
    0xffff,
    0xffff,
    1,
    4,
    10,
    7,
];
// The shared settings group defines no presets.
static PRESETS: [(u8, u8); 0] = [
];
// Group template wiring the tables above together with the default byte
// values for `Flags::bytes` (used by `builder()` below).
static TEMPLATE: detail::Template = detail::Template {
    name: "shared",
    descriptors: &DESCRIPTORS,
    enumerators: &ENUMERATORS,
    hash_table: &HASH_TABLE,
    defaults: &[0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x11, 0xd2],
    presets: &PRESETS,
};
/// Create a `settings::Builder` for the shared settings group.
///
/// The builder starts from the defaults recorded in `TEMPLATE` and is
/// consumed by `Flags::new` to produce a concrete `Flags` value.
pub fn builder() -> Builder {
    Builder::new(&TEMPLATE)
}
impl fmt::Display for Flags {
    /// Renders the settings group as TOML-style `name = value` lines under
    /// a `[shared]` section header, skipping preset entries.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        writeln!(f, "[shared]")?;
        for descriptor in DESCRIPTORS.iter().filter(|d| !d.detail.is_preset()) {
            write!(f, "{} = ", descriptor.name)?;
            let byte = self.bytes[descriptor.offset as usize];
            TEMPLATE.format_toml_value(descriptor.detail, byte, f)?;
            writeln!(f)?;
        }
        Ok(())
    }
}
| 31.8 | 99 | 0.575547 |
7a341a22d7b5bdf9d6e79e6b8bfb1c7ef233e3c1 | 25,714 | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::{File, OpenOptions};
use std::io::{Read, Write};
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
use std::sync::Arc;
use itertools::Itertools;
use jujutsu_lib::backend::{Conflict, ConflictPart, TreeValue};
use jujutsu_lib::op_store::WorkspaceId;
use jujutsu_lib::repo::ReadonlyRepo;
use jujutsu_lib::repo_path::{RepoPath, RepoPathComponent};
use jujutsu_lib::settings::UserSettings;
use jujutsu_lib::testutils;
use jujutsu_lib::tree_builder::TreeBuilder;
use jujutsu_lib::working_copy::WorkingCopy;
use test_case::test_case;
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_root(use_git: bool) {
    // Test that the working copy is clean and empty after init.
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let wc = test_workspace.workspace.working_copy_mut();
    // Snapshot the working copy into a tree, then discard the mutation —
    // only the resulting tree id is needed, no state should be updated.
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    locked_wc.discard();
    // The snapshot must match both the checked-out commit's tree and the
    // store's canonical empty tree.
    let checkout_id = repo.view().get_checkout(&WorkspaceId::default()).unwrap();
    let checkout_commit = repo.store().get_commit(checkout_id).unwrap();
    assert_eq!(&new_tree_id, checkout_commit.tree().id());
    assert_eq!(&new_tree_id, repo.store().empty_tree_id());
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_checkout_file_transitions(use_git: bool) {
    // Tests switching between commits where a certain path is of one type in one
    // commit and another type in the other. Includes a "missing" type, so we cover
    // additions and removals as well.
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let store = repo.store().clone();
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    // All path types we exercise. The left/right trees contain one file per
    // (left_kind, right_kind) pair, so every possible transition is covered.
    #[derive(Debug, Clone, Copy)]
    enum Kind {
        Missing,
        Normal,
        Executable,
        Conflict,
        // Symlink is only pushed into `kinds` on unix (see below), so on
        // windows the variant would otherwise trigger a dead_code warning.
        #[cfg_attr(windows, allow(dead_code))]
        Symlink,
        Tree,
        GitSubmodule,
    }

    // Writes a tree entry of the given `kind` at `path` into `tree_builder`.
    // `Kind::Missing` writes nothing at all.
    fn write_path(
        settings: &UserSettings,
        repo: &Arc<ReadonlyRepo>,
        tree_builder: &mut TreeBuilder,
        kind: Kind,
        path: &str,
    ) {
        let store = repo.store();
        let value = match kind {
            Kind::Missing => {
                return;
            }
            Kind::Normal => {
                let id = testutils::write_file(
                    store,
                    &RepoPath::from_internal_string(path),
                    "normal file contents",
                );
                TreeValue::Normal {
                    id,
                    executable: false,
                }
            }
            Kind::Executable => {
                let id = testutils::write_file(
                    store,
                    &RepoPath::from_internal_string(path),
                    "executable file contents",
                );
                TreeValue::Normal {
                    id,
                    executable: true,
                }
            }
            Kind::Conflict => {
                // A 3-way conflict: one remove (base) and two adds (left/right).
                let base_file_id = testutils::write_file(
                    store,
                    &RepoPath::from_internal_string(path),
                    "base file contents",
                );
                let left_file_id = testutils::write_file(
                    store,
                    &RepoPath::from_internal_string(path),
                    "left file contents",
                );
                let right_file_id = testutils::write_file(
                    store,
                    &RepoPath::from_internal_string(path),
                    "right file contents",
                );
                let conflict = Conflict {
                    removes: vec![ConflictPart {
                        value: TreeValue::Normal {
                            id: base_file_id,
                            executable: false,
                        },
                    }],
                    adds: vec![
                        ConflictPart {
                            value: TreeValue::Normal {
                                id: left_file_id,
                                executable: false,
                            },
                        },
                        ConflictPart {
                            value: TreeValue::Normal {
                                id: right_file_id,
                                executable: false,
                            },
                        },
                    ],
                };
                let conflict_id = store.write_conflict(&conflict).unwrap();
                TreeValue::Conflict(conflict_id)
            }
            Kind::Symlink => {
                let id = store
                    .write_symlink(&RepoPath::from_internal_string(path), "target")
                    .unwrap();
                TreeValue::Symlink(id)
            }
            Kind::Tree => {
                // A subtree containing a single normal file at <path>/file.
                let mut sub_tree_builder = store.tree_builder(store.empty_tree_id().clone());
                let file_path = path.to_owned() + "/file";
                write_path(
                    settings,
                    repo,
                    &mut sub_tree_builder,
                    Kind::Normal,
                    &file_path,
                );
                let id = sub_tree_builder.write_tree();
                TreeValue::Tree(id)
            }
            Kind::GitSubmodule => {
                // A submodule entry pointing at a random commit created in a
                // throwaway transaction.
                let mut tx = repo.start_transaction("test");
                let id = testutils::create_random_commit(settings, repo)
                    .write_to_repo(tx.mut_repo())
                    .id()
                    .clone();
                tx.commit();
                TreeValue::GitSubmodule(id)
            }
        };
        tree_builder.set(RepoPath::from_internal_string(path), value);
    }

    let mut kinds = vec![
        Kind::Missing,
        Kind::Normal,
        Kind::Executable,
        Kind::Conflict,
        Kind::Tree,
    ];
    // Symlinks only exist on unix; submodules only make sense with git.
    #[cfg(unix)]
    kinds.push(Kind::Symlink);
    if use_git {
        kinds.push(Kind::GitSubmodule);
    }
    let mut left_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut right_tree_builder = store.tree_builder(store.empty_tree_id().clone());
    let mut files = vec![];
    // One file per (left, right) pair, named after the pair for readable
    // assertion messages.
    for left_kind in &kinds {
        for right_kind in &kinds {
            let path = format!("{:?}_{:?}", left_kind, right_kind);
            write_path(&settings, repo, &mut left_tree_builder, *left_kind, &path);
            write_path(&settings, repo, &mut right_tree_builder, *right_kind, &path);
            files.push((*left_kind, *right_kind, path));
        }
    }
    let left_tree_id = left_tree_builder.write_tree();
    let right_tree_id = right_tree_builder.write_tree();
    let left_tree = store.get_tree(&RepoPath::root(), &left_tree_id).unwrap();
    let right_tree = store.get_tree(&RepoPath::root(), &right_tree_id).unwrap();

    // Check out left, then right, so every left->right transition happens.
    let wc = test_workspace.workspace.working_copy_mut();
    wc.check_out(repo.op_id().clone(), None, &left_tree)
        .unwrap();
    wc.check_out(repo.op_id().clone(), None, &right_tree)
        .unwrap();

    // Check that the working copy is clean.
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    locked_wc.discard();
    assert_eq!(new_tree_id, right_tree_id);

    // Verify on-disk state for each file, based only on its right-tree kind.
    for (_left_kind, right_kind, path) in &files {
        let wc_path = workspace_root.join(path);
        // symlink_metadata() does not follow symlinks, so symlinks are
        // observed as symlinks rather than their targets.
        let maybe_metadata = wc_path.symlink_metadata();
        match right_kind {
            Kind::Missing => {
                assert!(maybe_metadata.is_err(), "{:?} should not exist", path);
            }
            Kind::Normal => {
                assert!(maybe_metadata.is_ok(), "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{:?} should be a file", path);
                #[cfg(unix)]
                assert_eq!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{:?} should not be executable",
                    path
                );
            }
            Kind::Executable => {
                assert!(maybe_metadata.is_ok(), "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{:?} should be a file", path);
                #[cfg(unix)]
                assert_ne!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{:?} should be executable",
                    path
                );
            }
            Kind::Conflict => {
                // Conflicts materialize as regular non-executable files on disk.
                assert!(maybe_metadata.is_ok(), "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_file(), "{:?} should be a file", path);
                #[cfg(unix)]
                assert_eq!(
                    metadata.permissions().mode() & 0o111,
                    0,
                    "{:?} should not be executable",
                    path
                );
            }
            Kind::Symlink => {
                assert!(maybe_metadata.is_ok(), "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert!(
                    metadata.file_type().is_symlink(),
                    "{:?} should be a symlink",
                    path
                );
            }
            Kind::Tree => {
                assert!(maybe_metadata.is_ok(), "{:?} should exist", path);
                let metadata = maybe_metadata.unwrap();
                assert!(metadata.is_dir(), "{:?} should be a directory", path);
            }
            Kind::GitSubmodule => {
                // Not supported for now
                assert!(maybe_metadata.is_err(), "{:?} should not exist", path);
            }
        };
    }
}
// Tests LockedWorkingCopy::reset(): resetting to a tree without an (ignored)
// file keeps the file on disk but untracks it; resetting back re-tracks it.
#[test]
fn test_reset() {
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, false);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let ignored_path = RepoPath::from_internal_string("ignored");
    let gitignore_path = RepoPath::from_internal_string(".gitignore");

    // "ignored" is listed in .gitignore but is nevertheless tracked in
    // tree_with_file.
    let tree_without_file = testutils::create_tree(repo, &[(&gitignore_path, "ignored\n")]);
    let tree_with_file = testutils::create_tree(
        repo,
        &[(&gitignore_path, "ignored\n"), (&ignored_path, "code")],
    );

    let wc = test_workspace.workspace.working_copy_mut();
    wc.check_out(repo.op_id().clone(), None, &tree_with_file)
        .unwrap();

    // Test the setup: the file should exist on disk and in the tree state.
    assert!(ignored_path.to_fs_path(&workspace_root).is_file());
    assert!(wc.file_states().contains_key(&ignored_path));

    // After we reset to the commit without the file, it should still exist on disk,
    // but it should not be in the tree state, and it should not get added when we
    // commit the working copy (because it's ignored).
    let mut locked_wc = wc.start_mutation();
    locked_wc.reset(&tree_without_file).unwrap();
    locked_wc.finish(repo.op_id().clone());
    assert!(ignored_path.to_fs_path(&workspace_root).is_file());
    assert!(!wc.file_states().contains_key(&ignored_path));
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    assert_eq!(new_tree_id, *tree_without_file.id());
    locked_wc.discard();

    // NOTE(review): the following section repeats the reset above verbatim
    // (same target tree, same assertions, same comment). Possibly intended to
    // verify that reset is idempotent — confirm, or deduplicate.
    // After we reset to the commit without the file, it should still exist on disk,
    // but it should not be in the tree state, and it should not get added when we
    // commit the working copy (because it's ignored).
    let mut locked_wc = wc.start_mutation();
    locked_wc.reset(&tree_without_file).unwrap();
    locked_wc.finish(repo.op_id().clone());
    assert!(ignored_path.to_fs_path(&workspace_root).is_file());
    assert!(!wc.file_states().contains_key(&ignored_path));
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    assert_eq!(new_tree_id, *tree_without_file.id());
    locked_wc.discard();

    // Now test the opposite direction: resetting to a commit where the file is
    // tracked. The file should become tracked (even though it's ignored).
    let mut locked_wc = wc.start_mutation();
    locked_wc.reset(&tree_with_file).unwrap();
    locked_wc.finish(repo.op_id().clone());
    assert!(ignored_path.to_fs_path(&workspace_root).is_file());
    assert!(wc.file_states().contains_key(&ignored_path));
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    assert_eq!(new_tree_id, *tree_with_file.id());
    locked_wc.discard();
}
#[test]
fn test_checkout_discard() {
    // Start a mutation, do a checkout, and then discard the mutation. The working
    // copy files should remain changed, but the state files should not be
    // written.
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, false);
    let repo = test_workspace.repo.clone();
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let file1_path = RepoPath::from_internal_string("file1");
    let file2_path = RepoPath::from_internal_string("file2");

    let store = repo.store();
    // tree1 contains only file1; tree2 contains only file2, so checking out
    // tree2 deletes file1 and creates file2.
    let tree1 = testutils::create_tree(&repo, &[(&file1_path, "contents")]);
    let tree2 = testutils::create_tree(&repo, &[(&file2_path, "contents")]);

    let wc = test_workspace.workspace.working_copy_mut();
    // Remember the on-disk state path so we can reload a second WorkingCopy
    // from it and observe what was (not) persisted.
    let state_path = wc.state_path().to_path_buf();
    wc.check_out(repo.op_id().clone(), None, &tree1).unwrap();

    // Test the setup: the file should exist on disk and in the tree state.
    assert!(file1_path.to_fs_path(&workspace_root).is_file());
    assert!(wc.file_states().contains_key(&file1_path));

    // Start a checkout
    let mut locked_wc = wc.start_mutation();
    locked_wc.check_out(&tree2).unwrap();
    // The change should be reflected in the working copy but not saved
    assert!(!file1_path.to_fs_path(&workspace_root).is_file());
    assert!(file2_path.to_fs_path(&workspace_root).is_file());
    // A WorkingCopy reloaded from disk must still see the pre-checkout state.
    let reloaded_wc = WorkingCopy::load(store.clone(), workspace_root.clone(), state_path.clone());
    assert!(reloaded_wc.file_states().contains_key(&file1_path));
    assert!(!reloaded_wc.file_states().contains_key(&file2_path));
    locked_wc.discard();

    // The change should remain in the working copy, but not in memory and not saved
    assert!(wc.file_states().contains_key(&file1_path));
    assert!(!wc.file_states().contains_key(&file2_path));
    assert!(!file1_path.to_fs_path(&workspace_root).is_file());
    assert!(file2_path.to_fs_path(&workspace_root).is_file());
    let reloaded_wc = WorkingCopy::load(store.clone(), workspace_root, state_path);
    assert!(reloaded_wc.file_states().contains_key(&file1_path));
    assert!(!reloaded_wc.file_states().contains_key(&file2_path));
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_commit_racy_timestamps(use_git: bool) {
    // A file rewritten within the same millisecond as the recorded working
    // copy state must still be detected as modified. Rewrite the same file
    // many times in a tight loop and require a new tree id every round.
    let _home_dir = testutils::new_user_home();
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let file_path = workspace_root.join("file");
    let wc = test_workspace.workspace.working_copy_mut();
    let mut last_tree_id = repo.store().empty_tree_id().clone();
    for round in 0..100 {
        // Open with create+write (no truncate needed: each new content is at
        // least as long as the previous one) and close before snapshotting.
        let mut file = OpenOptions::new()
            .create(true)
            .write(true)
            .open(&file_path)
            .unwrap();
        file.write_all(format!("contents {}", round).as_bytes())
            .unwrap();
        drop(file);

        let mut mutation = wc.start_mutation();
        let tree_id = mutation.write_tree();
        mutation.discard();
        assert_ne!(tree_id, last_tree_id);
        last_tree_id = tree_id;
    }
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_gitignores(use_git: bool) {
    // Tests that .gitignore files are respected.
    let _home_dir = testutils::new_user_home();
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    let gitignore_path = RepoPath::from_internal_string(".gitignore");
    let added_path = RepoPath::from_internal_string("added");
    let modified_path = RepoPath::from_internal_string("modified");
    let removed_path = RepoPath::from_internal_string("removed");
    let ignored_path = RepoPath::from_internal_string("ignored");
    let subdir_modified_path = RepoPath::from_internal_string("dir/modified");
    let subdir_ignored_path = RepoPath::from_internal_string("dir/ignored");

    // First snapshot: only "ignored" is in .gitignore.
    testutils::write_working_copy_file(&workspace_root, &gitignore_path, "ignored\n");
    testutils::write_working_copy_file(&workspace_root, &modified_path, "1");
    testutils::write_working_copy_file(&workspace_root, &removed_path, "1");
    std::fs::create_dir(workspace_root.join("dir")).unwrap();
    testutils::write_working_copy_file(&workspace_root, &subdir_modified_path, "1");

    let wc = test_workspace.workspace.working_copy_mut();
    let mut locked_wc = wc.start_mutation();
    let new_tree_id1 = locked_wc.write_tree();
    locked_wc.finish(repo.op_id().clone());
    let tree1 = repo
        .store()
        .get_tree(&RepoPath::root(), &new_tree_id1)
        .unwrap();
    let files1 = tree1.entries().map(|(name, _value)| name).collect_vec();
    assert_eq!(
        files1,
        vec![
            gitignore_path.clone(),
            subdir_modified_path.clone(),
            modified_path.clone(),
            removed_path.clone(),
        ]
    );

    // Second snapshot: "modified" and "removed" are now also ignored. Already
    // tracked files must remain tracked despite the new ignore rules; new
    // ignored files ("ignored", "dir/ignored") must not be added; the deleted
    // "removed" file must drop out of the tree.
    testutils::write_working_copy_file(
        &workspace_root,
        &gitignore_path,
        "ignored\nmodified\nremoved\n",
    );
    testutils::write_working_copy_file(&workspace_root, &added_path, "2");
    testutils::write_working_copy_file(&workspace_root, &modified_path, "2");
    std::fs::remove_file(removed_path.to_fs_path(&workspace_root)).unwrap();
    testutils::write_working_copy_file(&workspace_root, &ignored_path, "2");
    testutils::write_working_copy_file(&workspace_root, &subdir_modified_path, "2");
    testutils::write_working_copy_file(&workspace_root, &subdir_ignored_path, "2");

    let mut locked_wc = wc.start_mutation();
    let new_tree_id2 = locked_wc.write_tree();
    locked_wc.discard();
    let tree2 = repo
        .store()
        .get_tree(&RepoPath::root(), &new_tree_id2)
        .unwrap();
    let files2 = tree2.entries().map(|(name, _value)| name).collect_vec();
    assert_eq!(
        files2,
        vec![
            gitignore_path,
            added_path,
            subdir_modified_path,
            modified_path,
        ]
    );
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_gitignores_checkout_overwrites_ignored(use_git: bool) {
    // Tests that a .gitignore'd file gets overwritten if check out a commit where
    // the file is tracked.
    let _home_dir = testutils::new_user_home();
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    // Write an ignored file called "modified" to disk
    let gitignore_path = RepoPath::from_internal_string(".gitignore");
    testutils::write_working_copy_file(&workspace_root, &gitignore_path, "modified\n");
    let modified_path = RepoPath::from_internal_string("modified");
    testutils::write_working_copy_file(&workspace_root, &modified_path, "garbage");

    // Create a tree that adds the same file but with different contents
    let mut tree_builder = repo
        .store()
        .tree_builder(repo.store().empty_tree_id().clone());
    testutils::write_normal_file(&mut tree_builder, &modified_path, "contents");
    let tree_id = tree_builder.write_tree();
    let tree = repo.store().get_tree(&RepoPath::root(), &tree_id).unwrap();

    // Now check out the tree that adds the file "modified" with contents
    // "contents". The exiting contents ("garbage") should be replaced in the
    // working copy.
    let wc = test_workspace.workspace.working_copy_mut();
    wc.check_out(repo.op_id().clone(), None, &tree).unwrap();

    // Check that the new contents are in the working copy
    let path = workspace_root.join("modified");
    assert!(path.is_file());
    let mut file = File::open(path).unwrap();
    let mut buf = Vec::new();
    file.read_to_end(&mut buf).unwrap();
    assert_eq!(buf, b"contents");

    // Check that the file is in the tree created by committing the working copy
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    locked_wc.discard();
    let new_tree = repo
        .store()
        .get_tree(&RepoPath::root(), &new_tree_id)
        .unwrap();
    assert!(new_tree
        .entry(&RepoPathComponent::from("modified"))
        .is_some());
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_gitignores_ignored_directory_already_tracked(use_git: bool) {
    // Tests that a .gitignore'd directory that already has a tracked file in it
    // does not get removed when committing the working directory.
    let _home_dir = testutils::new_user_home();
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;

    // Add a .gitignore file saying to ignore the directory "ignored/"
    let gitignore_path = RepoPath::from_internal_string(".gitignore");
    testutils::write_working_copy_file(
        test_workspace.workspace.workspace_root(),
        &gitignore_path,
        "/ignored/\n",
    );
    let file_path = RepoPath::from_internal_string("ignored/file");

    // Create a tree that adds a file in the ignored directory
    let mut tree_builder = repo
        .store()
        .tree_builder(repo.store().empty_tree_id().clone());
    testutils::write_normal_file(&mut tree_builder, &file_path, "contents");
    let tree_id = tree_builder.write_tree();
    let tree = repo.store().get_tree(&RepoPath::root(), &tree_id).unwrap();

    // Check out the tree with the file in ignored/
    let wc = test_workspace.workspace.working_copy_mut();
    wc.check_out(repo.op_id().clone(), None, &tree).unwrap();

    // Check that the file is still in the tree created by committing the working
    // copy (that it didn't get removed because the directory is ignored)
    let mut locked_wc = wc.start_mutation();
    let new_tree_id = locked_wc.write_tree();
    locked_wc.discard();
    let new_tree = repo
        .store()
        .get_tree(&RepoPath::root(), &new_tree_id)
        .unwrap();
    assert!(new_tree.path_value(&file_path).is_some());
}
#[test_case(false ; "local backend")]
#[test_case(true ; "git backend")]
fn test_dotgit_ignored(use_git: bool) {
    // Tests that .git directories and files are always ignored (we could accept
    // them if the backend is not git).
    let _home_dir = testutils::new_user_home();
    let settings = testutils::user_settings();
    let mut test_workspace = testutils::init_workspace(&settings, use_git);
    let repo = &test_workspace.repo;
    let workspace_root = test_workspace.workspace.workspace_root().clone();

    // Test with a .git/ directory (with a file in, since we don't write empty
    // trees)
    let dotgit_path = workspace_root.join(".git");
    std::fs::create_dir(&dotgit_path).unwrap();
    testutils::write_working_copy_file(
        &workspace_root,
        &RepoPath::from_internal_string(".git/file"),
        "contents",
    );
    let mut locked_wc = test_workspace.workspace.working_copy_mut().start_mutation();
    let new_tree_id = locked_wc.write_tree();
    // Nothing under .git/ was picked up, so the snapshot is the empty tree.
    assert_eq!(new_tree_id, *repo.store().empty_tree_id());
    locked_wc.discard();
    std::fs::remove_dir_all(&dotgit_path).unwrap();

    // Test with a .git file
    testutils::write_working_copy_file(
        &workspace_root,
        &RepoPath::from_internal_string(".git"),
        "contents",
    );
    let mut locked_wc = test_workspace.workspace.working_copy_mut().start_mutation();
    let new_tree_id = locked_wc.write_tree();
    assert_eq!(new_tree_id, *repo.store().empty_tree_id());
    locked_wc.discard();
}
| 40.304075 | 99 | 0.614685 |
5b57bfff3ff63a7cac03b09365c8263d709b8776 | 12,365 | use nimiq_blockchain::{chain_info::ChainInfo, chain_store::ChainStore};
use nimiq_database::{volatile::VolatileEnvironment, WriteTransaction};
use nimiq_hash::{Blake2bHash, Hash};
use nimiq_network_primitives::networks::{get_network_info, NetworkId};
use nimiq_block::Difficulty;
#[test]
fn it_can_store_the_chain_head() {
    // A freshly created store has no head; after setting one inside a
    // committed write transaction, the same hash is read back.
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);
    assert!(store.get_head(None).is_none());

    let head_hash = Blake2bHash::from([1u8; Blake2bHash::SIZE]);
    let mut txn = WriteTransaction::new(&env);
    store.set_head(&mut txn, &head_hash);
    txn.commit();

    assert_eq!(store.get_head(None).unwrap(), head_hash);
}
// Stores the genesis chain info together with its block body and checks that
// reads with and without the body flag return the expected variants.
#[test]
fn it_can_store_chain_info_with_body() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);

    let genesis_block = get_network_info(NetworkId::Main).unwrap().genesis_block.clone();
    let genesis_hash = genesis_block.header.hash();
    let chain_info = ChainInfo::initial(genesis_block);
    // Precondition: the genesis block carries a body.
    assert!(chain_info.head.body.is_some());

    // Last argument `true` stores the body as well (contrast with the
    // `..._without_body` test below).
    let mut txn = WriteTransaction::new(&env);
    store.put_chain_info(&mut txn, &genesis_hash, &chain_info, true);
    txn.commit();

    let mut chain_info_no_body = chain_info.clone();
    chain_info_no_body.head.body = None;
    assert_eq!(store.get_chain_info(&genesis_hash, true, None).unwrap(), chain_info);
    assert_eq!(store.get_chain_info(&genesis_hash, false, None).unwrap(), chain_info_no_body);
    assert_eq!(store.get_block(&genesis_hash, true, None).unwrap(), chain_info.head);
    assert_eq!(store.get_block(&genesis_hash, false, None).unwrap(), chain_info_no_body.head);
}
// Stores the genesis chain info WITHOUT its body: body-less reads work,
// body-requesting reads degrade (no body in chain info, no block at all).
#[test]
fn it_can_store_chain_info_without_body() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);

    let genesis_block = get_network_info(NetworkId::Main).unwrap().genesis_block.clone();
    let genesis_hash = genesis_block.header.hash();
    let chain_info = ChainInfo::initial(genesis_block);

    // Last argument `false`: the body is not persisted.
    let mut txn = WriteTransaction::new(&env);
    store.put_chain_info(&mut txn, &genesis_hash, &chain_info, false);
    txn.commit();

    let mut chain_info_no_body = chain_info.clone();
    chain_info_no_body.head.body = None;
    assert_eq!(store.get_chain_info(&genesis_hash, false, None).unwrap(), chain_info_no_body);
    // Requesting the body cannot restore it — it was never stored.
    assert_eq!(store.get_chain_info(&genesis_hash, true, None).unwrap(), chain_info_no_body);
    assert_eq!(store.get_block(&genesis_hash, false, None).unwrap(), chain_info_no_body.head);
    assert_eq!(store.get_block(&genesis_hash, true, None), None);
}
// Stores chain infos at heights 1 and 5, plus two competing infos at height
// 28883 (a fork), and checks that height lookups return the main-chain entry
// and honor the body flag.
#[test]
fn it_can_retrieve_chain_info_by_height() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);

    let block1 = get_network_info(NetworkId::Main).unwrap().genesis_block.clone();
    let hash1 = block1.header.hash::<Blake2bHash>();
    let info1 = ChainInfo::initial(block1.clone());

    let mut block2 = block1.clone();
    block2.header.height = 5;
    let hash2 = block2.header.hash::<Blake2bHash>();
    let mut info2 = ChainInfo::initial(block2);
    info2.total_difficulty = Difficulty::from(4711);

    // Fork at height 28883: 3_1 is marked off the main chain, 3_2 stays on it.
    // Distinct interlink hashes make the two headers hash differently.
    let mut block3_1 = block1.clone();
    block3_1.header.height = 28883;
    block3_1.header.interlink_hash = [2u8; Blake2bHash::SIZE].into();
    let hash3_1 = block3_1.header.hash::<Blake2bHash>();
    let mut info3_1 = ChainInfo::initial(block3_1);
    info3_1.on_main_chain = false;

    let mut block3_2 = block1.clone();
    block3_2.header.height = 28883;
    block3_2.header.interlink_hash = [5u8; Blake2bHash::SIZE].into();
    let hash3_2 = block3_2.header.hash::<Blake2bHash>();
    let mut info3_2 = ChainInfo::initial(block3_2);
    info3_2.main_chain_successor = Some(Blake2bHash::from([1u8; Blake2bHash::SIZE]));

    let mut txn = WriteTransaction::new(&env);
    store.put_chain_info(&mut txn, &hash1, &info1, true);
    // info2 is deliberately stored without its body.
    store.put_chain_info(&mut txn, &hash2, &info2, false);
    store.put_chain_info(&mut txn, &hash3_1, &info3_1, true);
    store.put_chain_info(&mut txn, &hash3_2, &info3_2, true);
    txn.commit();

    let mut info2_no_body = info2.clone();
    info2_no_body.head.body = None;
    assert_eq!(store.get_chain_info_at(1, true, None).unwrap(), info1);
    assert_eq!(store.get_chain_info_at(5, false, None).unwrap(), info2_no_body);
    // Body was never stored for info2, so include_body = true yields none.
    assert_eq!(store.get_chain_info_at(5, true, None).unwrap(), info2_no_body);
    // At the forked height, only the main-chain info (3_2) is returned.
    assert_eq!(store.get_chain_info_at(28883, true, None).unwrap(), info3_2);
    let mut info3_2_no_body = info3_2.clone();
    info3_2_no_body.head.body = None;
    assert_eq!(store.get_chain_info_at(28883, false, None).unwrap(), info3_2_no_body);
}
// Builds a linear chain of 21 blocks (heights 1..=21) and queries ranges
// walking backwards (towards genesis) from a given hash, with and without
// bodies, including clamping at the chain start.
#[test]
fn it_can_get_blocks_backward() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);

    let mut txn = WriteTransaction::new(&env);
    let mut block = get_network_info(NetworkId::Main).unwrap().genesis_block.clone();
    store.put_chain_info(&mut txn, &block.header.hash::<Blake2bHash>(), &ChainInfo::initial(block.clone()), true);
    // Extend the chain by 20 blocks; each new block links to the previous one.
    for _ in 0..20 {
        let mut b = block.clone();
        b.header.prev_hash = block.header.hash();
        b.header.height = block.header.height + 1;
        let hash = b.header.hash::<Blake2bHash>();
        store.put_chain_info(&mut txn, &hash, &ChainInfo::initial(b.clone()), true);
        block = b;
    }
    txn.commit();

    // `block` is now the head at height 21; the first queries start from its
    // parent (height 20), so results begin at height 19.
    let mut blocks = store.get_blocks_backward(&block.header.prev_hash, 10, true, None);
    assert_eq!(blocks.len(), 10);
    assert_eq!(blocks[0].header.height, 19);
    assert_eq!(blocks[9].header.height, 10);
    assert!(blocks[0].body.is_some());
    assert!(blocks[9].body.is_some());

    blocks = store.get_blocks_backward(&block.header.prev_hash, 18, false, None);
    assert_eq!(blocks.len(), 18);
    assert_eq!(blocks[0].header.height, 19);
    assert_eq!(blocks[17].header.height, 2);
    assert!(blocks[0].body.is_none());
    assert!(blocks[17].body.is_none());

    blocks = store.get_blocks_backward(&block.header.prev_hash, 19, true, None);
    assert_eq!(blocks.len(), 19);
    assert_eq!(blocks[0].header.height, 19);
    assert_eq!(blocks[18].header.height, 1);
    assert!(blocks[0].body.is_some());
    assert!(blocks[18].body.is_some());

    // Asking for more blocks than exist clamps at the chain start (height 1).
    blocks = store.get_blocks_backward(&block.header.prev_hash, 20, false, None);
    assert_eq!(blocks.len(), 19);
    assert_eq!(blocks[0].header.height, 19);
    assert_eq!(blocks[18].header.height, 1);
    assert!(blocks[0].body.is_none());
    assert!(blocks[18].body.is_none());

    blocks = store.get_blocks_backward(&block.header.hash::<Blake2bHash>(), 20, false, None);
    assert_eq!(blocks.len(), 20);
    assert_eq!(blocks[0].header.height, 20);
    assert_eq!(blocks[19].header.height, 1);
    // NOTE(review): the query below repeats the previous one verbatim —
    // possibly to exercise repeated reads; confirm intent or deduplicate.
    blocks = store.get_blocks_backward(&block.header.hash::<Blake2bHash>(), 20, false, None);
    assert_eq!(blocks.len(), 20);
    assert_eq!(blocks[0].header.height, 20);
    assert_eq!(blocks[19].header.height, 1);

    // Starting just above genesis yields only the genesis block...
    blocks = store.get_blocks_backward(&blocks[18].header.hash::<Blake2bHash>(), 20, true, None);
    assert_eq!(blocks.len(), 1);
    assert_eq!(blocks[0].header.height, 1);
    assert!(blocks[0].body.is_some());
    // ...and starting AT genesis yields nothing.
    blocks = store.get_blocks_backward(&blocks[0].header.hash::<Blake2bHash>(), 20, false, None);
    assert_eq!(blocks.len(), 0);
}
// Builds a linear chain of 21 blocks with main_chain_successor links and
// queries ranges walking forward (towards the head) from a given hash.
#[test]
fn it_can_get_blocks_forward() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);
    let mut txn = WriteTransaction::new(&env);

    let network_info = get_network_info(NetworkId::Main).unwrap();
    let mut block = network_info.genesis_block.clone();
    let mut chain_infos = vec![ChainInfo::initial(block.clone())];
    let mut hash;
    // Build 20 successors; forward traversal relies on main_chain_successor,
    // so each info points at the hash of the next block.
    for _ in 0..20 {
        let mut b = block.clone();
        b.header.prev_hash = block.header.hash();
        b.header.height = block.header.height + 1;
        hash = b.header.hash::<Blake2bHash>();
        chain_infos.last_mut().unwrap().main_chain_successor = Some(hash);
        chain_infos.push(ChainInfo::initial(b.clone()));
        block = b;
    }
    for chain_info in chain_infos.iter() {
        let hash = chain_info.head.header.hash();
        store.put_chain_info(&mut txn, &hash, chain_info, true);
    }
    txn.commit();

    // Forward queries starting from the hash of block 2 return blocks from
    // height 3 upward (the start block itself is excluded).
    let second_block_hash = chain_infos.first().unwrap().main_chain_successor.as_ref().unwrap();
    let mut blocks = store.get_blocks_forward(second_block_hash, 10, true, None);
    assert_eq!(blocks.len(), 10);
    assert_eq!(blocks[0].header.height, 3);
    assert_eq!(blocks[9].header.height, 12);
    assert!(blocks[0].body.is_some());
    assert!(blocks[9].body.is_some());

    blocks = store.get_blocks_forward(second_block_hash, 18, false, None);
    assert_eq!(blocks.len(), 18);
    assert_eq!(blocks[0].header.height, 3);
    assert_eq!(blocks[17].header.height, 20);
    assert!(blocks[0].body.is_none());
    assert!(blocks[17].body.is_none());

    blocks = store.get_blocks_forward(second_block_hash, 19, true, None);
    assert_eq!(blocks.len(), 19);
    assert_eq!(blocks[0].header.height, 3);
    assert_eq!(blocks[18].header.height, 21);
    assert!(blocks[0].body.is_some());
    assert!(blocks[18].body.is_some());

    // Asking for more blocks than exist clamps at the head (height 21).
    blocks = store.get_blocks_forward(second_block_hash, 20, false, None);
    assert_eq!(blocks.len(), 19);
    assert_eq!(blocks[0].header.height, 3);
    assert_eq!(blocks[18].header.height, 21);
    assert!(blocks[0].body.is_none());
    assert!(blocks[18].body.is_none());

    blocks = store.get_blocks_forward(&network_info.genesis_hash, 20, false, None);
    assert_eq!(blocks.len(), 20);
    assert_eq!(blocks[0].header.height, 2);
    assert_eq!(blocks[19].header.height, 21);
    // NOTE(review): the query below repeats the previous one verbatim —
    // possibly to exercise repeated reads; confirm intent or deduplicate.
    blocks = store.get_blocks_forward(&network_info.genesis_hash, 20, false, None);
    assert_eq!(blocks.len(), 20);
    assert_eq!(blocks[0].header.height, 2);
    assert_eq!(blocks[19].header.height, 21);

    // Starting just below the head yields only the head block...
    blocks = store.get_blocks_forward(&chain_infos[19].head.header.hash(), 20, true, None);
    assert_eq!(blocks.len(), 1);
    assert_eq!(blocks[0].header.height, 21);
    assert!(blocks[0].body.is_some());
    // ...and starting AT the head yields nothing.
    blocks = store.get_blocks_forward(&chain_infos[20].head.header.hash(), 20, false, None);
    assert_eq!(blocks.len(), 0);
}
// Stores one info at height 1 and two (forked) infos at height 28883, then
// removes entries one at a time and checks both hash and height lookups.
#[test]
fn it_can_remove_chain_info() {
    let env = VolatileEnvironment::new(3).unwrap();
    let store = ChainStore::new(&env);

    let block1 = get_network_info(NetworkId::Main).unwrap().genesis_block.clone();
    let hash1 = block1.header.hash::<Blake2bHash>();
    let info1 = ChainInfo::initial(block1.clone());

    // Fork at height 28883: 2_1 is off the main chain, 2_2 is on it.
    let mut block2_1 = block1.clone();
    block2_1.header.height = 28883;
    block2_1.header.interlink_hash = [2u8; Blake2bHash::SIZE].into();
    let hash2_1 = block2_1.header.hash::<Blake2bHash>();
    let mut info2_1 = ChainInfo::initial(block2_1);
    info2_1.on_main_chain = false;

    let mut block2_2 = block1.clone();
    block2_2.header.height = 28883;
    block2_2.header.interlink_hash = [5u8; Blake2bHash::SIZE].into();
    let hash2_2 = block2_2.header.hash::<Blake2bHash>();
    let mut info2_2 = ChainInfo::initial(block2_2);
    info2_2.main_chain_successor = Some(Blake2bHash::from([1u8; Blake2bHash::SIZE]));

    let mut txn = WriteTransaction::new(&env);
    store.put_chain_info(&mut txn, &hash1, &info1, true);
    store.put_chain_info(&mut txn, &hash2_1, &info2_1, true);
    store.put_chain_info(&mut txn, &hash2_2, &info2_2, true);
    txn.commit();

    assert_eq!(store.get_chain_info_at(1, true, None).unwrap(), info1);
    assert_eq!(store.get_chain_info(&hash1, true, None).unwrap(), info1);

    // Removing the only info at height 1 clears both lookup paths.
    txn = WriteTransaction::new(&env);
    store.remove_chain_info(&mut txn, &hash1, 1);
    txn.commit();
    assert!(store.get_chain_info_at(1, true, None).is_none());
    assert!(store.get_chain_info(&hash1, true, None).is_none());
    assert_eq!(store.get_chain_info_at(28883, true, None).unwrap(), info2_2);
    assert_eq!(store.get_chain_info(&hash2_1, true, None).unwrap(), info2_1);
    assert_eq!(store.get_chain_info(&hash2_2, true, None).unwrap(), info2_2);

    // Removing the off-main-chain fork leaves the main-chain info intact.
    txn = WriteTransaction::new(&env);
    store.remove_chain_info(&mut txn, &hash2_1, 28883);
    txn.commit();
    assert_eq!(store.get_chain_info_at(28883, true, None).unwrap(), info2_2);
    assert!(store.get_chain_info(&hash2_1, true, None).is_none());
    assert_eq!(store.get_chain_info(&hash2_2, true, None).unwrap(), info2_2);
}
| 41.079734 | 114 | 0.684674 |
4a7206e39987c3637076dadf8be1052db38e2c7d | 1,292 | use std::io::Cursor;
use std::mem::size_of;
use nue::{Encode, Decode, Un, Aligned};
// Smoke test: the Nue derive macros must compile for a unit struct.
#[derive(NueEncode, NueDecode, PartialEq, Debug)]
struct _PodTest;
// Encodes a packed POD struct and a Nue-derived struct with explicit layout
// attributes, asserts the two byte streams are identical, and round-trips
// the Nue struct through decode.
#[test]
fn encode_decode() {
    // Packed plain-old-data layout: u8 followed by an unaligned u16.
    #[derive(PodPacked)]
    struct POD1 {
        _0: u8,
        _1: Un<u16>,
    }
    const UNIT: &'static () = &();
    // Nue-encoded equivalent; the attributes pin the same wire layout:
    // _1 is written only when _0 == 1, and _2 is never written (cond = false).
    #[derive(NueEncode, NueDecode, PartialEq, Debug)]
    struct POD2 {
        _0: u8,
        #[nue(align = "1", skip = "0", limit = "2", consume = "true", cond = "self._0 == 1", default = "0u16.unaligned()")]
        _1: Un<u16>,
        #[nue(cond = "false", default = "UNIT")]
        _2: &'static (),
        _3: (),
    }
    let pod1 = POD1 { _0: 1, _1: 2u16.unaligned() };
    let pod2 = POD2 { _0: 1, _1: 2u16.unaligned(), _2: UNIT, _3: () };
    let buffer1 = Vec::new();
    let mut buffer1 = Cursor::new(buffer1);
    pod1.encode(&mut buffer1).unwrap();
    let buffer2 = Vec::new();
    let mut buffer2 = Cursor::new(buffer2);
    pod2.encode(&mut buffer2).unwrap();
    let buffer1 = buffer1.into_inner();
    let buffer2 = buffer2.into_inner();
    // Packed u8 + u16 occupies exactly 3 bytes.
    assert_eq!(size_of::<POD1>(), 3);
    // Both encodings must produce identical bytes.
    assert_eq!(&buffer1, &buffer2);
    // Round-trip: decoding the bytes reproduces the original value.
    let mut buffer2 = Cursor::new(buffer2);
    let pod2_decoded = Decode::decode(&mut buffer2).unwrap();
    assert_eq!(&pod2, &pod2_decoded);
}
| 26.367347 | 123 | 0.569659 |
5d45f5084c5eec399396315a493b2c134f4a410b | 223 | // build-pass
// only-x86_64
#![feature(llvm_asm)]
fn main() {
    unsafe {
        // The hex escapes decode to: "nop" :: "r"(x) : "eax" : "volatile".
        // This compile-pass test checks that llvm_asm! accepts string literals
        // containing escape sequences, not just plain text.
        let x = 10;
        llvm_asm!("\x6Eop" :: "\x72"(x) : "\x65ax" : "\x76olatile");
    }
}
| 17.153846 | 68 | 0.44843 |
bb069cb945dd24338ea7ee1526d2c2662ab94207 | 5,005 | use crate::sys::jsize;
use log::error;
use std::ptr::NonNull;
use crate::objects::release_mode::ReleaseMode;
use crate::sys::{jboolean, jbyte, jchar, jdouble, jfloat, jint, jlong, jshort};
use crate::{errors::*, objects::JObject, sys, JNIEnv};
/// Trait to define type array access/release for JNI primitive element types.
pub trait TypeArray {
    /// Obtains a native pointer to the elements of the Java array `obj`.
    /// `is_copy` is set by the VM to indicate whether the returned buffer is
    /// a copy of the array data.
    fn get(env: &JNIEnv, obj: JObject, is_copy: &mut jboolean) -> Result<*mut Self>;
    /// Releases a pointer previously returned by `get`, forwarding `mode` to
    /// the matching JNI `Release<Type>ArrayElements` call.
    fn release(env: &JNIEnv, obj: JObject, ptr: NonNull<Self>, mode: i32) -> Result<()>;
}
// TypeArray builder: generates a `TypeArray` impl for one JNI primitive type,
// wiring it to the given JNI Get/Release entry points.
macro_rules! type_array {
    ( $jni_type:ty, $jni_get:tt, $jni_release:tt ) => {
        /// $jni_type array access/release impl
        impl TypeArray for $jni_type {
            /// Get Java $jni_type array
            fn get(env: &JNIEnv, obj: JObject, is_copy: &mut jboolean) -> Result<*mut Self> {
                let internal = env.get_native_interface();
                // Even though this method may throw OoME, use `jni_unchecked`
                // instead of `jni_non_null_call` to remove (a slight) overhead
                // of exception checking. An error will still be detected as a `null`
                // result inside AutoArray ctor. Also, modern Hotspot in case of lack
                // of memory will return null and won't throw an exception:
                // https://sourcegraph.com/github.com/openjdk/jdk/-/blob/src/hotspot/share/memory/allocation.hpp#L488-489
                let res = jni_unchecked!(internal, $jni_get, *obj, is_copy);
                Ok(res)
            }
            /// Release Java $jni_type array
            fn release(env: &JNIEnv, obj: JObject, ptr: NonNull<Self>, mode: i32) -> Result<()> {
                let internal = env.get_native_interface();
                // `mode` is passed straight through to the JNI release call.
                jni_unchecked!(internal, $jni_release, *obj, ptr.as_ptr(), mode as i32);
                Ok(())
            }
        }
    };
}
// Instantiate `TypeArray` for every JNI primitive element type.
type_array!(jint, GetIntArrayElements, ReleaseIntArrayElements);
type_array!(jlong, GetLongArrayElements, ReleaseLongArrayElements);
type_array!(jbyte, GetByteArrayElements, ReleaseByteArrayElements);
type_array!(
    jboolean,
    GetBooleanArrayElements,
    ReleaseBooleanArrayElements
);
type_array!(jchar, GetCharArrayElements, ReleaseCharArrayElements);
type_array!(jshort, GetShortArrayElements, ReleaseShortArrayElements);
type_array!(jfloat, GetFloatArrayElements, ReleaseFloatArrayElements);
type_array!(jdouble, GetDoubleArrayElements, ReleaseDoubleArrayElements);
/// Auto-release wrapper for pointer-based generic arrays.
///
/// This wrapper is used to wrap pointers returned by Get<Type>ArrayElements.
/// While wrapped, the object can be accessed via the `From` impl.
///
/// AutoArray provides automatic array release through a call to appropriate
/// Release<Type>ArrayElements when it goes out of scope.
pub struct AutoArray<'a: 'b, 'b, T: TypeArray> {
    // The Java array object the pinned elements belong to.
    obj: JObject<'a>,
    // Non-null pointer to the element buffer returned by Get<Type>ArrayElements.
    ptr: NonNull<T>,
    // JNI release mode applied on drop (see `discard`).
    mode: ReleaseMode,
    // Whether the VM handed back a copy rather than a direct pointer.
    is_copy: bool,
    env: &'b JNIEnv<'a>,
}
impl<'a, 'b, T: TypeArray> AutoArray<'a, 'b, T> {
pub(crate) fn new(env: &'b JNIEnv<'a>, obj: JObject<'a>, mode: ReleaseMode) -> Result<Self> {
let mut is_copy: jboolean = 0x00;
Ok(AutoArray {
obj,
ptr: {
let ptr = T::get(env, obj, &mut is_copy)?;
NonNull::new(ptr).ok_or(Error::NullPtr("Non-null ptr expected"))?
},
mode,
is_copy: is_copy == sys::JNI_TRUE,
env,
})
}
/// Get a reference to the wrapped pointer
pub fn as_ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
/// Commits the changes to the array, if it is a copy
pub fn commit(&self) -> Result<()> {
self.release_array_elements(sys::JNI_COMMIT)
}
pub fn release(&self) -> Result<()> {
self.release_array_elements(sys::JNI_ABORT)
}
fn release_array_elements(&self, mode: i32) -> Result<()> {
T::release(self.env, self.obj, self.ptr, mode)
}
/// Don't commit the changes to the array on release (if it is a copy).
/// This has no effect if the array is not a copy.
/// This method is useful to change the release mode of an array originally created
/// with `ReleaseMode::CopyBack`.
pub fn discard(&mut self) {
self.mode = ReleaseMode::NoCopyBack;
}
/// Indicates if the array is a copy or not
pub fn is_copy(&self) -> bool {
self.is_copy
}
/// Returns the array size
pub fn size(&self) -> Result<jsize> {
self.env.get_array_length(*self.obj)
}
}
impl<'a, 'b, T: TypeArray> Drop for AutoArray<'a, 'b, T> {
fn drop(&mut self) {
let res = self.release_array_elements(self.mode as i32);
match res {
Ok(()) => {}
Err(e) => error!("error releasing array: {:#?}", e),
}
}
}
/// Allows obtaining the raw element pointer directly from a wrapper reference.
impl<'a, T: TypeArray> From<&'a AutoArray<'a, '_, T>> for *mut T {
    fn from(other: &'a AutoArray<T>) -> *mut T {
        other.as_ptr()
    }
}
| 35.246479 | 121 | 0.613387 |
d9749b031f363b36c1a759762a31de6f5cb57002 | 5,335 | use std::env;
use actix_web::{get, middleware, web, App, HttpRequest, HttpResponse, HttpServer, guard};
use dotenv::dotenv;
use serde::{Serialize, Deserialize};
use qrcode::QrCode;
use qrcode::render::svg;
use std::fs::File;
use std::io::prelude::*;
// Maximum accepted JSON request body, in bytes (applied via JsonConfig in `main`).
const MAX_SIZE: usize = 4096;
/// Request body for `/create`: describes the QR code to render.
///
/// Every field except `qr_code_text` has a serde default, so a client only
/// needs to send the payload text. NOTE(review): the `create` handler
/// currently uses only the text, width and the two basic colors; the marker,
/// shape, logo and frame fields are deserialized but not yet rendered —
/// presumably reserved for future options.
#[derive(Debug, Serialize, Deserialize)]
struct QrObject {
    // Basic
    pub qr_code_text: String,
    #[serde(default = "Extension::png")]
    pub image_format: Extension,
    // Minimum width/height of the rendered image, in pixels.
    #[serde(default = "default_image_width")]
    pub image_width: u32,
    #[serde(default)]
    pub download: bool,
    // Design (CSS-style hex color strings, e.g. "#000000")
    #[serde(default = "default_color_black")]
    pub foreground_color: String,
    #[serde(default = "default_color_white")]
    pub background_color: String,
    // Marker Left
    #[serde(default = "default_color_black")]
    pub marker_left_inner_color: String,
    #[serde(default = "default_color_black")]
    pub marker_left_outer_color: String,
    #[serde(default = "Marker::default")]
    pub marker_left_template: Marker,
    // Marker Right
    #[serde(default = "default_color_black")]
    pub marker_right_inner_color: String,
    #[serde(default = "default_color_black")]
    pub marker_right_outer_color: String,
    #[serde(default = "Marker::default")]
    pub marker_right_template: Marker,
    // Marker Top
    #[serde(default = "default_color_black")]
    pub marker_top_inner_color: String,
    #[serde(default = "default_color_black")]
    pub marker_top_outer_color: String,
    #[serde(default = "Marker::default")]
    pub marker_top_template: Marker,
    // Marker Bottom
    #[serde(default = "default_color_black")]
    pub marker_bottom_inner_color: String,
    #[serde(default = "default_color_black")]
    pub marker_bottom_outer_color: String,
    #[serde(default = "Marker::default")]
    pub marker_bottom_template: Marker,
    // Shape and Logo
    #[serde(default = "Shape::square")]
    pub qr_code_shape: Shape,
    #[serde(default = "Logo::scan")]
    pub qr_code_logo: Logo,
    // Frame
    #[serde(default = "default_color_black")]
    pub frame_color: String,
    #[serde(default = "default_frame_text")]
    pub frame_text: String,
    #[serde(default = "default_color_white")]
    pub frame_text_color: String,
    #[serde(default = "Frame::bottom")]
    pub frame_name: Frame,
}
/// Output image format for the rendered QR code.
#[derive(Serialize, Deserialize, Debug)]
enum Extension { PNG, SVG }
impl Extension {
    // serde default for `QrObject::image_format`.
    fn png() -> Self { Extension::PNG }
}
/// Placement of the optional frame around the code (names suggest header /
/// frame / tooltip variants — rendering not yet implemented here).
#[derive(Serialize, Deserialize, Debug)]
enum Frame { NoFrame, TopHeader, BottomFrame, BottomTooltip }
impl Frame {
    // serde default for `QrObject::frame_name`.
    fn bottom() -> Self { Frame::BottomFrame }
}
/// Template variant for a corner marker.
#[derive(Serialize, Deserialize, Debug)]
enum Marker { Version1, Version2, Version3, Version4, Version5 }
impl Marker {
    // serde default for the four `marker_*_template` fields.
    fn default() -> Self { Marker::Version1 }
}
/// Optional logo embedded in the code.
#[derive(Serialize, Deserialize, Debug)]
enum Logo { NoLogo, ScanMeSquare, ScanMe }
impl Logo {
    // serde default for `QrObject::qr_code_logo`.
    fn scan() -> Self { Logo::ScanMe }
}
/// Shape used for the individual QR modules.
#[derive(Serialize, Deserialize, Debug)]
enum Shape { Square, Rounded, Circles }
impl Shape {
    // serde default for `QrObject::qr_code_shape`.
    fn square() -> Self { Shape::Square }
}
/// serde default for `QrObject::frame_text`.
fn default_frame_text() -> String {
    String::from("Scan me")
}

/// serde default for the light / background color fields.
fn default_color_white() -> String {
    String::from("#ffffff")
}

/// serde default for the dark / foreground color fields.
fn default_color_black() -> String {
    String::from("#000000")
}

/// serde default for `QrObject::image_width`, in pixels.
fn default_image_width() -> u32 {
    500
}
/// Handles `POST /create`: renders the QR code described by `info` as SVG.
///
/// The SVG markup is returned verbatim to the client and, as a side effect,
/// also written to `output.svg` in the working directory.
///
/// NOTE(review): encoding and file I/O abort the worker on failure via
/// `expect` (previously bare `unwrap`); consider returning an HTTP error
/// response instead of panicking on user-influenced input.
async fn create(info: web::Json<QrObject>, req: HttpRequest) -> String {
    println!("request: {:?}", req);
    println!("model: {:?}", info);

    // QrCode::new fails only when the payload exceeds QR capacity.
    let code = QrCode::new(info.qr_code_text.as_bytes())
        .expect("QR payload too large to encode");
    let image = code.render()
        .min_dimensions(info.image_width, info.image_width)
        .dark_color(svg::Color(info.foreground_color.as_str()))
        .light_color(svg::Color(info.background_color.as_str()))
        .build();

    let mut file = File::create("output.svg").expect("failed to create output.svg");
    file.write_all(image.as_bytes())
        .expect("failed to write output.svg");
    image
}
/// 404 handler: plain-text body returned for unmatched GET routes
/// (wired up via `default_service` in `main`).
async fn p404() -> &'static str {
    "Page not found!\r\n"
}
/// Landing page for `GET /`; doubles as a simple liveness check.
#[get("/")]
async fn index() -> &'static str {
    "Hello world!\r\n"
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Load `.env` before any environment-based configuration is read.
    dotenv().ok();
    env::set_var("RUST_LOG", "actix_todo=debug,actix_web=info");
    env_logger::init();
    HttpServer::new(|| {
        App::new()
            // Stamp every response with the application version.
            .wrap(middleware::DefaultHeaders::new().header("X-Version", "0.2"))
            .wrap(middleware::Compress::default())
            .wrap(middleware::Logger::default())
            .data(web::JsonConfig::default().limit(MAX_SIZE)) // <- limit size of the payload (global configuration)
            // index
            .service(index)
            // Create QR code
            .service(web::resource("/create")
                // change json extractor configuration
                .route(web::post().to(create)))
            // default
            .default_service(
                // 404 for GET request
                web::resource("")
                    .route(web::get().to(p404))
                    // all requests that are not `GET`
                    .route(
                        web::route()
                            .guard(guard::Not(guard::Get()))
                            .to(HttpResponse::MethodNotAllowed),
                    ),
            )
    })
    .bind("127.0.0.1:8000")?
    .run()
    .await
}
| 25.772947 | 116 | 0.610309 |
0e3c8fafa6fcf95220c33c2e91775f988755f2fb | 1,835 | //---------------------------------------------------------------------------//
// Copyright (c) 2017-2022 Ismael Gutiérrez González. All rights reserved.
//
// This file is part of the Rusted PackFile Manager (RPFM) project,
// which can be found here: https://github.com/Frodo45127/rpfm.
//
// This file is licensed under the MIT license, which can be found here:
// https://github.com/Frodo45127/rpfm/blob/master/LICENSE.
//---------------------------------------------------------------------------//
/*!
Module with all the code to interact with RigidModel PackedFiles.
This is really a dummy module, as all the logic for this is done in the view through Phazer's lib.
!*/
use serde_derive::{Serialize, Deserialize};
/// This represents the value that every RigidModel PackedFile has in its bytes 0-4,
/// a.k.a. its signature or preamble.
#[allow(dead_code)]
const PACKED_FILE_TYPE: &str = "RMV2";
/// Extension used by RigidModel PackedFiles.
pub const EXTENSION: &str = ".rigid_model_v2";
//---------------------------------------------------------------------------//
// Enum & Structs
//---------------------------------------------------------------------------//
/// This struct contains a RigidModel decoded in memory.
///
/// It is intentionally a thin byte container: decoding/encoding of the actual
/// model is handled elsewhere (per the module docs, in the view layer).
#[derive(Clone, Debug,PartialEq, Serialize, Deserialize)]
pub struct RigidModel {
    // Raw, undecoded bytes of the PackedFile.
    pub data: Vec<u8>,
}
//---------------------------------------------------------------------------//
// Implementations
//---------------------------------------------------------------------------//
/// Implementation of RigidModel.
impl RigidModel {
    /// Wraps the raw bytes of a RigidModel PackedFile without decoding them.
    pub fn read(packed_file_data: &[u8]) -> Self {
        let data = packed_file_data.to_vec();
        Self { data }
    }

    /// Returns a copy of the raw bytes, ready to be written back out.
    pub fn save(&self) -> Vec<u8> {
        self.data.clone()
    }
}
| 34.622642 | 120 | 0.493733 |
219fe0263ad257776eb926c179106124e8c120ac | 6,703 | extern crate rustyline;
extern crate easy_ll;
extern crate weld;
extern crate libc;
use rustyline::error::ReadlineError;
use rustyline::Editor;
use std::env;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::error::Error;
use std::io::prelude::*;
use std::fmt;
use std::collections::HashMap;
use weld::*;
use weld::llvm::LlvmGenerator;
use weld::parser::*;
use weld::pretty_print::*;
use weld::type_inference::*;
use weld::sir::ast_to_sir;
use weld::util::load_runtime_library;
use weld::util::MERGER_BC;
/// Commands the REPL recognizes in addition to raw Weld programs.
enum ReplCommands {
    LoadFile,
}

impl fmt::Display for ReplCommands {
    /// Renders the command as the keyword the user types at the prompt.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let keyword = match *self {
            ReplCommands::LoadFile => "load",
        };
        f.write_str(keyword)
    }
}
/// Processes the LoadFile command.
///
/// `arg` is the path of a file containing a Weld program. Returns the trimmed
/// contents of the file, or a human-readable error message if the argument is
/// missing or the file cannot be opened/read.
fn process_loadfile(arg: String) -> Result<String, String> {
    if arg.is_empty() {
        return Err("Error: expected argument for command 'load'".to_string());
    }

    let path = Path::new(&arg);
    let path_display = path.display();

    // `Display` of the io::Error replaces the deprecated `Error::description()`.
    let mut file = File::open(&path)
        .map_err(|why| format!("Error: couldn't open {}: {}", path_display, why))?;

    let mut contents = String::new();
    file.read_to_string(&mut contents)
        .map_err(|why| format!("Error: couldn't read {}: {}", path_display, why))?;

    Ok(contents.trim().to_string())
}
fn main() {
    // REPL history lives in ~/.weld_history (falls back to the CWD-relative
    // name when $HOME is unavailable).
    let home_path = env::home_dir().unwrap_or(PathBuf::new());
    let history_file_path = home_path.join(".weld_history");
    let history_file_path = history_file_path.to_str().unwrap_or(".weld_history");
    // Map of reserved REPL command keywords ("load") to their handlers.
    let mut reserved_words = HashMap::new();
    reserved_words.insert(ReplCommands::LoadFile.to_string(), ReplCommands::LoadFile);
    let mut rl = Editor::<()>::new();
    // Missing history file on first run is fine; ignore the error.
    if let Err(_) = rl.load_history(&history_file_path) {}
    loop {
        let raw_readline = rl.readline(">> ");
        let readline;
        match raw_readline {
            Ok(raw_readline) => {
                rl.add_history_entry(&raw_readline);
                readline = raw_readline;
            }
            // Ctrl-C and Ctrl-D both terminate the REPL cleanly.
            Err(ReadlineError::Interrupted) => {
                println!("Exiting!");
                break;
            }
            Err(ReadlineError::Eof) => {
                println!("Exiting!");
                break;
            }
            Err(err) => {
                println!("Error: {:?}", err);
                break;
            }
        }
        let trimmed = readline.trim();
        if trimmed == "" {
            continue;
        }
        let program;
        // Do some basic token parsing here: split into a command keyword and
        // (optionally) a single argument.
        let mut tokens = trimmed.splitn(2, " ");
        let command = tokens.next().unwrap();
        let arg = tokens.next().unwrap_or("");
        // Reserved commands ("load <file>") read the program from disk;
        // any other input is parsed directly as Weld source.
        if reserved_words.contains_key(command) {
            let command = reserved_words.get(command).unwrap();
            match *command {
                ReplCommands::LoadFile => {
                    match process_loadfile(arg.to_string()) {
                        Err(s) => {
                            println!("{}", s);
                            continue;
                        }
                        Ok(code) => {
                            program = parse_program(&code);
                        }
                    }
                }
            }
        } else {
            program = parse_program(trimmed);
        }
        if let Err(ref e) = program {
            println!("Error during parsing: {:?}", e);
            continue;
        }
        let program = program.unwrap();
        println!("Raw structure:\n{:?}\n", program);
        // Compilation pipeline: macro substitution -> inlining -> type
        // inference -> loop fusion -> SIR -> LLVM codegen. Each stage's
        // output is printed so the user can inspect the transformations.
        let expr = macro_processor::process_program(&program);
        if let Err(ref e) = expr {
            println!("Error during macro substitution: {}", e);
            continue;
        }
        let mut expr = expr.unwrap();
        println!("After macro substitution:\n{}\n", print_expr(&expr));
        transforms::inline_apply(&mut expr);
        println!("After inline_apply:\n{}\n", print_expr(&expr));
        transforms::uniquify(&mut expr);
        println!("After uniquify :\n{}\n", print_expr(&expr));
        if let Err(ref e) = infer_types(&mut expr) {
            println!("Error during type inference: {}\n", e);
            println!("Partially inferred types:\n{}\n", print_typed_expr(&expr));
            continue;
        }
        println!("After type inference:\n{}\n", print_typed_expr(&expr));
        println!("Expression type: {}\n", print_type(&expr.ty));
        let mut expr = expr.to_typed().unwrap();
        transforms::inline_zips(&mut expr);
        println!("After inlining zips:\n{}\n", print_typed_expr(&expr));
        transforms::fuse_loops_horizontal(&mut expr);
        println!("After horizontal loop fusion:\n{}\n",
                 print_typed_expr(&expr));
        transforms::fuse_loops_vertical(&mut expr);
        transforms::uniquify(&mut expr);
        println!("After vertical loop fusion:\n{}\n", print_typed_expr(&expr));
        println!("final program raw: {:?}", expr);
        let sir_result = ast_to_sir(&expr);
        match sir_result {
            Ok(sir) => {
                println!("SIR representation:\n{}\n", &sir);
                let mut llvm_gen = LlvmGenerator::new();
                if let Err(ref e) = llvm_gen.add_function_on_pointers("run", &sir) {
                    println!("Error during LLVM code gen:\n{}\n", e);
                } else {
                    let llvm_code = llvm_gen.result();
                    println!("LLVM code:\n{}\n", llvm_code);
                    // The merger runtime must be loadable before JIT compiling.
                    if let Err(e) = load_runtime_library() {
                        println!("Couldn't load runtime: {}", e);
                        continue;
                    }
                    if let Err(ref e) = easy_ll::compile_module(&llvm_code, Some(MERGER_BC)) {
                        println!("Error during LLVM compilation:\n{}\n", e);
                    } else {
                        println!("LLVM module compiled successfully\n");
                    }
                }
            }
            Err(ref e) => {
                println!("Error during SIR code gen:\n{}\n", e);
            }
        }
    }
    rl.save_history(&history_file_path).unwrap();
}
| 31.767773 | 94 | 0.511114 |
3994831c0b3008f413721201c438e1c386a9b291 | 4,766 | // Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0
use dijets_types::{account_config, transaction::SignedTransaction, vm_status::VMStatus};
use language_e2e_tests::{
account::Account,
common_transactions::create_account_txn,
execution_strategies::{
basic_strategy::BasicExecutor,
guided_strategy::{
AnnotatedTransaction, GuidedExecutor, PartitionedGuidedStrategy,
UnPartitionedGuidedStrategy,
},
multi_strategy::MultiExecutor,
random_strategy::RandomExecutor,
types::Executor,
},
};
/// Builds a create-account transaction for a fresh account, signed by the
/// Dijets root account with sequence number `seq_num + 1`.
fn txn(seq_num: u64) -> SignedTransaction {
    let new_account = Account::new();
    let root = Account::new_dijets_root();
    create_account_txn(&root, &new_account, seq_num + 1, 0, account_config::xus_tag())
}
#[test]
fn test_execution_strategies() {
{
println!("===========================================================================");
println!("TESTING BASIC STRATEGY");
println!("===========================================================================");
let big_block = (0..10).map(txn).collect();
let mut exec = BasicExecutor::new();
exec.execute_block(big_block).unwrap();
}
{
println!("===========================================================================");
println!("TESTING RANDOM STRATEGY");
println!("===========================================================================");
let big_block = (0..10).map(txn).collect();
let mut exec = RandomExecutor::from_os_rng();
exec.execute_block(big_block).unwrap();
}
{
println!("===========================================================================");
println!("TESTING GUIDED STRATEGY");
println!("===========================================================================");
let mut block1: Vec<_> = (0..10)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i))))
.collect();
block1.push(AnnotatedTransaction::Block);
let mut block = (0..5)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 10))))
.collect();
block1.append(&mut block);
block1.push(AnnotatedTransaction::Block);
let mut block: Vec<_> = (0..7)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 15))))
.collect();
block1.append(&mut block);
block1.push(AnnotatedTransaction::Block);
let mut block = (0..20)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 22))))
.collect();
block1.append(&mut block);
let mut exec = GuidedExecutor::new(PartitionedGuidedStrategy);
exec.execute_block(block1).unwrap();
}
{
println!("===========================================================================");
println!("TESTING COMPOSED STRATEGY 1");
println!("===========================================================================");
let mut block1: Vec<_> = (0..10)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i))))
.collect();
block1.push(AnnotatedTransaction::Block);
let mut block = (0..5)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 10))))
.collect();
block1.append(&mut block);
block1.push(AnnotatedTransaction::Block);
let mut block: Vec<_> = (0..7)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 15))))
.collect();
block1.append(&mut block);
block1.push(AnnotatedTransaction::Block);
let mut block = (0..20)
.map(|i| AnnotatedTransaction::Txn(Box::new(txn(i + 22))))
.collect();
block1.append(&mut block);
let mut exec = MultiExecutor::<AnnotatedTransaction, VMStatus>::new();
exec.add_executor(GuidedExecutor::new(PartitionedGuidedStrategy));
exec.add_executor(GuidedExecutor::new(UnPartitionedGuidedStrategy));
exec.execute_block(block1).unwrap();
}
{
println!("===========================================================================");
println!("TESTING COMPOSED STRATEGY 2");
println!("===========================================================================");
let block = (0..10).map(txn).collect();
let mut exec = MultiExecutor::<SignedTransaction, VMStatus>::new();
exec.add_executor(RandomExecutor::from_os_rng());
exec.add_executor(RandomExecutor::from_os_rng());
exec.add_executor(RandomExecutor::from_os_rng());
exec.execute_block(block).unwrap();
}
}
| 39.38843 | 96 | 0.490138 |
3a6532dce0206e17e54ed9c66343edaba590d6cb | 18,294 | // Copyright 2015, The inlinable_string crate Developers. See the COPYRIGHT file
// at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT
// or http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A trait that exists to abstract string operations over any number of
//! concrete string type implementations.
//!
//! See the [crate level documentation](./../index.html) for more.
use std::borrow::{Borrow, Cow};
use std::cmp::PartialEq;
use std::fmt::Display;
use std::string::{FromUtf16Error, FromUtf8Error};
/// A trait that exists to abstract string operations over any number of
/// concrete string type implementations.
///
/// See the [crate level documentation](./../index.html) for more.
///
/// Implemented below for `std::string::String`; the doc examples use the
/// crate's `InlinableString` implementor — TODO confirm the full implementor
/// list against the rest of the crate.
pub trait StringExt<'a>:
    Borrow<str>
    + Display
    + PartialEq<str>
    + PartialEq<&'a str>
    + PartialEq<String>
    + PartialEq<Cow<'a, str>>
{
    /// Creates a new string buffer initialized with the empty string.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let s = InlinableString::new();
    /// ```
    fn new() -> Self
    where
        Self: Sized;

    /// Creates a new string buffer with the given capacity. The string will be
    /// able to hold at least `capacity` bytes without reallocating. If
    /// `capacity` is less than or equal to `INLINE_STRING_CAPACITY`, the string
    /// will not heap allocate.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let s = InlinableString::with_capacity(10);
    /// ```
    fn with_capacity(capacity: usize) -> Self
    where
        Self: Sized;

    /// Returns the vector as a string buffer, if possible, taking care not to
    /// copy it.
    ///
    /// # Failure
    ///
    /// If the given vector is not valid UTF-8, then the original vector and the
    /// corresponding error is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let hello_vec = vec![104, 101, 108, 108, 111];
    /// let s = InlinableString::from_utf8(hello_vec).unwrap();
    /// assert_eq!(s, "hello");
    ///
    /// let invalid_vec = vec![240, 144, 128];
    /// let s = InlinableString::from_utf8(invalid_vec).err().unwrap();
    /// let err = s.utf8_error();
    /// assert_eq!(s.into_bytes(), [240, 144, 128]);
    /// ```
    fn from_utf8(vec: Vec<u8>) -> Result<Self, FromUtf8Error>
    where
        Self: Sized;

    /// Converts a vector of bytes to a new UTF-8 string.
    /// Any invalid UTF-8 sequences are replaced with U+FFFD REPLACEMENT CHARACTER.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let input = b"Hello \xF0\x90\x80World";
    /// let output = InlinableString::from_utf8_lossy(input);
    /// assert_eq!(output, "Hello \u{FFFD}World");
    /// ```
    fn from_utf8_lossy(v: &'a [u8]) -> Cow<'a, str>
    where
        Self: Sized,
    {
        // Default implementation: delegate to the std lossy conversion.
        String::from_utf8_lossy(v)
    }

    /// Decode a UTF-16 encoded vector `v` into a `InlinableString`, returning `None`
    /// if `v` contains any invalid data.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// // 𝄞music
    /// let mut v = &mut [0xD834, 0xDD1E, 0x006d, 0x0075,
    ///                   0x0073, 0x0069, 0x0063];
    /// assert_eq!(InlinableString::from_utf16(v).unwrap(),
    ///            InlinableString::from("𝄞music"));
    ///
    /// // 𝄞mu<invalid>ic
    /// v[4] = 0xD800;
    /// assert!(InlinableString::from_utf16(v).is_err());
    /// ```
    fn from_utf16(v: &[u16]) -> Result<Self, FromUtf16Error>
    where
        Self: Sized;

    /// Decode a UTF-16 encoded vector `v` into a string, replacing
    /// invalid data with the replacement character (U+FFFD).
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// // 𝄞mus<invalid>ic<invalid>
    /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
    ///           0x0073, 0xDD1E, 0x0069, 0x0063,
    ///           0xD834];
    ///
    /// assert_eq!(InlinableString::from_utf16_lossy(v),
    ///            InlinableString::from("𝄞mus\u{FFFD}ic\u{FFFD}"));
    /// ```
    fn from_utf16_lossy(v: &[u16]) -> Self
    where
        Self: Sized;

    /// Creates a new `InlinableString` from a length, capacity, and pointer.
    ///
    /// # Safety
    ///
    /// This is _very_ unsafe because:
    ///
    /// * We call `String::from_raw_parts` to get a `Vec<u8>`. Therefore, this
    ///   function inherits all of its unsafety, see [its
    ///   documentation](https://doc.rust-lang.org/nightly/collections/vec/struct.Vec.html#method.from_raw_parts)
    ///   for the invariants it expects, they also apply to this function.
    ///
    /// * We assume that the `Vec` contains valid UTF-8.
    unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> Self
    where
        Self: Sized;

    /// Converts a vector of bytes to a new `InlinableString` without checking
    /// if it contains valid UTF-8.
    ///
    /// # Safety
    ///
    /// This is unsafe because it assumes that the UTF-8-ness of the vector has
    /// already been validated.
    unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> Self
    where
        Self: Sized;

    /// Returns the underlying byte buffer, encoded as UTF-8.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let s = InlinableString::from("hello");
    /// let bytes = s.into_bytes();
    /// assert_eq!(bytes, [104, 101, 108, 108, 111]);
    /// ```
    fn into_bytes(self) -> Vec<u8>;

    /// Pushes the given string onto this string buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// s.push_str("bar");
    /// assert_eq!(s, "foobar");
    /// ```
    fn push_str(&mut self, string: &str);

    /// Returns the number of bytes that this string buffer can hold without
    /// reallocating.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let s = InlinableString::with_capacity(10);
    /// assert!(s.capacity() >= 10);
    /// ```
    fn capacity(&self) -> usize;

    /// Reserves capacity for at least `additional` more bytes to be inserted
    /// in the given `InlinableString`. The collection may reserve more space to avoid
    /// frequent reallocations.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::new();
    /// s.reserve(10);
    /// assert!(s.capacity() >= 10);
    /// ```
    fn reserve(&mut self, additional: usize);

    /// Reserves the minimum capacity for exactly `additional` more bytes to be
    /// inserted in the given `InlinableString`. Does nothing if the capacity is already
    /// sufficient.
    ///
    /// Note that the allocator may give the collection more space than it
    /// requests. Therefore capacity can not be relied upon to be precisely
    /// minimal. Prefer `reserve` if future insertions are expected.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity overflows `usize`.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::new();
    /// s.reserve_exact(10);
    /// assert!(s.capacity() >= 10);
    /// ```
    fn reserve_exact(&mut self, additional: usize);

    /// Shrinks the capacity of this string buffer to match its length. If the
    /// string's length is less than `INLINE_STRING_CAPACITY` and the string is
    /// heap-allocated, then it is demoted to inline storage.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// s.reserve(100);
    /// assert!(s.capacity() >= 100);
    /// s.shrink_to_fit();
    /// assert_eq!(s.capacity(), inlinable_string::INLINE_STRING_CAPACITY);
    /// ```
    fn shrink_to_fit(&mut self);

    /// Adds the given character to the end of the string.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("abc");
    /// s.push('1');
    /// s.push('2');
    /// s.push('3');
    /// assert_eq!(s, "abc123");
    /// ```
    fn push(&mut self, ch: char);

    /// Works with the underlying buffer as a byte slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let s = InlinableString::from("hello");
    /// assert_eq!(s.as_bytes(), [104, 101, 108, 108, 111]);
    /// ```
    fn as_bytes(&self) -> &[u8];

    /// Shortens a string to the specified length.
    ///
    /// # Panics
    ///
    /// Panics if `new_len` > current length, or if `new_len` is not a character
    /// boundary.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("hello");
    /// s.truncate(2);
    /// assert_eq!(s, "he");
    /// ```
    fn truncate(&mut self, new_len: usize);

    /// Removes the last character from the string buffer and returns it.
    /// Returns `None` if this string buffer is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// assert_eq!(s.pop(), Some('o'));
    /// assert_eq!(s.pop(), Some('o'));
    /// assert_eq!(s.pop(), Some('f'));
    /// assert_eq!(s.pop(), None);
    /// ```
    fn pop(&mut self) -> Option<char>;

    /// Removes the character from the string buffer at byte position `idx` and
    /// returns it.
    ///
    /// # Warning
    ///
    /// This is an O(n) operation as it requires copying every element in the
    /// buffer.
    ///
    /// # Panics
    ///
    /// If `idx` does not lie on a character boundary, or if it is out of
    /// bounds, then this function will panic.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// assert_eq!(s.remove(0), 'f');
    /// assert_eq!(s.remove(1), 'o');
    /// assert_eq!(s.remove(0), 'o');
    /// ```
    fn remove(&mut self, idx: usize) -> char;

    /// Inserts a character into the string buffer at byte position `idx`.
    ///
    /// # Warning
    ///
    /// This is an O(n) operation as it requires copying every element in the
    /// buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// s.insert(2, 'f');
    /// assert!(s == "fofo");
    /// ```
    ///
    /// # Panics
    ///
    /// If `idx` does not lie on a character boundary or is out of bounds, then
    /// this function will panic.
    fn insert(&mut self, idx: usize, ch: char);

    /// Views the string buffer as a mutable sequence of bytes.
    ///
    /// # Safety
    ///
    /// This is unsafe because it does not check to ensure that the resulting
    /// string will be valid UTF-8. The caller must restore valid UTF-8 before
    /// the borrow ends.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("hello");
    /// unsafe {
    ///     let slice = s.as_mut_slice();
    ///     assert!(slice == &[104, 101, 108, 108, 111]);
    ///     slice.reverse();
    /// }
    /// assert_eq!(s, "olleh");
    /// ```
    unsafe fn as_mut_slice(&mut self) -> &mut [u8];

    /// Returns the number of bytes in this string.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let a = InlinableString::from("foo");
    /// assert_eq!(a.len(), 3);
    /// ```
    fn len(&self) -> usize;

    /// Returns true if the string contains no bytes
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut v = InlinableString::new();
    /// assert!(v.is_empty());
    /// v.push('a');
    /// assert!(!v.is_empty());
    /// ```
    #[inline]
    fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Truncates the string, returning it to 0 length.
    ///
    /// # Examples
    ///
    /// ```
    /// use inlinable_string::{InlinableString, StringExt};
    ///
    /// let mut s = InlinableString::from("foo");
    /// s.clear();
    /// assert!(s.is_empty());
    /// ```
    #[inline]
    fn clear(&mut self) {
        self.truncate(0);
    }
}
/// `StringExt` for the standard `String`: every method simply delegates to
/// the inherent `String` method of the same name. (`from_utf8_lossy` uses the
/// trait's default body.)
impl<'a> StringExt<'a> for String {
    #[inline]
    fn new() -> Self {
        String::new()
    }
    #[inline]
    fn with_capacity(capacity: usize) -> Self {
        String::with_capacity(capacity)
    }
    #[inline]
    fn from_utf8(vec: Vec<u8>) -> Result<Self, FromUtf8Error> {
        String::from_utf8(vec)
    }
    #[inline]
    fn from_utf16(v: &[u16]) -> Result<Self, FromUtf16Error> {
        String::from_utf16(v)
    }
    #[inline]
    fn from_utf16_lossy(v: &[u16]) -> Self {
        String::from_utf16_lossy(v)
    }
    #[inline]
    unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> Self {
        String::from_raw_parts(buf, length, capacity)
    }
    #[inline]
    unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> Self {
        String::from_utf8_unchecked(bytes)
    }
    #[inline]
    fn into_bytes(self) -> Vec<u8> {
        String::into_bytes(self)
    }
    #[inline]
    fn push_str(&mut self, string: &str) {
        String::push_str(self, string)
    }
    #[inline]
    fn capacity(&self) -> usize {
        String::capacity(self)
    }
    #[inline]
    fn reserve(&mut self, additional: usize) {
        String::reserve(self, additional)
    }
    #[inline]
    fn reserve_exact(&mut self, additional: usize) {
        String::reserve_exact(self, additional)
    }
    #[inline]
    fn shrink_to_fit(&mut self) {
        String::shrink_to_fit(self)
    }
    #[inline]
    fn push(&mut self, ch: char) {
        String::push(self, ch)
    }
    #[inline]
    fn as_bytes(&self) -> &[u8] {
        String::as_bytes(self)
    }
    #[inline]
    fn truncate(&mut self, new_len: usize) {
        String::truncate(self, new_len)
    }
    #[inline]
    fn pop(&mut self) -> Option<char> {
        String::pop(self)
    }
    #[inline]
    fn remove(&mut self, idx: usize) -> char {
        String::remove(self, idx)
    }
    #[inline]
    fn insert(&mut self, idx: usize, ch: char) {
        String::insert(self, idx, ch)
    }
    #[inline]
    unsafe fn as_mut_slice(&mut self) -> &mut [u8] {
        // Reinterpret the UTF-8 buffer as raw bytes; the caller must keep it
        // valid UTF-8 (see the trait-level safety documentation).
        &mut *(self.as_mut_str() as *mut str as *mut [u8])
    }
    #[inline]
    fn len(&self) -> usize {
        String::len(self)
    }
}
#[cfg(test)]
mod std_string_stringext_sanity_tests {
    //! Sanity tests for std::string::String's StringExt implementation:
    //! each test exercises one trait method through the `StringExt` surface.
    use super::StringExt;
    #[test]
    fn test_new() {
        let s = <String as StringExt>::new();
        assert!(StringExt::is_empty(&s));
    }
    #[test]
    fn test_with_capacity() {
        let s = <String as StringExt>::with_capacity(10);
        assert!(StringExt::capacity(&s) >= 10);
    }
    #[test]
    fn test_from_utf8() {
        let s = <String as StringExt>::from_utf8(vec![104, 101, 108, 108, 111]);
        assert_eq!(s.unwrap(), "hello");
    }
    #[test]
    fn test_from_utf16() {
        // No mutation needed; `from_utf16` only reads the slice.
        let v = &[0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0x0069, 0x0063];
        let s = <String as StringExt>::from_utf16(v);
        assert_eq!(s.unwrap(), "𝄞music");
    }
    #[test]
    fn test_from_utf16_lossy() {
        // 0xDD1E mid-stream and 0xD834 at the end are invalid surrogates and
        // must decode to U+FFFD.
        let v = &[0xD834, 0xDD1E, 0x006d, 0x0075, 0x0073, 0xDD1E, 0x0069, 0x0063, 0xD834];
        let s = <String as StringExt>::from_utf16_lossy(v);
        assert_eq!(s, "𝄞mus\u{FFFD}ic\u{FFFD}");
    }
    #[test]
    fn test_from_utf8_lossy() {
        // Renamed from `test_from_utf16_lossy`: the body has always exercised
        // the UTF-8 (not UTF-16) lossy conversion.
        let input = b"Hello \xF0\x90\x80World";
        let output = <String as StringExt>::from_utf8_lossy(input);
        assert_eq!(output, "Hello \u{FFFD}World");
    }
    #[test]
    fn test_into_bytes() {
        let s = String::from("hello");
        let bytes = StringExt::into_bytes(s);
        assert_eq!(bytes, [104, 101, 108, 108, 111]);
    }
    #[test]
    fn test_push_str() {
        let mut s = String::from("hello");
        StringExt::push_str(&mut s, " world");
        assert_eq!(s, "hello world");
    }
    #[test]
    fn test_capacity() {
        let s = <String as StringExt>::with_capacity(100);
        assert!(String::capacity(&s) >= 100);
    }
    #[test]
    fn test_reserve() {
        let mut s = <String as StringExt>::new();
        StringExt::reserve(&mut s, 100);
        assert!(String::capacity(&s) >= 100);
    }
    #[test]
    fn test_reserve_exact() {
        let mut s = <String as StringExt>::new();
        StringExt::reserve_exact(&mut s, 100);
        assert!(String::capacity(&s) >= 100);
    }
    #[test]
    fn test_shrink_to_fit() {
        let mut s = <String as StringExt>::with_capacity(100);
        StringExt::push_str(&mut s, "foo");
        StringExt::shrink_to_fit(&mut s);
        assert_eq!(String::capacity(&s), 3);
    }
    #[test]
    fn test_push() {
        let mut s = String::new();
        StringExt::push(&mut s, 'a');
        assert_eq!(s, "a");
    }
    #[test]
    fn test_truncate() {
        let mut s = String::from("foo");
        StringExt::truncate(&mut s, 1);
        assert_eq!(s, "f");
    }
    #[test]
    fn test_pop() {
        let mut s = String::from("foo");
        assert_eq!(StringExt::pop(&mut s), Some('o'));
        assert_eq!(StringExt::pop(&mut s), Some('o'));
        assert_eq!(StringExt::pop(&mut s), Some('f'));
        assert_eq!(StringExt::pop(&mut s), None);
    }
}
| 27.760243 | 113 | 0.554717 |
6aa3ceced761eff72602067a450b93e8e2a7304d | 16,160 | #![allow(dead_code, unused_variables)]
extern crate core_compat;
extern crate png;
extern crate xml_writer;
use std::path::Path;
use std::path::PathBuf;
use std::fs::File;
use std::fs::read_dir;
use std::io::Read;
use std::io::BufWriter;
use core_compat::entity::resource_file::ResourceFile;
use core_compat::entity::resource::Resource;
use core_compat::entity::rmd::Rmd;
use core_compat::entity::rmd_type::RmdType;
use core_compat::entity::map::Map;
use core_compat::entity::list::List;
use core_compat::error::Error;
use core_compat::parser::rle::parse_rle;
use core_compat::parser::rmd::parse_rmd;
use core_compat::parser::rmm::parse_rmm;
use core_compat::parser::lst::parse_lst;
// Root directory all converted assets and descriptor XML files are written to.
static OUTPUT_PATH: &'static str = "./temp/";
// This is the list of data folder's and list files for them.
// Tuple layout per entry:
//   (long kind name, short prefix used in output file names,
//    source sprite folder, source .lst index file, use_v2 list format)
static RLE_ENTRIES: [(&'static str, &'static str, &'static str, &'static str, bool); 16] = [
    //   type    |short| source path            | source list path          | type 2?
    ("bullets",   "bul", "./data/RLEs/Bul",     "./data/RLEs/bul.lst",     false),
    ("icons",     "ico", "./data/RLEs/Ico",     "./data/RLEs/ico.lst",     false),
    ("objects",   "obj", "./data/RLEs/Obj",     "./data/RLEs/obj.lst",     true),
    ("tiles",     "tle", "./data/RLEs/Tle",     "./data/RLEs/tle.lst",     false),
    ("interface", "int", "./data/RLEs/Int",     "./data/RLEs/int.lst",     false),
    ("philar",    "ch0", "./data/RLEs/Chr/C00", "./data/RLEs/Chr/c00.lst", false),
    ("azlar",     "ch1", "./data/RLEs/Chr/C01", "./data/RLEs/Chr/c01.lst", false),
    ("sadad",     "ch2", "./data/RLEs/Chr/C02", "./data/RLEs/Chr/c02.lst", false),
    ("destino",   "ch3", "./data/RLEs/Chr/C03", "./data/RLEs/Chr/c03.lst", false),
    ("jarexx",    "ch4", "./data/RLEs/Chr/C04", "./data/RLEs/Chr/c04.lst", false),
    ("canon",     "ch5", "./data/RLEs/Chr/C05", "./data/RLEs/Chr/c05.lst", false),
    ("kitara",    "ch6", "./data/RLEs/Chr/C06", "./data/RLEs/Chr/c06.lst", false),
    ("lunarena",  "ch7", "./data/RLEs/Chr/C07", "./data/RLEs/Chr/c07.lst", false),
    ("lavita",    "ch8", "./data/RLEs/Chr/C08", "./data/RLEs/Chr/c08.lst", false),
    ("ch_9_gm",   "ch9", "./data/RLEs/Chr/C09", "./data/RLEs/Chr/c09.lst", false),
    ("extra_chr", "etc", "./data/RLEs/Chr/Etc", "./data/RLEs/Chr/etc.lst", false),
    // The sounds one is the only one which is a little different...
    // ("Sounds", "snd", "../data/RLEs/Snd", "../data/RLEs/snd.lst"),
];
// (kind, source directory) for the map files.
static RMM_ENTRY: (&'static str, &'static str) =
    ("maps", "./data/DATAs/Map");
// (kind, short prefix, source directory, parser type tag) for RMD data files.
static RMD_ENTRIES: [(&'static str, &'static str, &'static str, RmdType); 5] = [
    ("bullet", "bul", "./data/DATAs/Bul", RmdType::Bullet),
    ("char",   "chr", "./data/DATAs/Chr", RmdType::Character),
    ("icon",   "ico", "./data/DATAs/Ico", RmdType::Icon),
    ("object", "obj", "./data/DATAs/Obj", RmdType::Object),
    ("tile",   "tle", "./data/DATAs/Tle", RmdType::Tile),
];
/// Entry point: ensures the output root exists, then runs the enabled
/// conversion passes (currently only the map-image export).
fn main() {
    println!("Starting from directory: {:?}", ::std::env::current_dir().unwrap());
    let root_out_dir = Path::new(OUTPUT_PATH);
    // Create the output directory *before* canonicalizing it: `canonicalize`
    // errors on a non-existent path, so the previous order (print canonical
    // path, then create) panicked via `unwrap` on the very first run.
    // `create_dir` errors (e.g. "already exists") are reported but tolerated.
    match std::fs::create_dir(root_out_dir) {
        Ok(_) => (),
        Err(e) => println!("{:?}", e),
    }
    println!("Creating directory: {:?}", root_out_dir.canonicalize().unwrap());
    // parse the list file and insert them into the database
    // convert_rle_data(false);
    // convert the maps ...
    export_map_images();
    // convert_rmm_data();
    // ... and rmd files
    // convert_rmd_data();
    println!("finished!");
}
/// Parses every RMM map file under the map data directory and prints a
/// per-map summary. (Despite the name, no images are emitted yet.)
fn export_map_images() {
    // Ensure `<OUTPUT_PATH>/map` exists; failures (e.g. already present) are
    // reported and tolerated.
    let out_dir: PathBuf = [OUTPUT_PATH, "map"].iter().collect();
    println!("Creating directory: {:?}", out_dir);
    if let Err(e) = std::fs::create_dir(out_dir) {
        println!("{:?}", e);
    }
    // Walk the map source directory and parse each file, skipping (but
    // logging) anything that fails to parse.
    let (_kind, map_dir) = RMM_ENTRY;
    let mut maps: Vec<Map> = Vec::new();
    for entry in read_dir(Path::new(map_dir)).unwrap() {
        let path = entry.unwrap().path();
        match load_rmm_data(&path) {
            Ok(map) => maps.push(map),
            Err(e) => {
                println!("{:?}", e);
                println!("{:?}", path);
            }
        }
    }
    println!("parsed {} map entries.", maps.len());
    for map in maps.iter() {
        println!("map: {}, name: {}", map.number, map.name);
    }
}
/// Parses every RMD file in each configured source folder. The parsed data is
/// currently discarded, so this acts as a validation pass (it panics on the
/// first unparsable file).
fn convert_rmd_data() {
    // Ensure `<OUTPUT_PATH>/data` exists; failures are reported and tolerated.
    let out_dir: PathBuf = [OUTPUT_PATH, "data"].iter().collect();
    println!("Creating directory: {:?}", out_dir);
    if let Err(e) = std::fs::create_dir(out_dir) {
        println!("{:?}", e);
    }
    for &(_kind, _short, dir, rmd_type) in RMD_ENTRIES.iter() {
        for entry in read_dir(dir).unwrap() {
            let path = entry.unwrap().path();
            // Parse (and drop) the file; `unwrap` surfaces parse failures.
            let _parsed: Rmd = load_rmd_data(&path, rmd_type).unwrap();
        }
    }
}
/// Parses every RMM map file and exports each one as
/// `<OUTPUT_PATH>/map/maps_<number>.xml`, containing the map dimensions and a
/// `<tile>` element per cell (row-major order).
fn convert_rmm_data() {
    // create the output directory if it doesn't exist yet
    let mut map_out_dir = PathBuf::new();
    map_out_dir.push(OUTPUT_PATH);
    map_out_dir.push("map");
    println!("Creating directory: {:?}", map_out_dir);
    // "Already exists" and similar errors are reported but tolerated.
    match std::fs::create_dir(map_out_dir) {
        Ok(_) => (),
        Err(e) => println!("{:?}", e),
    }
    // book-keeping of map data paths
    let (kind, path) = RMM_ENTRY;
    let map_path = Path::new(path);
    let map_file_paths = read_dir(map_path).unwrap();
    // parse the map files in the map directory; unparsable files are logged
    // and skipped rather than aborting the whole export.
    let mut map_list: Vec<Map> = Vec::new();
    for entry in map_file_paths {
        let entry = entry.unwrap();
        let path = entry.path();
        let map: Map = match load_rmm_data(&path) {
            Ok(map) => map,
            Err(e) => {
                println!("{:?}", e);
                println!("{:?}", path);
                continue;
            }
        };
        map_list.push(map);
    }
    println!("parsed {} map entries.", map_list.len());
    // export the files as xml data in the output directory
    for map in map_list {
        // One XML file per map, keyed by its zero-padded map number.
        let map_out_file_name = format!("{}_{:03}.xml", kind, map.number());
        let mut path_buf = PathBuf::new();
        path_buf.push(OUTPUT_PATH);
        path_buf.push("map");
        path_buf.push(map_out_file_name);
        let file = File::create(&path_buf).unwrap();
        let writer = BufWriter::new(file);
        let mut xml = xml_writer::XmlWriter::new(writer);
        xml.begin_elem("map").unwrap();
        // map number
        xml.begin_elem("number").unwrap();
        xml.text(&format!("{}", map.number())).unwrap();
        xml.end_elem().unwrap();
        // size_x
        xml.begin_elem("size_x").unwrap();
        xml.text(&format!("{}", map.size_x())).unwrap();
        xml.end_elem().unwrap();
        // size_y
        xml.begin_elem("size_y").unwrap();
        xml.text(&format!("{}", map.size_y())).unwrap();
        xml.end_elem().unwrap();
        // events
        // TODO: The exported events seem a little wonky...
        /*
        for event in map.events {
            xml.begin_elem("event").unwrap();
            xml.attr("number", &format!("{}", event.number)).unwrap();
            xml.attr("left", &format!("{}", event.left)).unwrap();
            xml.attr("top", &format!("{}", event.top)).unwrap();
            xml.attr("right", &format!("{}", event.right)).unwrap();
            xml.attr("bottom", &format!("{}", event.bottom)).unwrap();
            xml.end_elem().unwrap();
        }
        */
        // tiles: `tiles()` is iterated linearly and (x, y) coordinates are
        // reconstructed row-major from the map's size_x.
        let mut x = 0;
        let mut y = 0;
        let max_x = map.size_x();
        let max_y = map.size_y();
        for tile in map.tiles() {
            // <tile>
            xml.begin_elem("tile").unwrap();
            // <x>
            xml.begin_elem("x").unwrap();
            xml.text(&format!("{}", &format!("{}", x))).unwrap();
            xml.end_elem().unwrap();
            // <y>
            xml.begin_elem("y").unwrap();
            xml.text(&format!("{}", &format!("{}", y))).unwrap();
            xml.end_elem().unwrap();
            // <object_ref> rm data reference
            xml.begin_elem("object_ref").unwrap();
            xml.attr("file", &format!("{}", tile.obj_rmd_entry.file())).unwrap();
            xml.attr("index", &format!("{}", tile.obj_rmd_entry.index())).unwrap();
            xml.end_elem().unwrap();
            // <tile_ref> rm data reference
            xml.begin_elem("tile_ref").unwrap();
            xml.attr("file", &format!("{}", tile.tle_rmd_entry.file())).unwrap();
            xml.attr("index", &format!("{}", tile.tle_rmd_entry.index())).unwrap();
            xml.end_elem().unwrap();
            // <warp>
            xml.begin_elem("warp").unwrap();
            xml.text(&format!("{}", tile.warp)).unwrap();
            xml.end_elem().unwrap();
            // <collision>
            xml.begin_elem("collision").unwrap();
            xml.text(&format!("{}", tile.collision)).unwrap();
            xml.end_elem().unwrap();
            // </tile>
            xml.end_elem().unwrap();
            // handle coordinate increments (advance a row at end of line)
            x += 1;
            if x >= max_x {
                y += 1;
                x = 0;
            }
        }
        // Sanity check: after consuming all tiles we should have walked
        // exactly `size_y` rows; a mismatch points at corrupt map data.
        if y != max_y {
            println!("Map dimension mis-match: y:{}, max_y: {}", y, max_y);
        }
        xml.close().unwrap();
        xml.flush().unwrap();
    }
}
/// Decodes every RLE sprite folder into PNGs under `<OUTPUT_PATH>/<short>/`.
///
/// With `use_lst == true`, sprites are matched against their `.lst` index and
/// a `<kind>.xml` descriptor is written; otherwise PNGs are named from the
/// raw file number and index.
fn convert_rle_data(use_lst: bool) {
    for &(kind, short_kind, folder, list_name, use_v2) in RLE_ENTRIES.iter() {
        dbg!("file: {:?}", &kind);
        // create a sub-folder for the data if it doesn't exist
        let mut out_dir = PathBuf::new();
        out_dir.push(OUTPUT_PATH);
        out_dir.push(short_kind);
        println!("Creating directory: {:?}", out_dir);
        match std::fs::create_dir(&out_dir) {
            Ok(_) => (),
            Err(e) => println!("{:?}", e),
        }
        println!("Created: {:?}", out_dir.canonicalize().unwrap());
        // Load the `.lst` index when requested.
        // BUG FIX: the previous version declared an outer empty `list_data`
        // and then re-`let` a second `list_data` *inside* the `if` block; the
        // inner binding shadowed the outer one and was dropped at the end of
        // the `if`, so the matching loop below always saw an empty list and
        // never produced any descriptor entries.
        let list_data = if use_lst {
            let data = load_list_data(Path::new(list_name), use_v2).unwrap();
            dbg!(data.items.len());
            data
        } else {
            List { items: vec![] }
        };
        // load the actual sprites into the database
        let rle_paths: Vec<std::fs::DirEntry> =
            read_dir(folder).unwrap().filter_map(|ent| ent.ok()).collect();
        let mut resources = Vec::<Resource>::new();
        dbg!(rle_paths.len());
        for entry in rle_paths.into_iter() {
            let path = entry.path();
            match load_rle_data(&path) {
                Err(e) => {
                    dbg!(e);
                }
                Ok(res_file) => {
                    for resource in res_file.resources {
                        resources.push(resource);
                    }
                }
            }
        }
        dbg!(&resources.len());
        // Join decoded sprites against the index entries (file number +
        // sprite index) and emit one PNG per match.
        let mut combi_entries: Vec<RleCombiEntry> = Vec::new();
        let mut matches = 0;
        if use_lst {
            for rle in resources.iter() {
                for item in &list_data.items {
                    if item.entry.file() == rle.file_num && item.entry.index() == rle.index {
                        matches += 1;
                        let file_name = format!("{}_{}.png", &short_kind, item.id);
                        combi_entries.push(RleCombiEntry {
                            id: item.id,
                            name: item.name.clone(),
                            x_offset: rle.offset_x,
                            y_offset: rle.offset_y,
                            width: rle.width,
                            height: rle.height,
                            file_name: file_name.clone(),
                        });
                        // Generate the png files
                        write_png(short_kind, &file_name, rle);
                    }
                }
            }
            dbg!(matches);
            // write out descriptor file
            write_list_file(kind, combi_entries);
        } else {
            // No index available: name the PNG after the raw RLE identifiers.
            for rle in resources.iter() {
                let file_name = format!("{}_{}-{}.png", &short_kind, rle.file_num, rle.index);
                write_png(short_kind, &file_name, rle);
            }
        }
    } // end kind entry loop
}
/// Encodes one decoded RLE resource as an 8-bit RGBA PNG at
/// `<OUTPUT_PATH>/<subdir>/<file_name>`.
///
/// NOTE(review): a failing `File::create` silently skips the image — nothing
/// is written and no error is reported. Confirm this best-effort behavior is
/// intended.
fn write_png(subdir: &str, file_name: &str, rle: &Resource) {
    let mut path_buf = PathBuf::new();
    path_buf.push(OUTPUT_PATH);
    path_buf.push(&subdir);
    path_buf.push(file_name);
    if let Ok(file) = File::create(&path_buf) {
        let ref mut writer = BufWriter::new(file);
        // Image geometry comes from the decoded resource header; the pixel
        // payload is the raw RGBA byte buffer.
        let mut encoder = png::Encoder::new(writer,
                                            rle.width as u32,
                                            rle.height as u32);
        encoder.set_color(png::ColorType::Rgba);
        encoder.set_depth(png::BitDepth::Eight);
        let mut writer = encoder.write_header().unwrap();
        writer.write_image_data(&rle.image_raw).unwrap();
    }
}
/// Writes the descriptor XML `<OUTPUT_PATH>/<kind>.xml`, one `<entry>` per
/// matched sprite, carrying its id, name, offsets, dimensions and PNG name.
fn write_list_file(kind: &str, combi_entries: Vec<RleCombiEntry>) {
    let mut out_path = PathBuf::from(OUTPUT_PATH);
    out_path.push(format!("{}.xml", kind));
    let writer = BufWriter::new(File::create(&out_path).unwrap());
    let mut xml = xml_writer::XmlWriter::new(writer);
    // Root element is named after the kind ("bullets", "icons", ...).
    xml.begin_elem(kind).unwrap();
    for entry in combi_entries {
        xml.begin_elem("entry").unwrap();
        xml.attr("id", &entry.id.to_string()).unwrap();
        xml.attr("name", &entry.name).unwrap();
        xml.attr("x_offset", &entry.x_offset.to_string()).unwrap();
        xml.attr("y_offset", &entry.y_offset.to_string()).unwrap();
        xml.attr("width", &entry.width.to_string()).unwrap();
        xml.attr("height", &entry.height.to_string()).unwrap();
        xml.attr("file_name", &entry.file_name).unwrap();
        xml.end_elem().unwrap();
    }
    xml.end_elem().unwrap();
    xml.close().unwrap();
    xml.flush().unwrap();
}
/// Reads an RMD file in full and hands the bytes to the RMD parser.
fn load_rmd_data(path: &Path, kind: RmdType) -> Result<Rmd, Error> {
    let bytes = std::fs::read(path)?;
    parse_rmd(kind, &bytes)
}
/// Reads an RMM map file in full and hands the bytes to the map parser.
fn load_rmm_data(path: &Path) -> Result<Map, Error> {
    let bytes = std::fs::read(path)?;
    parse_rmm(&bytes)
}
/// Reads a `.lst` index file in full and parses it (`use_v2` selects the
/// second list format variant).
fn load_list_data(path: &Path, use_v2: bool) -> Result<List, Error> {
    let bytes = std::fs::read(path)?;
    parse_lst(&bytes, use_v2)
}
/// Reads an RLE sprite file and parses it, deriving the file number from the
/// digits in the file stem (e.g. "tle0123" -> 123); `0xFFFF` marks "no
/// number found / unparsable".
fn load_rle_data(path: &Path) -> Result<ResourceFile, Error> {
    let mut bytes = std::fs::read(path)?;
    let mut file_num = 0xFFFF;
    if let Some(stem) = path.file_stem().and_then(|s| s.to_str()) {
        let digits: String = stem.matches(char::is_numeric).collect();
        file_num = digits.parse().unwrap_or(0xFFFF);
        // NOTE(review): the original comment said "a maximum of 5 digits",
        // but `% 10_000` keeps only 4 — confirm which was intended.
        file_num = file_num % 10_000;
    }
    // parse && append results
    parse_rle(file_num, &mut bytes)
}
/// One `<entry>` row for the generated descriptor XML: metadata from a `.lst`
/// index entry joined with the geometry of its decoded RLE sprite and the
/// name of the PNG file written for it.
struct RleCombiEntry {
    id: u32,           // numeric id from the .lst entry
    name: String,      // display name from the .lst entry
    x_offset: i32,     // sprite draw offset, from the decoded RLE resource
    y_offset: i32,
    width: i32,        // sprite dimensions in pixels
    height: i32,
    file_name: String, // PNG file emitted alongside the descriptor
}
| 35.516484 | 111 | 0.532859 |
188ded8f9e5ff85f7ec997cbc7b079b0e8a3f464 | 5,526 | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
use libra_config::config::NodeConfig;
use libra_logger::prelude::*;
use libra_types::{chain_id::ChainId, PeerId};
use std::{
path::PathBuf,
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use structopt::StructOpt;
// Command-line arguments for the node binary.
// NOTE: plain `//` comments are used deliberately; `///` doc comments would
// be picked up by structopt and could change the generated CLI help text.
#[derive(Debug, StructOpt)]
#[structopt(about = "Libra Node")]
struct Args {
    // Path to the node's configuration file. `required_unless = "test"`
    // makes it mandatory except when `--test` is given, which is why the
    // non-test path in `main` may safely `unwrap()` it.
    #[structopt(
        short = "f",
        long,
        required_unless = "test",
        help = "Path to NodeConfig"
    )]
    config: Option<PathBuf>,
    #[structopt(short = "d", long, help = "Disable logging")]
    no_logging: bool,
    #[structopt(long, help = "Enable a single validator testnet")]
    test: bool,
    #[structopt(long, help = "Enabling random ports for testnet")]
    random_ports: bool,
}
// Use jemalloc as the process-wide heap allocator.
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
/// Entry point: either spins up a single-validator test environment
/// (`--test`) or loads the given config and starts a production node.
fn main() {
    let args = Args::from_args();
    if args.test {
        println!("Entering test mode, this should never be used in production!");
        return load_test_environment(args.config, args.no_logging, args.random_ports);
    }
    // `required_unless = "test"` on the struct guarantees `config` is present
    // on this path, so the unwrap cannot fail.
    let config = NodeConfig::load(args.config.unwrap()).expect("Failed to load node config");
    println!("Using node config {:?}", &config);
    start(args.no_logging, &config, None);
}
/// Boots the node with the given config and blocks the calling thread.
///
/// Order matters: the panic handler and logger are installed first so that
/// everything afterwards (metrics, failpoints, node setup) can log.
fn start(no_logging: bool, config: &NodeConfig, log_file: Option<PathBuf>) {
    crash_handler::setup_panic_handler();
    // Build the logger unless logging was disabled; an explicit log file
    // redirects output there instead of the default printer.
    let logger = if !no_logging {
        let mut logger = libra_logger::Logger::new();
        logger
            .channel_size(config.logger.chan_size)
            .is_async(config.logger.is_async)
            .level(config.logger.level)
            .read_env();
        if let Some(log_file) = log_file {
            logger.printer(Box::new(FileWriter::new(log_file)));
        }
        Some(logger.build())
    } else {
        None
    };
    // Let's now log some important information, since the logger is set up
    info!(config = config, "Loaded config");
    // One metrics dump task per network identity (full-node networks plus
    // the validator network, when configured).
    if config.metrics.enabled {
        for network in &config.full_node_networks {
            let peer_id = network.peer_id();
            setup_metrics(peer_id, &config);
        }
        if let Some(network) = config.validator_network.as_ref() {
            let peer_id = network.peer_id();
            setup_metrics(peer_id, &config);
        }
    }
    // Arm configured failpoints when the binary was compiled with support;
    // otherwise warn that the config section is being ignored.
    if fail::has_failpoints() {
        warn!("Failpoints is enabled");
        if let Some(failpoints) = &config.failpoints {
            for (point, actions) in failpoints {
                fail::cfg(point, actions).expect("fail to set actions for failpoint");
            }
        }
    } else if config.failpoints.is_some() {
        warn!("failpoints is set in config, but the binary doesn't compile with this feature");
    }
    let _node_handle = libra_node::main_node::setup_environment(&config, logger);
    // `term` is never set to true anywhere in this function, so this parks
    // the main thread indefinitely (spurious unparks just re-check the
    // flag); the process is expected to exit via an external signal.
    let term = Arc::new(AtomicBool::new(false));
    while !term.load(Ordering::Acquire) {
        std::thread::park();
    }
}
/// Starts the periodic metrics dump for one peer: all metrics are written to
/// `<metrics dir>/<peer_id>.metrics` at the configured interval.
fn setup_metrics(peer_id: PeerId, config: &NodeConfig) {
    let file_name = format!("{}.metrics", peer_id);
    libra_metrics::dump_all_metrics_to_file_periodically(
        &config.metrics.dir(),
        &file_name,
        config.metrics.collection_interval_ms,
    );
}
/// Generates a throw-away single-validator testnet configuration (in
/// `config_path` or a temp dir), writes supporting files (waypoint, log
/// path), prints connection info, and starts the node.
fn load_test_environment(config_path: Option<PathBuf>, no_logging: bool, random_ports: bool) {
    // Either allocate a temppath or reuse the passed in path and make sure the directory exists
    let config_temp_path = libra_temppath::TempPath::new();
    let config_path = config_path.unwrap_or_else(|| config_temp_path.as_ref().to_path_buf());
    std::fs::DirBuilder::new()
        .recursive(true)
        .create(&config_path)
        .unwrap();
    let config_path = config_path.canonicalize().unwrap();
    // Build a single validator network
    let template = NodeConfig::default_for_validator();
    let builder =
        libra_genesis_tool::config_builder::ValidatorBuilder::new(1, template, &config_path)
            .randomize_first_validator_ports(random_ports);
    let test_config = config_builder::SwarmConfig::build(&builder, &config_path).unwrap();
    // Prepare log file since we cannot automatically route logs to stderr
    let mut log_file = config_path.clone();
    log_file.push("validator.log");
    // Build a waypoint file so that clients / docker can grab it easily
    let mut waypoint_file_path = config_path.clone();
    waypoint_file_path.push("waypoint.txt");
    std::io::Write::write_all(
        &mut std::fs::File::create(&waypoint_file_path).unwrap(),
        test_config.waypoint.to_string().as_bytes(),
    )
    .unwrap();
    // Intentionally leave out instructions on how to connect with different applications
    println!("Completed generating configuration:");
    println!("\tLog file: {:?}", log_file);
    println!("\tConfig path: {:?}", test_config.config_files[0]);
    println!(
        "\tLibra root key path: {:?}",
        test_config.libra_root_key_path
    );
    println!("\tWaypoint: {}", test_config.waypoint);
    // Reload the freshly generated config and surface the endpoints a user
    // would need to connect to this local node.
    let config = NodeConfig::load(&test_config.config_files[0]).unwrap();
    println!("\tJSON-RPC endpoint: {}", config.rpc.address);
    println!(
        "\tFullNode network: {}",
        config.full_node_networks[0].listen_address
    );
    println!("\tChainId: {}", ChainId::test());
    println!();
    println!("Libra is running, press ctrl-c to exit");
    println!();
    // Hand off to the normal startup path, redirecting logs to the file.
    start(no_logging, &config, Some(log_file))
}
| 33.90184 | 97 | 0.641513 |
91d77897948e08ce86a987126c8dd75874dd35ad | 5,450 | use crate::errno::Errno;
use crate::sys::signal::Signal;
use crate::unistd::Pid;
use crate::Result;
use cfg_if::cfg_if;
use libc::{self, c_int};
use std::ptr;
/// Raw request type passed straight through to `libc::ptrace`.
pub type RequestType = c_int;
cfg_if! {
    // The BSD family (and macOS) declare ptrace's address argument as
    // `char *`; other platforms use `void *`. Mirror that here.
    if #[cfg(any(target_os = "dragonfly",
                 target_os = "freebsd",
                 target_os = "macos",
                 target_os = "openbsd"))] {
        #[doc(hidden)]
        pub type AddressType = *mut ::libc::c_char;
    } else {
        #[doc(hidden)]
        pub type AddressType = *mut ::libc::c_void;
    }
}
libc_enum! {
    #[repr(i32)]
    /// Ptrace Request enum defining the action to be taken.
    pub enum Request {
        PT_TRACE_ME,
        // Reads: instruction memory / data memory / (macOS) user area.
        PT_READ_I,
        PT_READ_D,
        #[cfg(target_os = "macos")]
        PT_READ_U,
        // Writes: instruction memory / data memory / (macOS) user area.
        PT_WRITE_I,
        PT_WRITE_D,
        #[cfg(target_os = "macos")]
        PT_WRITE_U,
        PT_CONTINUE,
        PT_KILL,
        // Single-step is only available on a subset of OS/arch combinations;
        // the cfg below must match the one on the `step` function.
        #[cfg(any(any(target_os = "dragonfly",
                  target_os = "freebsd",
                  target_os = "macos"),
              all(target_os = "openbsd", target_arch = "x86_64"),
              all(target_os = "netbsd", any(target_arch = "x86_64",
                                            target_arch = "powerpc"))))]
        PT_STEP,
        PT_ATTACH,
        PT_DETACH,
        // macOS-specific requests.
        #[cfg(target_os = "macos")]
        PT_SIGEXC,
        #[cfg(target_os = "macos")]
        PT_THUPDATE,
        #[cfg(target_os = "macos")]
        PT_ATTACHEXC
    }
}
/// Raw wrapper over `libc::ptrace` shared by the action-style requests.
///
/// Any successful return value is collapsed to `0` (`.map(|_| 0)`), so this
/// helper only conveys success/failure and is not suitable for requests whose
/// success value is itself a data word.
unsafe fn ptrace_other(
    request: Request,
    pid: Pid,
    addr: AddressType,
    data: c_int,
) -> Result<c_int> {
    Errno::result(libc::ptrace(
        request as RequestType,
        libc::pid_t::from(pid),
        addr,
        data,
    ))
    .map(|_| 0)
}
/// Sets the process as traceable, as with `ptrace(PT_TRACE_ME, ...)`
///
/// Marks the calling process as a tracee of its parent; this is the only
/// ptrace request ever issued from the tracee side.
pub fn traceme() -> Result<()> {
    let res = unsafe { ptrace_other(Request::PT_TRACE_ME, Pid::from_raw(0), ptr::null_mut(), 0) };
    res.map(drop)
}
/// Attach to a running process, as with `ptrace(PT_ATTACH, ...)`
///
/// Makes the process identified by `pid` a tracee of the calling process.
pub fn attach(pid: Pid) -> Result<()> {
    let res = unsafe { ptrace_other(Request::PT_ATTACH, pid, ptr::null_mut(), 0) };
    res.map(drop)
}
/// Detaches the current running process, as with `ptrace(PT_DETACH, ...)`
///
/// Releases the tracee identified by `pid` so it runs freely again,
/// optionally delivering the signal `sig` to it as it resumes.
pub fn detach<T: Into<Option<Signal>>>(pid: Pid, sig: T) -> Result<()> {
    // Absence of a signal is encoded as 0.
    let data = sig.into().map_or(0, |s| s as c_int);
    unsafe { ptrace_other(Request::PT_DETACH, pid, ptr::null_mut(), data).map(drop) }
}
/// Restart the stopped tracee process, as with `ptrace(PTRACE_CONT, ...)`
///
/// Resumes execution of the tracee identified by `pid`, optionally delivering
/// the signal `sig` to it.
pub fn cont<T: Into<Option<Signal>>>(pid: Pid, sig: T) -> Result<()> {
    // Absence of a signal is encoded as 0.
    let data = sig.into().map_or(0, |s| s as c_int);
    // Per ptrace(2), an address of 1 means "continue from where the tracee
    // stopped". The success value is useless and is discarded.
    unsafe { ptrace_other(Request::PT_CONTINUE, pid, 1 as AddressType, data).map(drop) }
}
/// Issues a kill request as with `ptrace(PT_KILL, ...)`
///
/// Equivalent to `ptrace(PT_CONTINUE, ..., SIGKILL);`
pub fn kill(pid: Pid) -> Result<()> {
    let res = unsafe { ptrace_other(Request::PT_KILL, pid, 0 as AddressType, 0) };
    res.map(drop)
}
/// Move the stopped tracee process forward by a single step as with
/// `ptrace(PT_STEP, ...)`
///
/// Advances the tracee identified by `pid` by one instruction, optionally
/// delivering the signal `sig` to it.
///
/// # Example
/// ```rust
/// use nix::sys::ptrace::step;
/// use nix::unistd::Pid;
/// use nix::sys::signal::Signal;
/// use nix::sys::wait::*;
/// fn main() {
///     // If a process changes state to the stopped state because of a SIGUSR1
///     // signal, this will step the process forward and forward the user
///     // signal to the stopped process
///     match waitpid(Pid::from_raw(-1), None) {
///         Ok(WaitStatus::Stopped(pid, Signal::SIGUSR1)) => {
///             let _ = step(pid, Signal::SIGUSR1);
///         }
///         _ => {},
///     }
/// }
/// ```
#[cfg(any(
    any(target_os = "dragonfly", target_os = "freebsd", target_os = "macos"),
    all(target_os = "openbsd", target_arch = "x86_64"),
    all(
        target_os = "netbsd",
        any(target_arch = "x86_64", target_arch = "powerpc")
    )
))]
pub fn step<T: Into<Option<Signal>>>(pid: Pid, sig: T) -> Result<()> {
    // Absence of a signal is encoded as 0.
    let data = sig.into().map_or(0, |s| s as c_int);
    unsafe { ptrace_other(Request::PT_STEP, pid, ptr::null_mut(), data).map(drop) }
}
/// Reads a word from a processes memory at the given address
pub fn read(pid: Pid, addr: AddressType) -> Result<c_int> {
unsafe {
// Traditionally there was a difference between reading data or
// instruction memory but not in modern systems.
ptrace_other(Request::PT_READ_D, pid, addr, 0)
}
}
/// Writes a word into the process's memory at the given address.
pub fn write(pid: Pid, addr: AddressType, data: c_int) -> Result<()> {
    // As with reads, the data/instruction distinction is historical; the
    // data variant is used throughout.
    let res = unsafe { ptrace_other(Request::PT_WRITE_D, pid, addr, data) };
    res.map(drop)
}
| 31.321839 | 99 | 0.585872 |
bb5b567aeccf142ea3f652ace5428d7d7fbbca7f | 2,061 | pub(crate) mod serial;
use std::fmt;
use std::str::FromStr;
use semver::{ReqParseError, SemVerError, Version, VersionReq};
use notion_fail::{ExitCode, Fallible, NotionFail, ResultExt};
use self::serial::parse_requirements;
/// A user-supplied version specification: either the "latest" keyword or a
/// semver requirement string.
#[derive(Debug, Clone)]
pub enum VersionSpec {
    /// The literal "latest" keyword.
    Latest,
    /// Any version satisfying the given semver requirement.
    Semver(VersionReq),
}
impl fmt::Display for VersionSpec {
    /// Renders `Latest` as the literal string "latest"; semver specs use
    /// `VersionReq`'s own `Display`.
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self {
            VersionSpec::Latest => write!(f, "latest"),
            VersionSpec::Semver(req) => req.fmt(f),
        }
    }
}
impl Default for VersionSpec {
    // When the user specifies no version, resolve to the latest available.
    fn default() -> Self {
        VersionSpec::Latest
    }
}
impl VersionSpec {
    /// Builds a spec matching exactly the given version.
    pub fn exact(version: &Version) -> Self {
        VersionSpec::Semver(VersionReq::exact(version))
    }
    /// Parses a user-supplied spec: either "latest" or a semver requirement.
    pub fn parse(s: impl AsRef<str>) -> Fallible<Self> {
        s.as_ref()
            .parse()
            .with_context(VersionParseError::from_req_parse_error)
    }
    /// Parses a bare semver requirement (the "latest" keyword is not
    /// recognized here).
    pub fn parse_requirements(s: impl AsRef<str>) -> Fallible<VersionReq> {
        parse_requirements(s.as_ref()).with_context(VersionParseError::from_req_parse_error)
    }
    /// Parses a concrete semver version.
    pub fn parse_version(s: impl AsRef<str>) -> Fallible<Version> {
        Version::parse(s.as_ref()).with_context(VersionParseError::from_semver_error)
    }
}
impl FromStr for VersionSpec {
    type Err = ReqParseError;
    /// Recognizes the "latest" keyword directly; everything else is handed
    /// to the semver requirement parser.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "latest" => Ok(VersionSpec::Latest),
            other => parse_requirements(other).map(VersionSpec::Semver),
        }
    }
}
// Failure type carrying a human-readable version-parse error message, tagged
// with the `NoVersionMatch` exit code for NotionFail reporting.
#[derive(Debug, Fail, NotionFail)]
#[fail(display = "{}", error)]
#[notion_fail(code = "NoVersionMatch")]
pub(crate) struct VersionParseError {
    // Stringified underlying semver error (see the `from_*` constructors).
    pub(crate) error: String,
}
impl VersionParseError {
fn from_req_parse_error(error: &ReqParseError) -> Self {
VersionParseError {
error: error.to_string(),
}
}
fn from_semver_error(error: &SemVerError) -> Self {
VersionParseError {
error: error.to_string(),
}
}
}
| 24.247059 | 92 | 0.622999 |
1616d8276c3957d2ae49842e77a139d2e212a2c6 | 1,792 | #[macro_use]
extern crate log;
use std::cell::RefCell;
use std::{thread::sleep, time::Duration};
// Thread-local "wake pending" flag; starts true so the executor polls once.
thread_local!(static NOTIFY: RefCell<bool> = RefCell::new(true));
/// Minimal stand-in for `std::task::Context`: just carries a borrowed waker.
struct Context<'a> {
    waker: &'a Waker,
}
impl<'a> Context<'a> {
    // Wraps a waker reference; logs so the executor's call sequence is
    // visible in the demo output.
    fn from_waker(waker: &'a Waker) -> Self {
        info!("Context from_waker");
        Context { waker }
    }
    // Hands the waker back so the future can request a re-poll.
    fn waker(&self) -> &'a Waker {
        info!("Context waker");
        &self.waker
    }
}
/// Minimal stand-in for `std::task::Waker`: "waking" just raises the
/// thread-local NOTIFY flag that the executor loop checks.
#[derive(Debug)]
struct Waker;
impl Waker {
    fn wake(&self) {
        NOTIFY.with(|f| {
            // Log the flag's previous value, then mark a wake-up as pending.
            info!("Waker wake:{} ",*f.borrow());
            *f.borrow_mut() = true;
        })
    }
}
/// Minimal stand-in for `std::task::Poll`.
enum Poll<T> {
    Ready(T), // the future completed with this value
    Pending,  // not done yet; a wake-up will be requested via the waker
}
/// Minimal stand-in for `std::future::Future`, polled with our own `Context`.
trait Future {
    type Output;
    fn poll(&mut self, cx: &Context) -> Poll<Self::Output>;
}
/// Demo future: returns `Pending` three times (requesting a wake-up each
/// time), then resolves to `3` on the fourth poll.
#[derive(Default)]
struct MyFuture {
    count: u32, // number of polls answered with Pending so far
}
impl Future for MyFuture {
    type Output = i32;
    fn poll(&mut self, ctx: &Context) -> Poll<Self::Output> {
        info!("impl Future for MyFuture: {}",self.count);
        // Simulate slow work so the log output is readable in real time.
        sleep(Duration::from_secs(1));
        match self.count {
            3 => Poll::Ready(3),
            _ => {
                self.count += 1;
                // Re-arm the NOTIFY flag so the executor polls us again.
                ctx.waker().wake();
                Poll::Pending
            }
        }
    }
}
/// Toy single-future executor: busy-loops, polling the future whenever the
/// thread-local NOTIFY flag is set, until the future yields `Ready`.
fn run<F>(mut f: F) -> F::Output
where
    F: Future,
{
    NOTIFY.with(|n| loop {
        info!("run...");
        if *n.borrow() {
            // Consume the pending wake-up before polling; if the future is
            // still Pending it re-arms the flag through its waker.
            *n.borrow_mut() = false;
            info!("{:?}",Waker);
            let ctx = Context::from_waker(&Waker);
            info!("before Poll...");
            if let Poll::Ready(val) = f.poll(&ctx) {
                return val;
            }
        }
    })
}
/// Demo entry point: sets up logging, then drives `MyFuture` to completion
/// on the toy executor and prints its output.
fn main() {
    env_logger::init();
    let future = MyFuture::default();
    let output = run(future);
    println!("Output: {}", output);
}
| 19.478261 | 65 | 0.491071 |
bf7153c573844a3431cc3d6166c547ff099cadcf | 31,721 | use crate::{
acknowledgement::{Acknowledgements, DeliveryTag},
auth::Credentials,
channel_status::{ChannelState, ChannelStatus},
close_on_drop,
connection_status::{ConnectionState, ConnectionStep},
consumer::Consumer,
executor::{Executor, ExecutorExt},
frames::{ExpectedReply, Frames},
id_sequence::IdSequence,
internal_rpc::InternalRPCHandle,
message::{BasicGetMessage, BasicReturnMessage, Delivery},
protocol::{self, AMQPClass, AMQPError, AMQPHardError, AMQPSoftError},
publisher_confirm::PublisherConfirm,
queue::Queue,
queues::Queues,
returned_messages::ReturnedMessages,
types::*,
waker::Waker,
BasicProperties, CloseOnDrop, Configuration, ConfirmationPromise, Connection, ConnectionStatus,
Error, ExchangeKind, Promise, PromiseChain, PromiseResolver, Result,
};
use amq_protocol::frame::{AMQPContentHeader, AMQPFrame};
use log::{debug, error, info, log_enabled, trace, Level::Trace};
use parking_lot::Mutex;
use std::{convert::TryFrom, fmt, sync::Arc};
#[cfg(test)]
use crate::queue::QueueState;
/// An AMQP channel multiplexed over a connection. Cheap to clone: the fields
/// are handles/shared state, so clones refer to the same underlying channel.
#[derive(Clone)]
pub struct Channel {
    id: u16, // channel number within the connection
    configuration: Configuration,
    status: ChannelStatus, // channel state machine (see status()/set_state())
    connection_status: ConnectionStatus,
    acknowledgements: Acknowledgements, // publisher-confirm bookkeeping
    delivery_tag: IdSequence<DeliveryTag>,
    queues: Queues, // queues/consumers registered on this channel
    returned_messages: ReturnedMessages,
    waker: Waker, // wakes the connection side (see wake())
    internal_rpc: InternalRPCHandle, // connection-internal requests (e.g. remove_channel)
    frames: Frames,
    executor: Arc<dyn Executor>, // runs internal promises (see register_internal_promise())
}
impl fmt::Debug for Channel {
    // Hand-written Debug. NOTE(review): `internal_rpc` is the only struct
    // field not included in the output — confirm the omission is deliberate
    // (e.g. the handle not implementing Debug) before relying on it.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Channel")
            .field("id", &self.id)
            .field("configuration", &self.configuration)
            .field("status", &self.status)
            .field("connection_status", &self.connection_status)
            .field("acknowledgements", &self.acknowledgements)
            .field("delivery_tag", &self.delivery_tag)
            .field("queues", &self.queues)
            .field("returned_messages", &self.returned_messages)
            .field("waker", &self.waker)
            .field("frames", &self.frames)
            .field("executor", &self.executor)
            .finish()
    }
}
impl Channel {
    /// Create a channel handle with the given id, sharing the connection-wide
    /// state (configuration, waker, internal RPC handle and frame queue).
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        channel_id: u16,
        configuration: Configuration,
        connection_status: ConnectionStatus,
        waker: Waker,
        internal_rpc: InternalRPCHandle,
        frames: Frames,
        executor: Arc<dyn Executor>,
    ) -> Channel {
        // The acknowledgements tracker keeps a clone of the same
        // returned-messages store used by this channel.
        let returned_messages = ReturnedMessages::default();
        Channel {
            id: channel_id,
            configuration,
            status: ChannelStatus::default(),
            connection_status,
            acknowledgements: Acknowledgements::new(returned_messages.clone()),
            // `false` — delivery tags start at 1, not 0 (0 means "all" in acks).
            delivery_tag: IdSequence::new(false),
            queues: Queues::default(),
            returned_messages,
            waker,
            internal_rpc,
            frames,
            executor,
        }
    }
    /// Shared view of this channel's current state and flow/confirm flags.
    pub fn status(&self) -> &ChannelStatus {
        &self.status
    }
fn set_closed(&self, error: Error) -> Result<()> {
self.set_state(ChannelState::Closed);
self.error_publisher_confirms(error.clone());
self.cancel_consumers()
.and(self.internal_rpc.remove_channel(self.id, error))
}
fn set_error(&self, error: Error) -> Result<()> {
self.set_state(ChannelState::Error);
self.error_publisher_confirms(error.clone());
self.error_consumers(error.clone())
.and(self.internal_rpc.remove_channel(self.id, error))
}
    // Spawn a promise on the executor, reporting failures via internal RPC.
    fn register_internal_promise(&self, promise: Promise<()>) -> Result<()> {
        self.executor
            .spawn_internal(promise, self.internal_rpc.clone())
    }
    // Fail all pending publisher confirms on this channel with `error`.
    pub(crate) fn error_publisher_confirms(&self, error: Error) {
        self.acknowledgements.on_channel_error(self.id, error);
    }
    // Cancel every consumer registered on this channel's queues.
    pub(crate) fn cancel_consumers(&self) -> Result<()> {
        self.queues.cancel_consumers()
    }
    // Propagate `error` to every consumer registered on this channel's queues.
    pub(crate) fn error_consumers(&self, error: Error) -> Result<()> {
        self.queues.error_consumers(error)
    }
    pub(crate) fn set_state(&self, state: ChannelState) {
        self.status.set_state(state);
    }
    /// The AMQP channel id.
    pub fn id(&self) -> u16 {
        self.id
    }
    // Signal the connection side that there is work (queued frames) to do.
    fn wake(&self) -> Result<()> {
        trace!("channel {} wake", self.id);
        self.waker.wake()
    }
fn assert_channel0(&self, class_id: u16, method_id: u16) -> Result<()> {
if self.id == 0 {
Ok(())
} else {
error!(
"Got a connection frame on channel {}, closing connection",
self.id
);
let error = AMQPError::new(
AMQPHardError::COMMANDINVALID.into(),
format!("connection frame received on channel {}", self.id).into(),
);
self.internal_rpc.close_connection(
error.get_id(),
error.get_message().to_string(),
class_id,
method_id,
)?;
Err(Error::ProtocolError(error))
}
}
    /// Close this channel with the given reply code and text.
    /// The trailing zeros are the class/method id arguments of
    /// `do_channel_close` (no offending frame to report).
    pub fn close(&self, reply_code: ShortUInt, reply_text: &str) -> Promise<()> {
        self.do_channel_close(reply_code, reply_text, 0, 0)
    }
    /// Declare an exchange of the given kind with the given options.
    pub fn exchange_declare(
        &self,
        exchange: &str,
        kind: ExchangeKind,
        options: ExchangeDeclareOptions,
        arguments: FieldTable,
    ) -> Promise<()> {
        self.do_exchange_declare(exchange, kind.kind(), options, arguments)
    }
    /// Wait until all pending publisher confirms resolve, yielding whatever
    /// messages the broker returned (basic.return) in the meantime.
    pub fn wait_for_confirms(&self) -> ConfirmationPromise<Vec<BasicReturnMessage>> {
        if let Some(promise) = self.acknowledgements.get_last_pending() {
            trace!("Waiting for pending confirms");
            let returned_messages = self.returned_messages.clone();
            promise.traverse(move |_| Ok(returned_messages.drain()))
        } else {
            trace!("No confirms to wait for");
            ConfirmationPromise::new_with_data(Ok(Vec::default()))
        }
    }
#[cfg(test)]
pub(crate) fn register_queue(&self, queue: QueueState) {
self.queues.register(queue);
}
pub(crate) fn send_method_frame(
&self,
method: AMQPClass,
resolver: PromiseResolver<()>,
expected_reply: Option<ExpectedReply>,
) -> Result<()> {
self.send_frame(AMQPFrame::Method(self.id, method), resolver, expected_reply)
}
pub(crate) fn send_frame(
&self,
frame: AMQPFrame,
resolver: PromiseResolver<()>,
expected_reply: Option<ExpectedReply>,
) -> Result<()> {
trace!("channel {} send_frame", self.id);
self.frames.push(self.id, frame, resolver, expected_reply);
self.wake()
}
fn send_method_frame_with_body(
&self,
method: AMQPClass,
payload: Vec<u8>,
properties: BasicProperties,
publisher_confirms_result: Option<PublisherConfirm>,
) -> Result<PromiseChain<PublisherConfirm>> {
let class_id = method.get_amqp_class_id();
let header = AMQPContentHeader {
class_id,
weight: 0,
body_size: payload.len() as u64,
properties,
};
let frame_max = self.configuration.frame_max();
let mut frames = vec![
AMQPFrame::Method(self.id, method),
AMQPFrame::Header(self.id, class_id, Box::new(header)),
];
// a content body frame 8 bytes of overhead
frames.extend(
payload
.as_slice()
.chunks(frame_max as usize - 8)
.map(|chunk| AMQPFrame::Body(self.id, chunk.into())),
);
// tweak to make rustc happy
let data = Arc::new(Mutex::new((
publisher_confirms_result,
self.returned_messages.clone(),
)));
trace!("channel {} send_frames", self.id);
let promise = self.frames.push_frames(frames);
self.wake()?;
Ok(promise.traverse(move |res| {
res.map(|()| {
let mut data = data.lock();
data.0
.take()
.unwrap_or_else(|| PublisherConfirm::not_requested(data.1.clone()))
})
}))
}
    /// Handle the content header frame that follows a content-carrying method:
    /// record how many body bytes to expect and route the properties.
    pub(crate) fn handle_content_header_frame(
        &self,
        size: u64,
        properties: BasicProperties,
    ) -> Result<()> {
        if let ChannelState::WillReceiveContent(queue_name, request_id_or_consumer_tag) =
            self.status.state()
        {
            if size > 0 {
                self.set_state(ChannelState::ReceivingContent(
                    queue_name.clone(),
                    request_id_or_consumer_tag.clone(),
                    size as usize,
                ));
            } else {
                // Empty body: no body frames will follow.
                self.set_state(ChannelState::Connected);
            }
            if let Some(queue_name) = queue_name {
                self.queues.handle_content_header_frame(
                    queue_name.as_str(),
                    request_id_or_consumer_tag,
                    size,
                    properties,
                )?;
            } else {
                // No queue name: content belongs to a returned (basic.return)
                // message — see on_basic_return_received.
                self.returned_messages.set_delivery_properties(properties);
                if size == 0 {
                    self.returned_messages
                        .new_delivery_complete(self.status.confirm());
                }
            }
            Ok(())
        } else {
            // A header frame is only legal right after WillReceiveContent.
            self.set_error(Error::InvalidChannelState(self.status.state()))
        }
    }
pub(crate) fn handle_body_frame(&self, payload: Vec<u8>) -> Result<()> {
let payload_size = payload.len();
if let ChannelState::ReceivingContent(
queue_name,
request_id_or_consumer_tag,
remaining_size,
) = self.status.state()
{
if remaining_size >= payload_size {
if let Some(queue_name) = queue_name.as_ref() {
self.queues.handle_body_frame(
queue_name.as_str(),
request_id_or_consumer_tag.clone(),
remaining_size,
payload_size,
payload,
)?;
} else {
self.returned_messages.receive_delivery_content(payload);
if remaining_size == payload_size {
self.returned_messages
.new_delivery_complete(self.status.confirm());
}
}
if remaining_size == payload_size {
self.set_state(ChannelState::Connected);
} else {
self.set_state(ChannelState::ReceivingContent(
queue_name,
request_id_or_consumer_tag,
remaining_size - payload_size,
));
}
Ok(())
} else {
error!("body frame too large");
self.set_error(Error::InvalidBodyReceived)
}
} else {
self.set_error(Error::InvalidChannelState(self.status.state()))
}
}
fn before_basic_publish(&self) -> Option<PublisherConfirm> {
if self.status.confirm() {
let delivery_tag = self.delivery_tag.next();
Some(
self.acknowledgements
.register_pending(delivery_tag, self.id),
)
} else {
None
}
}
fn acknowledgement_error(&self, error: Error, class_id: u16, method_id: u16) -> Result<()> {
error!("Got a bad acknowledgement from server, closing channel");
self.register_internal_promise(self.do_channel_close(
AMQPSoftError::PRECONDITIONFAILED.get_id(),
"precondition failed",
class_id,
method_id,
))?;
Err(error)
}
fn on_connection_start_ok_sent(
&self,
resolver: PromiseResolver<CloseOnDrop<Connection>>,
connection: Connection,
credentials: Credentials,
) -> Result<()> {
self.connection_status
.set_connection_step(ConnectionStep::StartOk(resolver, connection, credentials));
Ok(())
}
fn on_connection_open_sent(
&self,
resolver: PromiseResolver<CloseOnDrop<Connection>>,
) -> Result<()> {
self.connection_status
.set_connection_step(ConnectionStep::Open(resolver));
Ok(())
}
fn on_connection_close_sent(&self) -> Result<()> {
self.internal_rpc.set_connection_closing()
}
fn on_connection_close_ok_sent(&self, error: Error) -> Result<()> {
if let Error::ProtocolError(_) = error {
self.internal_rpc.set_connection_error(error)
} else {
self.internal_rpc.set_connection_closed(error)
}
}
fn before_channel_close(&self) {
self.set_state(ChannelState::Closing);
}
fn on_channel_close_ok_sent(&self, error: Error) -> Result<()> {
self.set_closed(error)
}
fn on_basic_recover_async_sent(&self) -> Result<()> {
self.queues.drop_prefetched_messages()
}
fn on_basic_ack_sent(&self, multiple: bool, delivery_tag: DeliveryTag) -> Result<()> {
if multiple && delivery_tag == 0 {
self.queues.drop_prefetched_messages()
} else {
Ok(())
}
}
fn on_basic_nack_sent(&self, multiple: bool, delivery_tag: DeliveryTag) -> Result<()> {
if multiple && delivery_tag == 0 {
self.queues.drop_prefetched_messages()
} else {
Ok(())
}
}
fn tune_connection_configuration(&self, channel_max: u16, frame_max: u32, heartbeat: u16) {
// If we disable the heartbeat (0) but the server don't, follow it and enable it too
// If both us and the server want heartbeat enabled, pick the lowest value.
if self.configuration.heartbeat() == 0
|| (heartbeat != 0 && heartbeat < self.configuration.heartbeat())
{
self.configuration.set_heartbeat(heartbeat);
}
if channel_max != 0 {
// 0 means we want to take the server's value
// If both us and the server specified a channel_max, pick the lowest value.
if self.configuration.channel_max() == 0
|| channel_max < self.configuration.channel_max()
{
self.configuration.set_channel_max(channel_max);
}
}
if self.configuration.channel_max() == 0 {
self.configuration.set_channel_max(u16::max_value());
}
if frame_max != 0 {
// 0 means we want to take the server's value
// If both us and the server specified a frame_max, pick the lowest value.
if self.configuration.frame_max() == 0 || frame_max < self.configuration.frame_max() {
self.configuration.set_frame_max(frame_max);
}
}
if self.configuration.frame_max() == 0 {
self.configuration.set_frame_max(u32::max_value());
}
}
fn on_connection_start_received(&self, method: protocol::connection::Start) -> Result<()> {
trace!("Server sent connection::Start: {:?}", method);
let state = self.connection_status.state();
if let (
ConnectionState::Connecting,
Some(ConnectionStep::ProtocolHeader(
resolver,
connection,
credentials,
mechanism,
mut options,
)),
) = (state.clone(), self.connection_status.connection_step())
{
let mechanism_str = mechanism.to_string();
let locale = options.locale.clone();
if !method
.mechanisms
.split_whitespace()
.any(|m| m == mechanism_str)
{
error!("unsupported mechanism: {}", mechanism);
}
if !method.locales.split_whitespace().any(|l| l == locale) {
error!("unsupported locale: {}", mechanism);
}
if !options.client_properties.contains_key("product")
|| !options.client_properties.contains_key("version")
{
options.client_properties.insert(
"product".into(),
AMQPValue::LongString(env!("CARGO_PKG_NAME").into()),
);
options.client_properties.insert(
"version".into(),
AMQPValue::LongString(env!("CARGO_PKG_VERSION").into()),
);
}
options
.client_properties
.insert("platform".into(), AMQPValue::LongString("rust".into()));
let mut capabilities = FieldTable::default();
capabilities.insert("publisher_confirms".into(), AMQPValue::Boolean(true));
capabilities.insert(
"exchange_exchange_bindings".into(),
AMQPValue::Boolean(true),
);
capabilities.insert("basic.nack".into(), AMQPValue::Boolean(true));
capabilities.insert("consumer_cancel_notify".into(), AMQPValue::Boolean(true));
capabilities.insert("connection.blocked".into(), AMQPValue::Boolean(true));
// FIXME: consumer_priorities
capabilities.insert(
"authentication_failure_close".into(),
AMQPValue::Boolean(true),
);
// FIXME: per_consumer_qos
// FIXME: direct_reply_to
options
.client_properties
.insert("capabilities".into(), AMQPValue::FieldTable(capabilities));
self.register_internal_promise(self.connection_start_ok(
options.client_properties,
&mechanism_str,
&credentials.sasl_auth_string(mechanism),
&locale,
resolver,
connection,
credentials,
))
} else {
error!("Invalid state: {:?}", state);
let error = Error::InvalidConnectionState(state);
self.internal_rpc.set_connection_error(error.clone())?;
Err(error)
}
}
fn on_connection_secure_received(&self, method: protocol::connection::Secure) -> Result<()> {
trace!("Server sent connection::Secure: {:?}", method);
let state = self.connection_status.state();
if let (ConnectionState::Connecting, Some(ConnectionStep::StartOk(.., credentials))) =
(state.clone(), self.connection_status.connection_step())
{
self.register_internal_promise(
self.connection_secure_ok(&credentials.rabbit_cr_demo_answer()),
)
} else {
error!("Invalid state: {:?}", state);
let error = Error::InvalidConnectionState(state);
self.internal_rpc.set_connection_error(error.clone())?;
Err(error)
}
}
fn on_connection_tune_received(&self, method: protocol::connection::Tune) -> Result<()> {
debug!("Server sent Connection::Tune: {:?}", method);
let state = self.connection_status.state();
if let (
ConnectionState::Connecting,
Some(ConnectionStep::StartOk(resolver, connection, _)),
) = (state.clone(), self.connection_status.connection_step())
{
self.tune_connection_configuration(
method.channel_max,
method.frame_max,
method.heartbeat,
);
self.register_internal_promise(self.connection_tune_ok(
self.configuration.channel_max(),
self.configuration.frame_max(),
self.configuration.heartbeat(),
))?;
self.register_internal_promise(self.connection_open(
&self.connection_status.vhost(),
connection,
resolver,
))
} else {
error!("Invalid state: {:?}", state);
let error = Error::InvalidConnectionState(state);
self.internal_rpc.set_connection_error(error.clone())?;
Err(error)
}
}
fn on_connection_open_ok_received(
&self,
_: protocol::connection::OpenOk,
connection: Connection,
) -> Result<()> {
let state = self.connection_status.state();
if let (ConnectionState::Connecting, Some(ConnectionStep::Open(resolver))) =
(state.clone(), self.connection_status.connection_step())
{
self.connection_status.set_state(ConnectionState::Connected);
resolver.swear(Ok(CloseOnDrop::new(connection)));
Ok(())
} else {
error!("Invalid state: {:?}", state);
let error = Error::InvalidConnectionState(state);
self.internal_rpc.set_connection_error(error.clone())?;
Err(error)
}
}
fn on_connection_close_received(&self, method: protocol::connection::Close) -> Result<()> {
let error = AMQPError::try_from(method.clone())
.map(|error| {
error!(
"Connection closed on channel {} by {}:{} => {:?} => {}",
self.id, method.class_id, method.method_id, error, method.reply_text
);
Error::ProtocolError(error)
})
.unwrap_or_else(|error| {
error!("{}", error);
info!("Connection closed on channel {}: {:?}", self.id, method);
Error::InvalidConnectionState(ConnectionState::Closed)
});
self.internal_rpc.set_connection_closing()?;
self.frames.drop_pending(error.clone());
if let Some(resolver) = self.connection_status.connection_resolver() {
resolver.swear(Err(error.clone()));
}
self.internal_rpc.send_connection_close_ok(error)
}
fn on_connection_blocked_received(&self, _method: protocol::connection::Blocked) -> Result<()> {
self.connection_status.block();
Ok(())
}
fn on_connection_unblocked_received(
&self,
_method: protocol::connection::Unblocked,
) -> Result<()> {
self.connection_status.unblock();
self.wake()
}
fn on_connection_close_ok_received(&self) -> Result<()> {
self.internal_rpc
.set_connection_closed(Error::InvalidConnectionState(ConnectionState::Closed))
}
fn on_channel_open_ok_received(
&self,
_method: protocol::channel::OpenOk,
resolver: PromiseResolver<CloseOnDrop<Channel>>,
) -> Result<()> {
self.set_state(ChannelState::Connected);
resolver.swear(Ok(CloseOnDrop::new(self.clone())));
Ok(())
}
fn on_channel_flow_received(&self, method: protocol::channel::Flow) -> Result<()> {
self.status.set_send_flow(method.active);
self.register_internal_promise(self.channel_flow_ok(ChannelFlowOkOptions {
active: method.active,
}))
}
fn on_channel_flow_ok_received(
&self,
method: protocol::channel::FlowOk,
resolver: PromiseResolver<Boolean>,
) -> Result<()> {
// Nothing to do here, the server just confirmed that we paused/resumed the receiving flow
resolver.swear(Ok(method.active));
Ok(())
}
fn on_channel_close_received(&self, method: protocol::channel::Close) -> Result<()> {
let error = AMQPError::try_from(method.clone())
.map(|error| {
error!(
"Channel closed on channel {} by {}:{} => {:?} => {}",
self.id, method.class_id, method.method_id, error, method.reply_text
);
Error::ProtocolError(error)
})
.unwrap_or_else(|error| {
error!("{}", error);
info!("Channel closed on channel {}: {:?}", self.id, method);
Error::InvalidChannelState(ChannelState::Closing)
});
self.set_state(ChannelState::Closing);
self.register_internal_promise(self.channel_close_ok(error))
}
fn on_channel_close_ok_received(&self) -> Result<()> {
self.set_closed(Error::InvalidChannelState(ChannelState::Closed))
}
fn on_queue_delete_ok_received(
&self,
method: protocol::queue::DeleteOk,
resolver: PromiseResolver<LongUInt>,
queue: ShortString,
) -> Result<()> {
self.queues.deregister(queue.as_str());
resolver.swear(Ok(method.message_count));
Ok(())
}
fn on_queue_purge_ok_received(
&self,
method: protocol::queue::PurgeOk,
resolver: PromiseResolver<LongUInt>,
) -> Result<()> {
resolver.swear(Ok(method.message_count));
Ok(())
}
fn on_queue_declare_ok_received(
&self,
method: protocol::queue::DeclareOk,
resolver: PromiseResolver<Queue>,
) -> Result<()> {
let queue = Queue::new(method.queue, method.message_count, method.consumer_count);
self.queues.register(queue.clone().into());
resolver.swear(Ok(queue));
Ok(())
}
fn on_basic_get_ok_received(
&self,
method: protocol::basic::GetOk,
resolver: PromiseResolver<Option<BasicGetMessage>>,
queue: ShortString,
) -> Result<()> {
self.queues.start_basic_get_delivery(
queue.as_str(),
BasicGetMessage::new(
method.delivery_tag,
method.exchange,
method.routing_key,
method.redelivered,
method.message_count,
),
resolver,
);
self.set_state(ChannelState::WillReceiveContent(Some(queue), None));
Ok(())
}
fn on_basic_get_empty_received(&self, _: protocol::basic::GetEmpty) -> Result<()> {
match self.frames.next_expected_reply(self.id) {
Some(Reply::BasicGetOk(resolver, _)) => {
resolver.swear(Ok(None));
Ok(())
}
_ => {
self.set_error(Error::UnexpectedReply)?;
Err(Error::UnexpectedReply)
}
}
}
#[allow(clippy::too_many_arguments)]
fn on_basic_consume_ok_received(
&self,
method: protocol::basic::ConsumeOk,
resolver: PromiseResolver<Consumer>,
queue: ShortString,
) -> Result<()> {
let consumer = Consumer::new(method.consumer_tag.clone(), self.executor.clone());
self.queues
.register_consumer(queue.as_str(), method.consumer_tag, consumer.clone());
resolver.swear(Ok(consumer));
Ok(())
}
fn on_basic_deliver_received(&self, method: protocol::basic::Deliver) -> Result<()> {
if let Some(queue_name) = self.queues.start_consumer_delivery(
method.consumer_tag.as_str(),
Delivery::new(
method.delivery_tag,
method.exchange,
method.routing_key,
method.redelivered,
),
) {
self.set_state(ChannelState::WillReceiveContent(
Some(queue_name),
Some(method.consumer_tag),
));
}
Ok(())
}
fn on_basic_cancel_received(&self, method: protocol::basic::Cancel) -> Result<()> {
self.queues
.deregister_consumer(method.consumer_tag.as_str())
.and(if !method.nowait {
self.register_internal_promise(self.basic_cancel_ok(method.consumer_tag.as_str()))
} else {
Ok(())
})
}
fn on_basic_cancel_ok_received(&self, method: protocol::basic::CancelOk) -> Result<()> {
self.queues
.deregister_consumer(method.consumer_tag.as_str())
}
fn on_basic_ack_received(&self, method: protocol::basic::Ack) -> Result<()> {
if self.status.confirm() {
if method.multiple {
if method.delivery_tag > 0 {
self.acknowledgements
.ack_all_before(method.delivery_tag)
.or_else(|err| {
self.acknowledgement_error(
err,
method.get_amqp_class_id(),
method.get_amqp_method_id(),
)
})?;
} else {
self.acknowledgements.ack_all_pending();
}
} else {
self.acknowledgements
.ack(method.delivery_tag)
.or_else(|err| {
self.acknowledgement_error(
err,
method.get_amqp_class_id(),
method.get_amqp_method_id(),
)
})?;
}
}
Ok(())
}
fn on_basic_nack_received(&self, method: protocol::basic::Nack) -> Result<()> {
if self.status.confirm() {
if method.multiple {
if method.delivery_tag > 0 {
self.acknowledgements
.nack_all_before(method.delivery_tag)
.or_else(|err| {
self.acknowledgement_error(
err,
method.get_amqp_class_id(),
method.get_amqp_method_id(),
)
})?;
} else {
self.acknowledgements.nack_all_pending();
}
} else {
self.acknowledgements
.nack(method.delivery_tag)
.or_else(|err| {
self.acknowledgement_error(
err,
method.get_amqp_class_id(),
method.get_amqp_method_id(),
)
})?;
}
}
Ok(())
}
fn on_basic_return_received(&self, method: protocol::basic::Return) -> Result<()> {
self.returned_messages
.start_new_delivery(BasicReturnMessage::new(
method.exchange,
method.routing_key,
method.reply_code,
method.reply_text,
));
self.set_state(ChannelState::WillReceiveContent(None, None));
Ok(())
}
fn on_basic_recover_ok_received(&self) -> Result<()> {
self.queues.drop_prefetched_messages()
}
fn on_confirm_select_ok_received(&self) -> Result<()> {
self.status.set_confirm();
Ok(())
}
fn on_access_request_ok_received(&self, _: protocol::access::RequestOk) -> Result<()> {
Ok(())
}
}
impl close_on_drop::__private::Closable for Channel {
fn close(&self, reply_code: ShortUInt, reply_text: &str) -> Promise<()> {
if self.status().is_connected() {
Channel::close(self, reply_code, reply_text)
} else {
Promise::new_with_data(Ok(()))
}
}
}
include!(concat!(env!("OUT_DIR"), "/channel.rs"));
| 34.781798 | 100 | 0.546988 |
fbb33b94d95770e8e79533513af65d45f018ce14 | 1,300 | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// min-lldb-version: 310
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:run
// gdb-command:print a
// gdbg-check:$1 = {1, 2, 3}
// gdbr-check:$1 = [1, 2, 3]
// gdb-command:print vec::VECT
// gdbg-check:$2 = {4, 5, 6}
// gdbr-check:$2 = [4, 5, 6]
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print a
// lldb-check:[...]$0 = [1, 2, 3]
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
#![omit_gdb_pretty_printer_section]
// Mutable global that the debugger inspects (`print vec::VECT` above).
static mut VECT: [i32; 3] = [1, 2, 3];
fn main() {
    // Local array inspected via `print a` in the gdb/lldb commands above.
    let a = [1, 2, 3];
    // Writing a `static mut` requires unsafe; these stores give VECT the
    // values the debugger checks (4, 5, 6).
    unsafe {
        VECT[0] = 4;
        VECT[1] = 5;
        VECT[2] = 6;
    }
    zzz(); // #break
}
fn zzz() {()}
| 25.490196 | 100 | 0.543077 |
7a48031161670564cca2563296a73dc6f0b05e0a | 28,831 | // Copyright 2014 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use phf_shared;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::borrow::Cow;
use std::cmp::Ordering::{self, Equal};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem;
use std::ops;
use std::slice;
use std::str;
use std::sync::Mutex;
use std::sync::atomic::AtomicIsize;
use std::sync::atomic::Ordering::SeqCst;
use shared::{STATIC_TAG, INLINE_TAG, DYNAMIC_TAG, TAG_MASK, MAX_INLINE_LEN, STATIC_SHIFT_BITS,
ENTRY_ALIGNMENT, pack_static};
use self::UnpackedAtom::{Dynamic, Inline, Static};
#[cfg(feature = "log-events")]
use event::Event;
#[cfg(not(feature = "log-events"))]
macro_rules! log (($e:expr) => (()));
const NB_BUCKETS: usize = 1 << 12; // 4096
const BUCKET_MASK: u64 = (1 << 12) - 1;
// Fixed-size, separately-chained hash table of interned strings; the bucket
// index is taken from the low bits of the string's hash (BUCKET_MASK).
struct StringCache {
    buckets: [Option<Box<StringCacheEntry>>; NB_BUCKETS],
}
lazy_static! {
static ref STRING_CACHE: Mutex<StringCache> = Mutex::new(StringCache::new());
}
// One interned string plus its chain link and reference count.
struct StringCacheEntry {
    // Next entry in this bucket's singly-linked chain.
    next_in_bucket: Option<Box<StringCacheEntry>>,
    // Full 64-bit hash of `string`; the bucket index uses its low bits.
    hash: u64,
    // Number of live dynamic atoms pointing at this entry.
    ref_count: AtomicIsize,
    // The interned string itself.
    string: Box<str>,
}
impl StringCacheEntry {
    /// Build a new entry holding `string`, prepended to the bucket chain
    /// `next`, with an initial reference count of 1 (the caller's reference).
    fn new(next: Option<Box<StringCacheEntry>>, hash: u64, string: String)
           -> StringCacheEntry {
        StringCacheEntry {
            next_in_bucket: next,
            // Field-init shorthand instead of the redundant `hash: hash`.
            hash,
            ref_count: AtomicIsize::new(1),
            string: string.into_boxed_str(),
        }
    }
}
impl StringCache {
    fn new() -> StringCache {
        StringCache {
            // All-zero bytes is the `None` bit-pattern for Option<Box<_>>
            // (null-pointer niche), so zeroing yields an array of empty
            // buckets without building it element by element.
            buckets: unsafe { mem::zeroed() },
        }
    }
    // Intern `string`, returning a pointer to its (refcounted) cache entry.
    // `hash` must be the full hash of `string` under the static set's key.
    fn add(&mut self, string: Cow<str>, hash: u64) -> *mut StringCacheEntry {
        let bucket_index = (hash & BUCKET_MASK) as usize;
        {
            // Walk the bucket chain looking for an existing entry.
            let mut ptr: Option<&mut Box<StringCacheEntry>> =
                self.buckets[bucket_index].as_mut();
            while let Some(entry) = ptr.take() {
                if entry.hash == hash && &*entry.string == &*string {
                    if entry.ref_count.fetch_add(1, SeqCst) > 0 {
                        return &mut **entry;
                    }
                    // Uh-oh. The pointer's reference count was zero, which means someone may try
                    // to free it. (Naive attempts to defend against this, for example having the
                    // destructor check to see whether the reference count is indeed zero, don't
                    // work due to ABA.) Thus we need to temporarily add a duplicate string to the
                    // list.
                    entry.ref_count.fetch_sub(1, SeqCst);
                    break;
                }
                ptr = entry.next_in_bucket.as_mut();
            }
        }
        // Packed atoms rely on entry pointers being sufficiently aligned.
        debug_assert!(mem::align_of::<StringCacheEntry>() >= ENTRY_ALIGNMENT);
        let string = string.into_owned();
        let _string_clone = if cfg!(feature = "log-events") {
            string.clone()
        } else {
            "".to_owned()
        };
        // Prepend the new entry to the bucket chain.
        let mut entry = Box::new(StringCacheEntry::new(
            self.buckets[bucket_index].take(), hash, string));
        let ptr: *mut StringCacheEntry = &mut *entry;
        self.buckets[bucket_index] = Some(entry);
        log!(Event::Insert(ptr as u64, _string_clone));
        ptr
    }
    // Unlink and free the entry whose packed pointer is `key`.
    // Precondition (debug-asserted): its reference count is already zero.
    fn remove(&mut self, key: u64) {
        let ptr = key as *mut StringCacheEntry;
        let bucket_index = {
            let value: &StringCacheEntry = unsafe { &*ptr };
            debug_assert!(value.ref_count.load(SeqCst) == 0);
            (value.hash & BUCKET_MASK) as usize
        };
        // Walk the chain until we find the exact entry (by address), then
        // splice it out by replacing the link with its successor.
        let mut current: &mut Option<Box<StringCacheEntry>> = &mut self.buckets[bucket_index];
        loop {
            let entry_ptr: *mut StringCacheEntry = match current.as_mut() {
                Some(entry) => &mut **entry,
                None => break,
            };
            if entry_ptr == ptr {
                mem::drop(mem::replace(current, unsafe { (*entry_ptr).next_in_bucket.take() }));
                break;
            }
            current = unsafe { &mut (*entry_ptr).next_in_bucket };
        }
        log!(Event::Remove(key));
    }
}
/// A static `PhfStrSet`
///
/// This trait is implemented by static sets of interned strings generated using
/// `string_cache_codegen`, and `EmptyStaticAtomSet` for when strings will be added dynamically.
///
/// It is used by the methods of [`Atom`] to check if a string is present in the static set.
///
/// [`Atom`]: struct.Atom.html
pub trait StaticAtomSet {
/// Get the location of the static string set in the binary.
fn get() -> &'static PhfStrSet;
/// Get the index of the empty string, which is in every set and is used for `Atom::default`.
fn empty_string_index() -> u32;
}
/// A string set created using a [perfect hash function], specifically
/// [Hash, Displace and Compress].
///
/// See the CHD document for the meaning of the struct fields.
///
/// [perfect hash function]: https://en.wikipedia.org/wiki/Perfect_hash_function
/// [Hash, Displace and Compress]: http://cmph.sourceforge.net/papers/esa09.pdf
pub struct PhfStrSet {
pub key: u64,
pub disps: &'static [(u32, u32)],
pub atoms: &'static [&'static str],
pub hashes: &'static [u32],
}
/// An empty static atom set for when only dynamic strings will be added
pub struct EmptyStaticAtomSet;
impl StaticAtomSet for EmptyStaticAtomSet {
fn get() -> &'static PhfStrSet {
// The name is a lie: this set is not empty (it contains the empty string)
// but that’s only to avoid divisions by zero in rust-phf.
static SET: PhfStrSet = PhfStrSet {
key: 0,
disps: &[(0, 0)],
atoms: &[""],
// "" SipHash'd, and xored with u64_hash_to_u32.
hashes: &[0x3ddddef3],
};
&SET
}
fn empty_string_index() -> u32 {
0
}
}
/// Use this if you don’t care about static atoms.
pub type DefaultAtom = Atom<EmptyStaticAtomSet>;
/// Represents a string that has been interned.
///
/// In reality this contains a complex packed datastructure and the methods to extract information
/// from it, along with type information to tell the compiler which static set it corresponds to.
pub struct Atom<Static: StaticAtomSet> {
/// This field is public so that the `atom!()` macros can use it.
/// You should not otherwise access this field.
#[doc(hidden)]
pub unsafe_data: u64,
#[doc(hidden)]
pub phantom: PhantomData<Static>,
}
impl<Static: StaticAtomSet> ::precomputed_hash::PrecomputedHash for Atom<Static> {
fn precomputed_hash(&self) -> u32 {
self.get_hash()
}
}
impl<'a, Static: StaticAtomSet> From<&'a Atom<Static>> for Atom<Static> {
fn from(atom: &'a Self) -> Self {
atom.clone()
}
}
/// Fold a 64-bit hash down to 32 bits by XOR-ing the high half into the low
/// half, so both halves contribute to the result.
fn u64_hash_as_u32(h: u64) -> u32 {
    // This may or may not be great...
    let folded = (h >> 32) ^ h;
    folded as u32
}
impl<Static: StaticAtomSet> Atom<Static> {
    // Decode the packed representation into its static/inline/dynamic form.
    #[inline(always)]
    unsafe fn unpack(&self) -> UnpackedAtom {
        UnpackedAtom::from_packed(self.unsafe_data)
    }
    /// Get the hash of the string as it is stored in the set.
    pub fn get_hash(&self) -> u32 {
        match unsafe { self.unpack() } {
            // Static atoms: precomputed hash from the generated set.
            Static(index) => {
                let static_set = Static::get();
                static_set.hashes[index as usize]
            }
            // Dynamic atoms: fold the entry's stored 64-bit hash.
            Dynamic(entry) => {
                let entry = entry as *mut StringCacheEntry;
                u64_hash_as_u32(unsafe { (*entry).hash })
            }
            // Inline atoms: the packed word itself serves as the hash input.
            Inline(..) => {
                u64_hash_as_u32(self.unsafe_data)
            }
        }
    }
}
impl<Static: StaticAtomSet> Default for Atom<Static> {
    #[inline]
    fn default() -> Self {
        // Every static set contains the empty string; pack its index directly.
        let empty = pack_static(Static::empty_string_index());
        Atom {
            unsafe_data: empty,
            phantom: PhantomData,
        }
    }
}
impl<Static: StaticAtomSet> Hash for Atom<Static> {
    #[inline]
    fn hash<H>(&self, state: &mut H) where H: Hasher {
        // Feed the precomputed 32-bit hash instead of re-hashing the bytes.
        let precomputed = self.get_hash();
        state.write_u32(precomputed)
    }
}
impl<Static: StaticAtomSet> Eq for Atom<Static> {}
// NOTE: This impl requires that a given string must always be interned the same way.
impl<Static: StaticAtomSet> PartialEq for Atom<Static> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.unsafe_data == other.unsafe_data
}
}
impl<Static: StaticAtomSet> PartialEq<str> for Atom<Static> {
fn eq(&self, other: &str) -> bool {
&self[..] == other
}
}
impl<Static: StaticAtomSet> PartialEq<Atom<Static>> for str {
fn eq(&self, other: &Atom<Static>) -> bool {
self == &other[..]
}
}
impl<Static: StaticAtomSet> PartialEq<String> for Atom<Static> {
fn eq(&self, other: &String) -> bool {
&self[..] == &other[..]
}
}
impl<'a, Static: StaticAtomSet> From<Cow<'a, str>> for Atom<Static> {
    #[inline]
    fn from(string_to_add: Cow<'a, str>) -> Self {
        // First, probe the static PHF set for an exact match.
        let static_set = Static::get();
        let hash = phf_shared::hash(&*string_to_add, static_set.key);
        let index = phf_shared::get_index(hash, static_set.disps, static_set.atoms.len());
        let unpacked = if static_set.atoms[index as usize] == string_to_add {
            Static(index)
        } else {
            let len = string_to_add.len();
            if len <= MAX_INLINE_LEN {
                // Short strings are stored inline in the packed word.
                let mut buf: [u8; 7] = [0; 7];
                buf[..len].copy_from_slice(string_to_add.as_bytes());
                Inline(len as u8, buf)
            } else {
                // Otherwise intern in the global cache (refcounted entry).
                Dynamic(STRING_CACHE.lock().unwrap().add(string_to_add, hash) as *mut ())
            }
        };
        let data = unsafe { unpacked.pack() };
        log!(Event::Intern(data));
        Atom { unsafe_data: data, phantom: PhantomData }
    }
}
impl<'a, Static: StaticAtomSet> From<&'a str> for Atom<Static> {
#[inline]
fn from(string_to_add: &str) -> Self {
Atom::from(Cow::Borrowed(string_to_add))
}
}
impl<Static: StaticAtomSet> From<String> for Atom<Static> {
#[inline]
fn from(string_to_add: String) -> Self {
Atom::from(Cow::Owned(string_to_add))
}
}
impl<Static: StaticAtomSet> Clone for Atom<Static> {
    #[inline(always)]
    fn clone(&self) -> Self {
        // Only dynamic atoms carry a reference count; static and inline atoms
        // are plain copies of the packed word.
        unsafe {
            if let Some(entry) = from_packed_dynamic(self.unsafe_data) {
                let entry = entry as *mut StringCacheEntry;
                (*entry).ref_count.fetch_add(1, SeqCst);
            }
        }
        Atom {
            unsafe_data: self.unsafe_data,
            phantom: PhantomData,
        }
    }
}
impl<Static: StaticAtomSet> Drop for Atom<Static> {
    #[inline]
    fn drop(&mut self) {
        // Out of line to guide inlining.
        fn drop_slow<Static: StaticAtomSet>(this: &mut Atom<Static>) {
            STRING_CACHE.lock().unwrap().remove(this.unsafe_data);
        }
        // Only dynamic atoms hold a reference on a cache entry; the last
        // owner (count drops from 1 to 0) removes the entry from the cache.
        unsafe {
            match from_packed_dynamic(self.unsafe_data) {
                Some(entry) => {
                    let entry = entry as *mut StringCacheEntry;
                    if (*entry).ref_count.fetch_sub(1, SeqCst) == 1 {
                        drop_slow(self);
                    }
                }
                _ => (),
            }
        }
    }
}
impl<Static: StaticAtomSet> ops::Deref for Atom<Static> {
type Target = str;
#[inline]
fn deref(&self) -> &str {
unsafe {
match self.unpack() {
Inline(..) => {
let buf = inline_orig_bytes(&self.unsafe_data);
str::from_utf8_unchecked(buf)
},
Static(idx) => Static::get().atoms.get(idx as usize).expect("bad static atom"),
Dynamic(entry) => {
let entry = entry as *mut StringCacheEntry;
&(*entry).string
}
}
}
}
}
impl<Static: StaticAtomSet> fmt::Display for Atom<Static> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
<str as fmt::Display>::fmt(self, f)
}
}
impl<Static: StaticAtomSet> fmt::Debug for Atom<Static> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ty_str = unsafe {
match self.unpack() {
Dynamic(..) => "dynamic",
Inline(..) => "inline",
Static(..) => "static",
}
};
write!(f, "Atom('{}' type={})", &*self, ty_str)
}
}
impl<Static: StaticAtomSet> PartialOrd for Atom<Static> {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
if self.unsafe_data == other.unsafe_data {
return Some(Equal);
}
self.as_ref().partial_cmp(other.as_ref())
}
}
impl<Static: StaticAtomSet> Ord for Atom<Static> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
if self.unsafe_data == other.unsafe_data {
return Equal;
}
self.as_ref().cmp(other.as_ref())
}
}
impl<Static: StaticAtomSet> AsRef<str> for Atom<Static> {
fn as_ref(&self) -> &str {
&self
}
}
impl<Static: StaticAtomSet> Serialize for Atom<Static> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer {
let string: &str = self.as_ref();
string.serialize(serializer)
}
}
impl<'a, Static: StaticAtomSet> Deserialize<'a> for Atom<Static> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'a> {
let string: String = try!(Deserialize::deserialize(deserializer));
Ok(Atom::from(string))
}
}
// AsciiExt requires mutating methods, so we just implement the non-mutating ones.
// We don't need to implement is_ascii because there's no performance improvement
// over the one from &str.
impl<Static: StaticAtomSet> Atom<Static> {
fn from_mutated_str<F: FnOnce(&mut str)>(s: &str, f: F) -> Self {
let mut buffer: [u8; 64] = unsafe { mem::uninitialized() };
if let Some(buffer_prefix) = buffer.get_mut(..s.len()) {
buffer_prefix.copy_from_slice(s.as_bytes());
let as_str = unsafe { ::std::str::from_utf8_unchecked_mut(buffer_prefix) };
f(as_str);
Atom::from(&*as_str)
} else {
let mut string = s.to_owned();
f(&mut string);
Atom::from(string)
}
}
/// Like [`to_ascii_uppercase`].
///
/// [`to_ascii_uppercase`]: https://doc.rust-lang.org/std/ascii/trait.AsciiExt.html#tymethod.to_ascii_uppercase
pub fn to_ascii_uppercase(&self) -> Self {
for (i, b) in self.bytes().enumerate() {
if let b'a' ... b'z' = b {
return Atom::from_mutated_str(self, |s| s[i..].make_ascii_uppercase())
}
}
self.clone()
}
/// Like [`to_ascii_lowercase`].
///
/// [`to_ascii_lowercase`]: https://doc.rust-lang.org/std/ascii/trait.AsciiExt.html#tymethod.to_ascii_lowercase
pub fn to_ascii_lowercase(&self) -> Self {
for (i, b) in self.bytes().enumerate() {
if let b'A' ... b'Z' = b {
return Atom::from_mutated_str(self, |s| s[i..].make_ascii_lowercase())
}
}
self.clone()
}
/// Like [`eq_ignore_ascii_case`].
///
/// [`eq_ignore_ascii_case`]: https://doc.rust-lang.org/std/ascii/trait.AsciiExt.html#tymethod.eq_ignore_ascii_case
pub fn eq_ignore_ascii_case(&self, other: &Self) -> bool {
(self == other) || self.eq_str_ignore_ascii_case(&**other)
}
/// Like [`eq_ignore_ascii_case`], but takes an unhashed string as `other`.
///
/// [`eq_ignore_ascii_case`]: https://doc.rust-lang.org/std/ascii/trait.AsciiExt.html#tymethod.eq_ignore_ascii_case
pub fn eq_str_ignore_ascii_case(&self, other: &str) -> bool {
(&**self).eq_ignore_ascii_case(other)
}
}
// Atoms use a compact representation which fits this enum in a single u64.
// Inlining avoids actually constructing the unpacked representation in memory.
#[allow(missing_copy_implementations)]
enum UnpackedAtom {
/// Pointer to a dynamic table entry. Must be 16-byte aligned!
Dynamic(*mut ()),
/// Length + bytes of string.
Inline(u8, [u8; 7]),
/// Index in static interning table.
Static(u32),
}
#[inline(always)]
fn inline_atom_slice(x: &u64) -> &[u8] {
unsafe {
let x: *const u64 = x;
let mut data = x as *const u8;
// All except the lowest byte, which is first in little-endian, last in big-endian.
if cfg!(target_endian = "little") {
data = data.offset(1);
}
let len = 7;
slice::from_raw_parts(data, len)
}
}
#[inline(always)]
fn inline_atom_slice_mut(x: &mut u64) -> &mut [u8] {
unsafe {
let x: *mut u64 = x;
let mut data = x as *mut u8;
// All except the lowest byte, which is first in little-endian, last in big-endian.
if cfg!(target_endian = "little") {
data = data.offset(1);
}
let len = 7;
slice::from_raw_parts_mut(data, len)
}
}
impl UnpackedAtom {
/// Pack a key, fitting it into a u64 with flags and data. See `string_cache_shared` for
/// hints for the layout.
#[inline(always)]
unsafe fn pack(self) -> u64 {
match self {
Static(n) => pack_static(n),
Dynamic(p) => {
let n = p as u64;
debug_assert!(0 == n & TAG_MASK);
n
}
Inline(len, buf) => {
debug_assert!((len as usize) <= MAX_INLINE_LEN);
let mut data: u64 = (INLINE_TAG as u64) | ((len as u64) << 4);
{
let dest = inline_atom_slice_mut(&mut data);
dest.copy_from_slice(&buf)
}
data
}
}
}
/// Unpack a key, extracting information from a single u64 into useable structs.
#[inline(always)]
unsafe fn from_packed(data: u64) -> UnpackedAtom {
debug_assert!(DYNAMIC_TAG == 0); // Dynamic is untagged
match (data & TAG_MASK) as u8 {
DYNAMIC_TAG => Dynamic(data as *mut ()),
STATIC_TAG => Static((data >> STATIC_SHIFT_BITS) as u32),
INLINE_TAG => {
let len = ((data & 0xf0) >> 4) as usize;
debug_assert!(len <= MAX_INLINE_LEN);
let mut buf: [u8; 7] = [0; 7];
let src = inline_atom_slice(&data);
buf.copy_from_slice(src);
Inline(len as u8, buf)
},
_ => debug_unreachable!(),
}
}
}
/// Used for a fast path in Clone and Drop.
#[inline(always)]
unsafe fn from_packed_dynamic(data: u64) -> Option<*mut ()> {
if (DYNAMIC_TAG as u64) == (data & TAG_MASK) {
Some(data as *mut ())
} else {
None
}
}
/// For as_slice on inline atoms, we need a pointer into the original
/// string contents.
///
/// It's undefined behavior to call this on a non-inline atom!!
#[inline(always)]
unsafe fn inline_orig_bytes<'a>(data: &'a u64) -> &'a [u8] {
match UnpackedAtom::from_packed(*data) {
Inline(len, _) => {
let src = inline_atom_slice(&data);
&src[..(len as usize)]
}
_ => debug_unreachable!(),
}
}
#[cfg(test)]
#[macro_use]
mod tests {
use std::mem;
use std::thread;
use super::{StaticAtomSet, StringCacheEntry};
use super::UnpackedAtom::{Dynamic, Inline, Static};
use shared::ENTRY_ALIGNMENT;
include!(concat!(env!("OUT_DIR"), "/test_atom.rs"));
pub type Atom = TestAtom;
#[test]
fn test_as_slice() {
let s0 = Atom::from("");
assert!(s0.as_ref() == "");
let s1 = Atom::from("class");
assert!(s1.as_ref() == "class");
let i0 = Atom::from("blah");
assert!(i0.as_ref() == "blah");
let s0 = Atom::from("BLAH");
assert!(s0.as_ref() == "BLAH");
let d0 = Atom::from("zzzzzzzzzz");
assert!(d0.as_ref() == "zzzzzzzzzz");
let d1 = Atom::from("ZZZZZZZZZZ");
assert!(d1.as_ref() == "ZZZZZZZZZZ");
}
macro_rules! unpacks_to (($e:expr, $t:pat) => (
match unsafe { Atom::from($e).unpack() } {
$t => (),
_ => panic!("atom has wrong type"),
}
));
#[test]
fn test_types() {
unpacks_to!("", Static(..));
unpacks_to!("id", Static(..));
unpacks_to!("body", Static(..));
unpacks_to!("c", Inline(..)); // "z" is a static atom
unpacks_to!("zz", Inline(..));
unpacks_to!("zzz", Inline(..));
unpacks_to!("zzzz", Inline(..));
unpacks_to!("zzzzz", Inline(..));
unpacks_to!("zzzzzz", Inline(..));
unpacks_to!("zzzzzzz", Inline(..));
unpacks_to!("zzzzzzzz", Dynamic(..));
unpacks_to!("zzzzzzzzzzzzz", Dynamic(..));
}
#[test]
fn test_equality() {
let s0 = Atom::from("fn");
let s1 = Atom::from("fn");
let s2 = Atom::from("loop");
let i0 = Atom::from("blah");
let i1 = Atom::from("blah");
let i2 = Atom::from("blah2");
let d0 = Atom::from("zzzzzzzz");
let d1 = Atom::from("zzzzzzzz");
let d2 = Atom::from("zzzzzzzzz");
assert!(s0 == s1);
assert!(s0 != s2);
assert!(i0 == i1);
assert!(i0 != i2);
assert!(d0 == d1);
assert!(d0 != d2);
assert!(s0 != i0);
assert!(s0 != d0);
assert!(i0 != d0);
}
#[test]
fn default() {
assert_eq!(TestAtom::default(), test_atom!(""));
assert_eq!(&*TestAtom::default(), "");
}
#[test]
fn ord() {
fn check(x: &str, y: &str) {
assert_eq!(x < y, Atom::from(x) < Atom::from(y));
assert_eq!(x.cmp(y), Atom::from(x).cmp(&Atom::from(y)));
assert_eq!(x.partial_cmp(y), Atom::from(x).partial_cmp(&Atom::from(y)));
}
check("a", "body");
check("asdf", "body");
check("zasdf", "body");
check("z", "body");
check("a", "bbbbb");
check("asdf", "bbbbb");
check("zasdf", "bbbbb");
check("z", "bbbbb");
}
#[test]
fn clone() {
let s0 = Atom::from("fn");
let s1 = s0.clone();
let s2 = Atom::from("loop");
let i0 = Atom::from("blah");
let i1 = i0.clone();
let i2 = Atom::from("blah2");
let d0 = Atom::from("zzzzzzzz");
let d1 = d0.clone();
let d2 = Atom::from("zzzzzzzzz");
assert!(s0 == s1);
assert!(s0 != s2);
assert!(i0 == i1);
assert!(i0 != i2);
assert!(d0 == d1);
assert!(d0 != d2);
assert!(s0 != i0);
assert!(s0 != d0);
assert!(i0 != d0);
}
macro_rules! assert_eq_fmt (($fmt:expr, $x:expr, $y:expr) => ({
let x = $x;
let y = $y;
if x != y {
panic!("assertion failed: {} != {}",
format_args!($fmt, x),
format_args!($fmt, y));
}
}));
#[test]
fn repr() {
fn check(s: &str, data: u64) {
assert_eq_fmt!("0x{:016X}", Atom::from(s).unsafe_data, data);
}
fn check_static(s: &str, x: Atom) {
assert_eq_fmt!("0x{:016X}", x.unsafe_data, Atom::from(s).unsafe_data);
assert_eq!(0x2, x.unsafe_data & 0xFFFF_FFFF);
// The index is unspecified by phf.
assert!((x.unsafe_data >> 32) <= TestAtomStaticSet::get().atoms.len() as u64);
}
// This test is here to make sure we don't change atom representation
// by accident. It may need adjusting if there are changes to the
// static atom table, the tag values, etc.
// Static atoms
check_static("a", test_atom!("a"));
check_static("address", test_atom!("address"));
check_static("area", test_atom!("area"));
// Inline atoms
check("e", 0x0000_0000_0000_6511);
check("xyzzy", 0x0000_797A_7A79_7851);
check("xyzzy01", 0x3130_797A_7A79_7871);
// Dynamic atoms. This is a pointer so we can't verify every bit.
assert_eq!(0x00, Atom::from("a dynamic string").unsafe_data & 0xf);
}
#[test]
fn assert_sizes() {
use std::mem;
struct EmptyWithDrop;
impl Drop for EmptyWithDrop {
fn drop(&mut self) {}
}
let compiler_uses_inline_drop_flags = mem::size_of::<EmptyWithDrop>() > 0;
// Guard against accidental changes to the sizes of things.
assert_eq!(mem::size_of::<Atom>(),
if compiler_uses_inline_drop_flags { 16 } else { 8 });
assert_eq!(mem::size_of::<super::StringCacheEntry>(),
8 + 4 * mem::size_of::<usize>());
}
#[test]
fn test_threads() {
for _ in 0_u32..100 {
thread::spawn(move || {
let _ = Atom::from("a dynamic string");
let _ = Atom::from("another string");
});
}
}
#[test]
fn atom_macro() {
assert_eq!(test_atom!("body"), Atom::from("body"));
assert_eq!(test_atom!("font-weight"), Atom::from("font-weight"));
}
#[test]
fn match_atom() {
assert_eq!(2, match Atom::from("head") {
test_atom!("br") => 1,
test_atom!("html") | test_atom!("head") => 2,
_ => 3,
});
assert_eq!(3, match Atom::from("body") {
test_atom!("br") => 1,
test_atom!("html") | test_atom!("head") => 2,
_ => 3,
});
assert_eq!(3, match Atom::from("zzzzzz") {
test_atom!("br") => 1,
test_atom!("html") | test_atom!("head") => 2,
_ => 3,
});
}
#[test]
fn ensure_deref() {
// Ensure we can Deref to a &str
let atom = Atom::from("foobar");
let _: &str = &atom;
}
#[test]
fn ensure_as_ref() {
// Ensure we can as_ref to a &str
let atom = Atom::from("foobar");
let _: &str = atom.as_ref();
}
#[test]
fn string_cache_entry_alignment_is_sufficient() {
assert!(mem::align_of::<StringCacheEntry>() >= ENTRY_ALIGNMENT);
}
#[test]
fn test_ascii_lowercase() {
assert_eq!(Atom::from("").to_ascii_lowercase(), Atom::from(""));
assert_eq!(Atom::from("aZ9").to_ascii_lowercase(), Atom::from("az9"));
assert_eq!(Atom::from("The Quick Brown Fox!").to_ascii_lowercase(), Atom::from("the quick brown fox!"));
assert_eq!(Atom::from("JE VAIS À PARIS").to_ascii_lowercase(), Atom::from("je vais À paris"));
}
#[test]
fn test_ascii_uppercase() {
assert_eq!(Atom::from("").to_ascii_uppercase(), Atom::from(""));
assert_eq!(Atom::from("aZ9").to_ascii_uppercase(), Atom::from("AZ9"));
assert_eq!(Atom::from("The Quick Brown Fox!").to_ascii_uppercase(), Atom::from("THE QUICK BROWN FOX!"));
assert_eq!(Atom::from("Je vais à Paris").to_ascii_uppercase(), Atom::from("JE VAIS à PARIS"));
}
#[test]
fn test_eq_ignore_ascii_case() {
assert!(Atom::from("").eq_ignore_ascii_case(&Atom::from("")));
assert!(Atom::from("aZ9").eq_ignore_ascii_case(&Atom::from("aZ9")));
assert!(Atom::from("aZ9").eq_ignore_ascii_case(&Atom::from("Az9")));
assert!(Atom::from("The Quick Brown Fox!").eq_ignore_ascii_case(&Atom::from("THE quick BROWN fox!")));
assert!(Atom::from("Je vais à Paris").eq_ignore_ascii_case(&Atom::from("je VAIS à PARIS")));
assert!(!Atom::from("").eq_ignore_ascii_case(&Atom::from("az9")));
assert!(!Atom::from("aZ9").eq_ignore_ascii_case(&Atom::from("")));
assert!(!Atom::from("aZ9").eq_ignore_ascii_case(&Atom::from("9Za")));
assert!(!Atom::from("The Quick Brown Fox!").eq_ignore_ascii_case(&Atom::from("THE quick BROWN fox!!")));
assert!(!Atom::from("Je vais à Paris").eq_ignore_ascii_case(&Atom::from("JE vais À paris")));
}
#[test]
fn test_from_string() {
assert!(Atom::from("camembert".to_owned()) == Atom::from("camembert"));
}
}
#[cfg(all(test, feature = "unstable"))]
#[path = "bench.rs"]
mod bench;
| 31.474891 | 119 | 0.552634 |
9cca9153c6c1b2716efb5378c07cdc3efdf3d139 | 28,474 | //! CodeBlock
//!
//! This module is for the CodeBlock which implements a function representation in the VM
use crate::{
builtins::function::{
arguments::Arguments, Captures, ClosureFunctionSignature, Function,
NativeFunctionSignature, ThisMode,
},
context::StandardObjects,
environment::{
function_environment_record::{BindingStatus, FunctionEnvironmentRecord},
lexical_environment::Environment,
},
gc::{Finalize, Trace},
object::{internal_methods::get_prototype_from_constructor, JsObject, ObjectData},
property::PropertyDescriptor,
syntax::ast::node::FormalParameter,
vm::{call_frame::FinallyReturn, CallFrame, Opcode},
Context, JsResult, JsString, JsValue,
};
use gc::Gc;
use std::{convert::TryInto, fmt::Write, mem::size_of};
/// This represents whether a value can be read from [`CodeBlock`] code.
pub unsafe trait Readable {}
unsafe impl Readable for u8 {}
unsafe impl Readable for i8 {}
unsafe impl Readable for u16 {}
unsafe impl Readable for i16 {}
unsafe impl Readable for u32 {}
unsafe impl Readable for i32 {}
unsafe impl Readable for u64 {}
unsafe impl Readable for i64 {}
unsafe impl Readable for f32 {}
unsafe impl Readable for f64 {}
/// The internal representation of a JavaScript function.
///
/// A CodeBlock is generated for each function compiled by the [ByteCompiler](crate::bytecompiler::ByteCompiler).
/// It stores the bytecode and the other attributes of the function.
#[derive(Debug, Trace, Finalize)]
pub struct CodeBlock {
/// Name of this function
pub(crate) name: JsString,
/// The number of arguments expected.
pub(crate) length: u32,
/// Is this function in strict mode.
pub(crate) strict: bool,
/// Is this function a constructor.
pub(crate) constructor: bool,
/// [[ThisMode]]
pub(crate) this_mode: ThisMode,
/// Parameters passed to this function.
pub(crate) params: Box<[FormalParameter]>,
/// Bytecode
pub(crate) code: Vec<u8>,
/// Literals
pub(crate) literals: Vec<JsValue>,
/// Variables names
pub(crate) variables: Vec<JsString>,
/// Functions inside this function
pub(crate) functions: Vec<Gc<CodeBlock>>,
/// Indicates if the codeblock contains a lexical name `arguments`
pub(crate) lexical_name_argument: bool,
}
impl CodeBlock {
/// Constructs a new `CodeBlock`.
pub fn new(name: JsString, length: u32, strict: bool, constructor: bool) -> Self {
Self {
code: Vec::new(),
literals: Vec::new(),
variables: Vec::new(),
functions: Vec::new(),
name,
length,
strict,
constructor,
this_mode: ThisMode::Global,
params: Vec::new().into_boxed_slice(),
lexical_name_argument: false,
}
}
/// Read type T from code.
///
/// # Safety
///
/// Does not check if read happens out-of-bounds.
pub unsafe fn read_unchecked<T: Readable>(&self, offset: usize) -> T {
// This has to be an unaligned read because we can't guarantee that
// the types are aligned.
self.code.as_ptr().add(offset).cast::<T>().read_unaligned()
}
/// Read type T from code.
#[track_caller]
pub fn read<T: Readable>(&self, offset: usize) -> T {
assert!(offset + size_of::<T>() - 1 < self.code.len());
// Safety: We checked that it is not an out-of-bounds read,
// so this is safe.
unsafe { self.read_unchecked(offset) }
}
/// Get the operands after the `Opcode` pointed to by `pc` as a `String`.
/// Modifies the `pc` to point to the next instruction.
///
/// Returns an empty `String` if no operands are present.
pub(crate) fn instruction_operands(&self, pc: &mut usize) -> String {
let opcode: Opcode = self.code[*pc].try_into().unwrap();
*pc += size_of::<Opcode>();
match opcode {
Opcode::PushInt8 => {
let result = self.read::<i8>(*pc).to_string();
*pc += size_of::<i8>();
result
}
Opcode::PushInt16 => {
let result = self.read::<i16>(*pc).to_string();
*pc += size_of::<i16>();
result
}
Opcode::PushInt32 => {
let result = self.read::<i32>(*pc).to_string();
*pc += size_of::<i32>();
result
}
Opcode::PushRational => {
let operand = self.read::<f64>(*pc);
*pc += size_of::<f64>();
ryu_js::Buffer::new().format(operand).to_string()
}
Opcode::PushLiteral
| Opcode::Jump
| Opcode::JumpIfFalse
| Opcode::JumpIfNotUndefined
| Opcode::CatchStart
| Opcode::FinallySetJump
| Opcode::Case
| Opcode::Default
| Opcode::LogicalAnd
| Opcode::LogicalOr
| Opcode::Coalesce
| Opcode::Call
| Opcode::CallWithRest
| Opcode::New
| Opcode::NewWithRest
| Opcode::ForInLoopInitIterator
| Opcode::ForInLoopNext => {
let result = self.read::<u32>(*pc).to_string();
*pc += size_of::<u32>();
result
}
Opcode::TryStart => {
let operand1 = self.read::<u32>(*pc);
*pc += size_of::<u32>();
let operand2 = self.read::<u32>(*pc);
*pc += size_of::<u32>();
format!("{}, {}", operand1, operand2)
}
Opcode::GetFunction => {
let operand = self.read::<u32>(*pc);
*pc += size_of::<u32>();
format!(
"{:04}: '{}' (length: {})",
operand,
self.functions[operand as usize].name,
self.functions[operand as usize].length
)
}
Opcode::DefInitArg
| Opcode::DefVar
| Opcode::DefInitVar
| Opcode::DefLet
| Opcode::DefInitLet
| Opcode::DefInitConst
| Opcode::GetName
| Opcode::GetNameOrUndefined
| Opcode::SetName
| Opcode::GetPropertyByName
| Opcode::SetPropertyByName
| Opcode::DefineOwnPropertyByName
| Opcode::SetPropertyGetterByName
| Opcode::SetPropertySetterByName
| Opcode::DeletePropertyByName
| Opcode::ConcatToString
| Opcode::CopyDataProperties => {
let operand = self.read::<u32>(*pc);
*pc += size_of::<u32>();
format!("{:04}: '{}'", operand, self.variables[operand as usize])
}
Opcode::Pop
| Opcode::Dup
| Opcode::Swap
| Opcode::PushZero
| Opcode::PushOne
| Opcode::PushNaN
| Opcode::PushPositiveInfinity
| Opcode::PushNegativeInfinity
| Opcode::PushNull
| Opcode::PushTrue
| Opcode::PushFalse
| Opcode::PushUndefined
| Opcode::PushEmptyObject
| Opcode::Add
| Opcode::Sub
| Opcode::Div
| Opcode::Mul
| Opcode::Mod
| Opcode::Pow
| Opcode::ShiftRight
| Opcode::ShiftLeft
| Opcode::UnsignedShiftRight
| Opcode::BitOr
| Opcode::BitAnd
| Opcode::BitXor
| Opcode::BitNot
| Opcode::In
| Opcode::Eq
| Opcode::StrictEq
| Opcode::NotEq
| Opcode::StrictNotEq
| Opcode::GreaterThan
| Opcode::GreaterThanOrEq
| Opcode::LessThan
| Opcode::LessThanOrEq
| Opcode::InstanceOf
| Opcode::TypeOf
| Opcode::Void
| Opcode::LogicalNot
| Opcode::Pos
| Opcode::Neg
| Opcode::Inc
| Opcode::Dec
| Opcode::GetPropertyByValue
| Opcode::SetPropertyByValue
| Opcode::DefineOwnPropertyByValue
| Opcode::SetPropertyGetterByValue
| Opcode::SetPropertySetterByValue
| Opcode::DeletePropertyByValue
| Opcode::ToBoolean
| Opcode::Throw
| Opcode::TryEnd
| Opcode::CatchEnd
| Opcode::CatchEnd2
| Opcode::FinallyStart
| Opcode::FinallyEnd
| Opcode::This
| Opcode::Return
| Opcode::PushDeclarativeEnvironment
| Opcode::PushFunctionEnvironment
| Opcode::PopEnvironment
| Opcode::InitIterator
| Opcode::IteratorNext
| Opcode::IteratorNextFull
| Opcode::IteratorClose
| Opcode::IteratorToArray
| Opcode::RequireObjectCoercible
| Opcode::ValueNotNullOrUndefined
| Opcode::RestParameterInit
| Opcode::RestParameterPop
| Opcode::PushValueToArray
| Opcode::PushIteratorToArray
| Opcode::PushNewArray
| Opcode::PopOnReturnAdd
| Opcode::PopOnReturnSub
| Opcode::Nop => String::new(),
}
}
}
impl std::fmt::Display for CodeBlock {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if self.name != "<main>" {
f.write_char('\n')?;
}
writeln!(
f,
"{:-^width$}",
format!("Compiled Output: '{}'", self.name),
width = 70
)?;
writeln!(
f,
" Location Count Opcode Operands"
)?;
f.write_char('\n')?;
let mut pc = 0;
let mut count = 0;
while pc < self.code.len() {
let opcode: Opcode = self.code[pc].try_into().unwrap();
write!(
f,
" {:06} {:04} {:<27}",
pc,
count,
opcode.as_str()
)?;
writeln!(f, "{}", self.instruction_operands(&mut pc))?;
count += 1;
}
f.write_char('\n')?;
f.write_str("Literals:\n")?;
if !self.literals.is_empty() {
for (i, value) in self.literals.iter().enumerate() {
writeln!(f, " {:04}: <{}> {}", i, value.type_of(), value.display())?;
}
} else {
writeln!(f, " <empty>")?;
}
f.write_char('\n')?;
f.write_str("Names:\n")?;
if !self.variables.is_empty() {
for (i, value) in self.variables.iter().enumerate() {
writeln!(f, " {:04}: {}", i, value)?;
}
} else {
writeln!(f, " <empty>")?;
}
f.write_char('\n')?;
f.write_str("Functions:\n")?;
if !self.functions.is_empty() {
for (i, code) in self.functions.iter().enumerate() {
writeln!(
f,
" {:04}: name: '{}' (length: {})",
i, code.name, code.length
)?;
}
} else {
writeln!(f, " <empty>")?;
}
Ok(())
}
}
#[derive(Debug)]
#[allow(missing_copy_implementations)]
pub struct JsVmFunction {}
impl JsVmFunction {
#[allow(clippy::new_ret_no_self)]
pub fn new(code: Gc<CodeBlock>, environment: Environment, context: &mut Context) -> JsObject {
let function_prototype = context.standard_objects().function_object().prototype();
let prototype = context.construct_object();
let name_property = PropertyDescriptor::builder()
.value(code.name.clone())
.writable(false)
.enumerable(false)
.configurable(true)
.build();
let length_property = PropertyDescriptor::builder()
.value(code.length)
.writable(false)
.enumerable(false)
.configurable(true)
.build();
let function = Function::VmOrdinary { code, environment };
let constructor =
JsObject::from_proto_and_data(function_prototype, ObjectData::function(function));
let constructor_property = PropertyDescriptor::builder()
.value(constructor.clone())
.writable(true)
.enumerable(false)
.configurable(true)
.build();
prototype
.define_property_or_throw("constructor", constructor_property, context)
.unwrap();
let prototype_property = PropertyDescriptor::builder()
.value(prototype)
.writable(true)
.enumerable(false)
.configurable(false)
.build();
constructor
.define_property_or_throw("prototype", prototype_property, context)
.unwrap();
constructor
.define_property_or_throw("name", name_property, context)
.unwrap();
constructor
.define_property_or_throw("length", length_property, context)
.unwrap();
constructor
}
}
pub(crate) enum FunctionBody {
Ordinary {
code: Gc<CodeBlock>,
environment: Environment,
},
Native {
function: NativeFunctionSignature,
},
Closure {
function: Box<dyn ClosureFunctionSignature>,
captures: Captures,
},
}
// TODO: this should be modified to not take `exit_on_return` and then moved to `internal_methods`
impl JsObject {
pub(crate) fn call_internal(
&self,
this: &JsValue,
args: &[JsValue],
context: &mut Context,
) -> JsResult<JsValue> {
let this_function_object = self.clone();
// let mut has_parameter_expressions = false;
if !self.is_callable() {
return context.throw_type_error("not a callable function");
}
let mut construct = false;
let body = {
let object = self.borrow();
let function = object.as_function().unwrap();
match function {
Function::Native {
function,
constructor,
} => {
if *constructor {
construct = true;
}
FunctionBody::Native {
function: *function,
}
}
Function::Closure {
function, captures, ..
} => FunctionBody::Closure {
function: function.clone(),
captures: captures.clone(),
},
Function::VmOrdinary { code, environment } => FunctionBody::Ordinary {
code: code.clone(),
environment: environment.clone(),
},
Function::Ordinary { .. } => unreachable!(),
}
};
match body {
FunctionBody::Native { function } if construct => {
function(&JsValue::undefined(), args, context)
}
FunctionBody::Native { function } => function(this, args, context),
FunctionBody::Closure { function, captures } => {
(function)(this, args, captures, context)
}
FunctionBody::Ordinary { code, environment } => {
let lexical_this_mode = code.this_mode == ThisMode::Lexical;
// Create a new Function environment whose parent is set to the scope of the function declaration (self.environment)
// <https://tc39.es/ecma262/#sec-prepareforordinarycall>
let local_env = FunctionEnvironmentRecord::new(
this_function_object.clone(),
if !lexical_this_mode {
Some(this.clone())
} else {
None
},
Some(environment.clone()),
// Arrow functions do not have a this binding https://tc39.es/ecma262/#sec-function-environment-records
if lexical_this_mode {
BindingStatus::Lexical
} else {
BindingStatus::Uninitialized
},
JsValue::undefined(),
context,
)?;
// Turn local_env into Environment so it can be cloned
let local_env: Environment = local_env.into();
// Push the environment first so that it will be used by default parameters
context.push_environment(local_env.clone());
let mut arguments_in_parameter_names = false;
let mut is_simple_parameter_list = true;
let mut has_parameter_expressions = false;
for param in code.params.iter() {
has_parameter_expressions = has_parameter_expressions || param.init().is_some();
arguments_in_parameter_names =
arguments_in_parameter_names || param.names().contains(&"arguments");
is_simple_parameter_list = is_simple_parameter_list
&& !param.is_rest_param()
&& param.is_identifier()
&& param.init().is_none()
}
// An arguments object is added when all of the following conditions are met
// - If not in an arrow function (10.2.11.16)
// - If the parameter list does not contain `arguments` (10.2.11.17)
// - If there are default parameters or if lexical names and function names do not contain `arguments` (10.2.11.18)
//
// https://tc39.es/ecma262/#sec-functiondeclarationinstantiation
if !lexical_this_mode
&& !arguments_in_parameter_names
&& (has_parameter_expressions || !code.lexical_name_argument)
{
// Add arguments object
let arguments_obj =
if context.strict() || code.strict || !is_simple_parameter_list {
Arguments::create_unmapped_arguments_object(args, context)
} else {
Arguments::create_mapped_arguments_object(
&this_function_object,
&code.params,
args,
&local_env,
context,
)
};
local_env.create_mutable_binding("arguments", false, true, context)?;
local_env.initialize_binding("arguments", arguments_obj.into(), context)?;
}
let arg_count = args.len();
// Push function arguments to the stack.
let args = if code.params.len() > args.len() {
let mut v = args.to_vec();
v.extend(vec![JsValue::Undefined; code.params.len() - args.len()]);
v
} else {
args.to_vec()
};
for arg in args.iter().rev() {
context.vm.push(arg)
}
let param_count = code.params.len();
let this = if this.is_null_or_undefined() {
context
.get_global_this_binding()
.expect("global env must have this binding")
} else {
this.to_object(context)
.expect("conversion to object cannot fail here")
.into()
};
context.vm.push_frame(CallFrame {
prev: None,
code,
this,
pc: 0,
catch: Vec::new(),
finally_return: FinallyReturn::None,
finally_jump: Vec::new(),
pop_on_return: 0,
pop_env_on_return: 0,
param_count,
arg_count,
});
let result = context.run();
context.pop_environment();
if has_parameter_expressions {
context.pop_environment();
}
result
}
}
}
pub(crate) fn construct_internal(
&self,
args: &[JsValue],
this_target: &JsValue,
context: &mut Context,
) -> JsResult<JsValue> {
let this_function_object = self.clone();
// let mut has_parameter_expressions = false;
if !self.is_constructor() {
return context.throw_type_error("not a constructor function");
}
let body = {
let object = self.borrow();
let function = object.as_function().unwrap();
match function {
Function::Native { function, .. } => FunctionBody::Native {
function: *function,
},
Function::Closure {
function, captures, ..
} => FunctionBody::Closure {
function: function.clone(),
captures: captures.clone(),
},
Function::VmOrdinary { code, environment } => FunctionBody::Ordinary {
code: code.clone(),
environment: environment.clone(),
},
Function::Ordinary { .. } => unreachable!(),
}
};
match body {
FunctionBody::Native { function, .. } => function(this_target, args, context),
FunctionBody::Closure { function, captures } => {
(function)(this_target, args, captures, context)
}
FunctionBody::Ordinary { code, environment } => {
let this: JsValue = {
// If the prototype of the constructor is not an object, then use the default object
// prototype as prototype for the new object
// see <https://tc39.es/ecma262/#sec-ordinarycreatefromconstructor>
// see <https://tc39.es/ecma262/#sec-getprototypefromconstructor>
let prototype = get_prototype_from_constructor(
this_target,
StandardObjects::object_object,
context,
)?;
JsObject::from_proto_and_data(prototype, ObjectData::ordinary()).into()
};
let lexical_this_mode = code.this_mode == ThisMode::Lexical;
// Create a new Function environment whose parent is set to the scope of the function declaration (self.environment)
// <https://tc39.es/ecma262/#sec-prepareforordinarycall>
let local_env = FunctionEnvironmentRecord::new(
this_function_object.clone(),
Some(this.clone()),
Some(environment),
// Arrow functions do not have a this binding https://tc39.es/ecma262/#sec-function-environment-records
if lexical_this_mode {
BindingStatus::Lexical
} else {
BindingStatus::Uninitialized
},
JsValue::undefined(),
context,
)?;
// Turn local_env into Environment so it can be cloned
let local_env: Environment = local_env.into();
// Push the environment first so that it will be used by default parameters
context.push_environment(local_env.clone());
let mut arguments_in_parameter_names = false;
let mut is_simple_parameter_list = true;
let mut has_parameter_expressions = false;
for param in code.params.iter() {
has_parameter_expressions = has_parameter_expressions || param.init().is_some();
arguments_in_parameter_names =
arguments_in_parameter_names || param.names().contains(&"arguments");
is_simple_parameter_list = is_simple_parameter_list
&& !param.is_rest_param()
&& param.is_identifier()
&& param.init().is_none()
}
// An arguments object is added when all of the following conditions are met
// - If not in an arrow function (10.2.11.16)
// - If the parameter list does not contain `arguments` (10.2.11.17)
// - If there are default parameters or if lexical names and function names do not contain `arguments` (10.2.11.18)
//
// https://tc39.es/ecma262/#sec-functiondeclarationinstantiation
if !lexical_this_mode
&& !arguments_in_parameter_names
&& (has_parameter_expressions || !code.lexical_name_argument)
{
// Add arguments object
let arguments_obj =
if context.strict() || code.strict || !is_simple_parameter_list {
Arguments::create_unmapped_arguments_object(args, context)
} else {
Arguments::create_mapped_arguments_object(
&this_function_object,
&code.params,
args,
&local_env,
context,
)
};
local_env.create_mutable_binding("arguments", false, true, context)?;
local_env.initialize_binding("arguments", arguments_obj.into(), context)?;
}
let arg_count = args.len();
// Push function arguments to the stack.
let args = if code.params.len() > args.len() {
let mut v = args.to_vec();
v.extend(vec![JsValue::Undefined; code.params.len() - args.len()]);
v
} else {
args.to_vec()
};
for arg in args.iter().rev() {
context.vm.push(arg)
}
let param_count = code.params.len();
let this = if this.is_null_or_undefined() {
context
.get_global_this_binding()
.expect("global env must have this binding")
} else {
this.to_object(context)
.expect("conversion to object cannot fail here")
.into()
};
context.vm.push_frame(CallFrame {
prev: None,
code,
this,
pc: 0,
catch: Vec::new(),
finally_return: FinallyReturn::None,
finally_jump: Vec::new(),
pop_on_return: 0,
pop_env_on_return: 0,
param_count,
arg_count,
});
let result = context.run()?;
let this = context.get_this_binding();
context.pop_environment();
if has_parameter_expressions {
context.pop_environment();
}
if result.is_object() {
Ok(result)
} else {
this
}
}
}
}
}
| 35.906683 | 132 | 0.491922 |
1d6d3c264da5568a2b64732f410d0285385a97a3 | 2,980 | use std::fmt::*;
/// Tokens in the Raptorex programming language.
///
/// Literal variants own/borrow their payload; punctuation and operator
/// variants are unit-like and render via [`Display`] as their source
/// spelling.
#[derive(Debug, Clone, PartialEq)]
pub enum Token<'a> {
    // Literal data values
    String(String),
    Num(i32),
    Dec(f32),
    Bool(bool),
    // Identifiers, e.g. variable, function names, keywords
    Identifier(&'a str),
    // Keywords, e.g. struct, impl, as
    Keyword(&'a str),
    // Data types, usually these tokens reference a return/variable type
    // e.g. foo: **String** = "foo"
    Datatype(&'a str),
    // Operators, e.g +, -, *, /
    Add,
    Sub,
    Mul,
    Div,
    Modulo,
    Pow,
    // Common syntax, e.g. =>, =, :=, (
    ThinArrow,
    FatArrow,
    Eq,
    EqEq,
    ColonEq,
    EqBangEq,
    Lt,
    Le,
    Gt,
    Ge,
    Dot,
    Colon,
    SemiColon,
    LeftParen,
    RightParen,
    LeftBrace,
    RightBrace,
    LeftBracket,
    RightBracket,
    Comma,
}

impl Display for Token<'_> {
    /// Writes the token back out in its source-level spelling.
    /// String literals are re-quoted; all other payloads use their own
    /// `Display` form.
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        // Each arm evaluates to the `write!` result directly; the match is
        // exhaustive, so no trailing `Ok(())` is required.
        match self {
            // Literal data values
            Token::String(s) => write!(f, "\"{}\"", s),
            Token::Num(n) => write!(f, "{}", n),
            Token::Dec(d) => write!(f, "{}", d),
            Token::Bool(b) => write!(f, "{}", b),
            // Identifiers, keywords and type names print verbatim
            Token::Identifier(i) => write!(f, "{}", i),
            Token::Keyword(k) => write!(f, "{}", k),
            Token::Datatype(dt) => write!(f, "{}", dt),
            // Operators
            Token::Add => write!(f, "+"),
            Token::Sub => write!(f, "-"),
            Token::Mul => write!(f, "*"),
            Token::Div => write!(f, "/"),
            Token::Modulo => write!(f, "%"),
            Token::Pow => write!(f, "^"),
            // Punctuation / common syntax
            Token::ThinArrow => write!(f, "->"),
            Token::FatArrow => write!(f, "=>"),
            Token::Eq => write!(f, "="),
            Token::EqEq => write!(f, "=="),
            Token::ColonEq => write!(f, ":="),
            Token::EqBangEq => write!(f, "=!="),
            Token::Lt => write!(f, "<"),
            Token::Le => write!(f, "<="),
            Token::Gt => write!(f, ">"),
            Token::Ge => write!(f, ">="),
            Token::Dot => write!(f, "."),
            Token::Colon => write!(f, ":"),
            Token::SemiColon => write!(f, ";"),
            Token::LeftParen => write!(f, "("),
            Token::RightParen => write!(f, ")"),
            // Braces must be escaped inside format strings
            Token::LeftBrace => write!(f, "{{"),
            Token::RightBrace => write!(f, "}}"),
            Token::LeftBracket => write!(f, "["),
            Token::RightBracket => write!(f, "]"),
            Token::Comma => write!(f, ","),
        }
    }
}
| 30.721649 | 80 | 0.434228 |
fc0f6de9785a9b424b6ca8050a6269cab3721aaf | 1,708 | /*
* Client Portal Web API
*
* Client Portal Web API
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
/// An account excluded from a summary response, as modeled by the Client
/// Portal Web API (this type follows the service's OpenAPI document; every
/// field is optional and omitted from serialization when `None`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SummaryExcludedAccounts {
    #[serde(rename = "lastUpdateAttempt", skip_serializing_if = "Option::is_none")]
    pub last_update_attempt: Option<String>,
    #[serde(rename = "fiName", skip_serializing_if = "Option::is_none")]
    pub fi_name: Option<String>,
    #[serde(rename = "acctTitle", skip_serializing_if = "Option::is_none")]
    pub acct_title: Option<String>,
    #[serde(rename = "acctNumAtFI", skip_serializing_if = "Option::is_none")]
    pub acct_num_at_fi: Option<String>,
    #[serde(rename = "acctId", skip_serializing_if = "Option::is_none")]
    pub acct_id: Option<String>,
    #[serde(rename = "lastUpdate", skip_serializing_if = "Option::is_none")]
    pub last_update: Option<String>,
    #[serde(rename = "harvestCode", skip_serializing_if = "Option::is_none")]
    pub harvest_code: Option<i32>,
    #[serde(rename = "lastUpdateStatusCode", skip_serializing_if = "Option::is_none")]
    pub last_update_status_code: Option<String>,
    #[serde(rename = "rc", skip_serializing_if = "Option::is_none")]
    pub rc: Option<i32>,
}

impl SummaryExcludedAccounts {
    /// Creates an instance with every field unset (`None`).
    pub fn new() -> SummaryExcludedAccounts {
        SummaryExcludedAccounts {
            last_update_attempt: None,
            fi_name: None,
            acct_title: None,
            acct_num_at_fi: None,
            acct_id: None,
            last_update: None,
            harvest_code: None,
            last_update_status_code: None,
            rc: None,
        }
    }
}

/// `Default` mirrors [`SummaryExcludedAccounts::new`], enabling struct-update
/// syntax (`..Default::default()`) and `unwrap_or_default()` for callers.
/// A zero-argument `new` without a matching `Default` trips the
/// `clippy::new_without_default` lint.
impl Default for SummaryExcludedAccounts {
    fn default() -> Self {
        Self::new()
    }
}
| 32.226415 | 86 | 0.653981 |
69b0734640b4b274b86a2ee7ff0d55653200681d | 434 | use doc_comment::{doc_comment, doctest};
use wasm_bindgen_test::*;
#[wasm_bindgen_test]
fn test_sample_app() {
    // `doc_comment!` turns the markdown page into a doc comment whose code
    // fences become doctests. The page is read from OUT_DIR, so a build
    // script is expected to copy the docs there before compilation.
    doc_comment!(include_str!(concat!(
        env!("OUT_DIR"),
        "/getting-started/build-a-sample-app.md"
    )));
}

#[wasm_bindgen_test]
fn test_optimizations() {
    // `doctest!` resolves the path itself (no OUT_DIR indirection needed).
    doctest!("advanced-topics/optimizations.md");
}

#[wasm_bindgen_test]
fn test_properties() {
    // Compile-checks the code samples in the properties documentation page.
    doctest!("concepts/components/properties.md");
}
| 20.666667 | 50 | 0.68894 |
9b37ad35d765e0bff06ea5a8868fbd024bd1976c | 1,668 | use std::io;
use std::collections::VecDeque;
const PRIME: usize = 131071;
/// Reads all of stdin and returns its characters as a `VecDeque`.
///
/// Rust has no `read_char` in the standard library, so the input is read
/// line by line and every character of each line (including the trailing
/// newline) is queued. Reading stops at the first blank line — which is
/// also what `read_line` yields at end of input.
fn input() -> VecDeque<char> {
    // Accumulates every character of stdin, in order.
    let mut chars: VecDeque<char> = VecDeque::new();

    loop {
        let mut line = String::new();
        io::stdin().read_line(&mut line)
            .expect("Error: Unable to read user input.");

        // A blank line (or EOF, where the buffer stays empty) ends the input.
        if line.trim() == "" { break }

        chars.extend(line.chars());
    }

    chars
}
fn main() {
    // Read the whole input up front as a queue of characters.
    let mut input = input();

    loop {
        // Collect the digits of the next binary number; '#' terminates it
        // and every other character (newlines etc.) is ignored.
        let mut bits: Vec<usize> = Vec::new();
        while let Some(c) = input.pop_front() {
            match c {
                '0' => bits.push(0),
                '1' => bits.push(1),
                '#' => break,
                _ => (),
            }
        }

        // No digits read means the input is exhausted.
        if bits.is_empty() { break }

        // Fold the bits into the value modulo PRIME. Reducing at every step
        // keeps the accumulator below 2^18, so arbitrarily long binary
        // strings cannot overflow. (The previous version shifted the whole
        // number first and reduced only once at the end, which overflows
        // `usize` — panicking in debug, wrapping in release — for inputs
        // wider than the machine word.)
        let remainder = bits.iter().fold(0, |acc, &bit| ((acc << 1) + bit) % PRIME);

        // The number is divisible by PRIME exactly when the remainder is 0.
        println!("{}", if remainder != 0 { "NO" } else { "YES" });
    }
}
| 31.471698 | 77 | 0.595923 |
165100ebadd4af76e28081c4e66f7d48d0719e3f | 6,243 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This test is run with --test-threasds=1 to prevent any tests from running in parallel.
//
// Running in parallel is something we want to control for specific tests cases especially
// when testing hardware stream processors.
#![cfg(test)]
use anyhow;
use h264_stream::*;
use lazy_static::lazy_static;
use std::{fs::File, io::Read, rc::Rc, result::Result};
use stream_processor_decoder_factory::*;
use stream_processor_test::*;
use video_frame_hasher::*;
/// Path to the H.264 test stream packaged with this test component.
pub const BEAR_TEST_FILE: &str = "/pkg/data/bear.h264";

lazy_static! {
    // Dereferencing LOGGER in a test forces the one-time syslog setup.
    static ref LOGGER: () = ::fuchsia_syslog::init().expect("Initializing syslog");
    // Known-good digest of the fully decoded bear.h264 stream; used by the
    // VideoFrameHasher validator to check the decoder output bit-exactly.
    static ref BEAR_DIGEST: ExpectedDigest = ExpectedDigest::new(
        "bear.h264 decoded digest",
        "1dc4d1510fc4d26173480f5e689e38dca7c1fa2df1894085f1bcee9c0d19acf7",
    );
}
// TODO(turnage): Add test spec for buffers released between streams.
// TODO(turnage): Add hash validator for NV12 and YV12.
#[test]
fn test_serial_bear_on_same_codec() -> std::result::Result<(), ::anyhow::Error> {
    // Runs two decode passes of bear.h264 over the same codec channel,
    // checking frame counts and the decoded-frame digest both times.
    with_large_stack(|| {
        // Force the lazy static so syslog is initialized before any logging.
        *LOGGER;
        let stream = Rc::new(TimestampedStream {
            source: H264Stream::from_file(BEAR_TEST_FILE)?,
            timestamps: 0..,
        });
        // Expect exactly one output packet per input video frame.
        let frame_count_validator = Rc::new(OutputPacketCountValidator {
            expected_output_packet_count: stream.video_frame_count(),
        });
        // Expect the decoded frames to hash to the known-good digest.
        let hash_validator = Rc::new(VideoFrameHasher { expected_digest: *BEAR_DIGEST });
        let spec = TestSpec {
            cases: vec![
                TestCase {
                    name: "Simple bear test run 1 on same channel",
                    stream: stream.clone(),
                    validators: vec![
                        Rc::new(TerminatesWithValidator {
                            expected_terminal_output: Output::Eos { stream_lifetime_ordinal: 1 },
                        }),
                        frame_count_validator.clone(),
                        hash_validator.clone(),
                    ],
                    stream_options: None,
                },
                TestCase {
                    // Second stream on the same channel; note the bumped
                    // stream_lifetime_ordinal (1 -> 3; presumably ordinals are
                    // odd and increase per stream — TODO confirm against the
                    // StreamProcessor protocol).
                    name: "Simple bear test run 2 on same channel",
                    stream,
                    validators: vec![
                        Rc::new(TerminatesWithValidator {
                            expected_terminal_output: Output::Eos { stream_lifetime_ordinal: 3 },
                        }),
                        frame_count_validator,
                        hash_validator,
                    ],
                    // Do not re-queue format details for the second stream.
                    stream_options: Some(StreamOptions {
                        queue_format_details: false,
                        ..StreamOptions::default()
                    }),
                },
            ],
            // Serial: cases must not run in parallel (see file header comment
            // about hardware stream processors).
            relation: CaseRelation::Serial,
            stream_processor_factory: Rc::new(DecoderFactory),
        };
        fuchsia_async::Executor::new()?.run_singlethreaded(spec.run())
    })
}
#[test]
fn bear_with_sei_itu_t35() -> Result<(), anyhow::Error> {
    // Prepends a SEI ITU-T T.35 NAL to bear.h264 and decodes the result.
    with_large_stack(|| {
        *LOGGER;
        // Build the SEI NAL bytes, then append the whole original stream.
        let mut nal_stream = H264SeiItuT35 {
            country_code: H264SeiItuT35::COUNTRY_CODE_UNITED_STATES,
            country_code_extension: 0,
            payload: vec![0xde, 0xad, 0xbe, 0xef],
        }
        .as_bytes()?;
        File::open(BEAR_TEST_FILE)?.read_to_end(&mut nal_stream)?;
        let stream =
            Rc::new(TimestampedStream { source: H264Stream::from(nal_stream), timestamps: 0.. });
        let frame_count_validator = Rc::new(OutputPacketCountValidator {
            expected_output_packet_count: stream.video_frame_count(),
        });
        // The SEI NAL must not change the decoded output: frames still have
        // to hash to the unmodified stream's digest.
        let hash_validator = Rc::new(VideoFrameHasher { expected_digest: *BEAR_DIGEST });
        let spec = TestSpec {
            cases: vec![TestCase {
                name: "Modified Bear with SEI ITU-T T.35 data test run",
                stream: stream,
                validators: vec![
                    Rc::new(TerminatesWithValidator {
                        expected_terminal_output: Output::Eos { stream_lifetime_ordinal: 1 },
                    }),
                    frame_count_validator,
                    hash_validator,
                ],
                stream_options: None,
            }],
            relation: CaseRelation::Serial,
            stream_processor_factory: Rc::new(DecoderFactory),
        };
        fuchsia_async::Executor::new()?.run_singlethreaded(spec.run())
    })
}
#[test]
fn bear_with_large_sei_itu_t35() -> Result<(), anyhow::Error> {
    // Same as bear_with_sei_itu_t35, but the SEI NAL is zero-padded to a
    // larger size before the original stream is appended.
    with_large_stack(|| {
        *LOGGER;
        let mut nal_stream = H264SeiItuT35 {
            country_code: H264SeiItuT35::COUNTRY_CODE_UNITED_STATES,
            country_code_extension: 0,
            payload: vec![0xde, 0xad, 0xbe, 0xef],
        }
        .as_bytes()?;

        // Appending 0s to an annex-B NAL shouldn't change the behavior.
        nal_stream.resize(428, 0);

        File::open(BEAR_TEST_FILE)?.read_to_end(&mut nal_stream)?;
        let stream =
            Rc::new(TimestampedStream { source: H264Stream::from(nal_stream), timestamps: 0.. });
        let frame_count_validator = Rc::new(OutputPacketCountValidator {
            expected_output_packet_count: stream.video_frame_count(),
        });
        // Decoded output must still match the unmodified stream's digest.
        let hash_validator = Rc::new(VideoFrameHasher { expected_digest: *BEAR_DIGEST });
        let spec = TestSpec {
            cases: vec![TestCase {
                name: "Modified Bear with Large SEI ITU-T T.35 data test run",
                stream: stream,
                validators: vec![
                    Rc::new(TerminatesWithValidator {
                        expected_terminal_output: Output::Eos { stream_lifetime_ordinal: 1 },
                    }),
                    frame_count_validator,
                    hash_validator,
                ],
                stream_options: None,
            }],
            relation: CaseRelation::Serial,
            stream_processor_factory: Rc::new(DecoderFactory),
        };
        fuchsia_async::Executor::new()?.run_singlethreaded(spec.run())
    })
}
| 35.674286 | 97 | 0.565754 |
672802951fd5ad9dc485fcc98f395e974f4021a6 | 858 | use resources::Amount;
/// A create account operation represents a new account creation.
#[derive(Debug, Clone)]
pub struct CreateAccount {
    // Public address of the account that was created/funded.
    account: String,
    // Public address of the account that funded the new account.
    funder: String,
    // Amount the new account was funded with.
    starting_balance: Amount,
}
impl CreateAccount {
    /// Builds a `CreateAccount` operation from its three components.
    pub fn new(account: String, funder: String, starting_balance: Amount) -> CreateAccount {
        CreateAccount { account, funder, starting_balance }
    }

    /// The public address of a new account that was funded.
    pub fn account(&self) -> &str {
        self.account.as_str()
    }

    /// The public address of the account that funded a new account.
    pub fn funder(&self) -> &str {
        self.funder.as_str()
    }

    /// Amount the account was funded.
    pub fn starting_balance(&self) -> Amount {
        self.starting_balance
    }
}
| 24.514286 | 92 | 0.618881 |
abfda4ac4026f2ab7d3a78bc27aec00026676fc7 | 12,471 | // Copyright 2019-2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
//! A layer between raw [`Runtime`] webview windows and Tauri.
use crate::{
api::config::WindowConfig,
event::{Event, EventHandler},
hooks::{InvokeMessage, InvokePayload, PageLoadPayload},
runtime::{
tag::ToJsString,
webview::{FileDropHandler, WebviewAttributes, WebviewRpcHandler},
Dispatch, Runtime,
},
sealed::{ManagerBase, RuntimeOrDispatch},
Icon, Manager, Params, WindowBuilder,
};
use serde::Serialize;
use serde_json::Value as JsonValue;
use std::hash::{Hash, Hasher};
/// A webview window that has yet to be built.
pub struct PendingWindow<M: Params> {
  /// The label that the window will be named.
  pub label: M::Label,

  /// The [`WindowBuilder`] that the window will be created with.
  pub window_attributes: <<M::Runtime as Runtime>::Dispatcher as Dispatch>::WindowBuilder,

  /// The [`WebviewAttributes`] that the webview will be created with.
  pub webview_attributes: WebviewAttributes,

  /// How to handle RPC calls on the webview window.
  pub rpc_handler: Option<WebviewRpcHandler<M>>,

  /// How to handle a file dropping onto the webview window.
  pub file_drop_handler: Option<FileDropHandler<M>>,

  /// The resolved URL to load on the webview.
  /// Defaults to `tauri://localhost` in the constructors below.
  pub url: String,
}
impl<M: Params> PendingWindow<M> {
  /// Create a new [`PendingWindow`] with a label and starting url.
  ///
  /// No RPC or file-drop handlers are set and the url defaults to
  /// `tauri://localhost`.
  pub fn new(
    window_attributes: <<M::Runtime as Runtime>::Dispatcher as Dispatch>::WindowBuilder,
    webview_attributes: WebviewAttributes,
    label: M::Label,
  ) -> Self {
    Self {
      window_attributes,
      webview_attributes,
      label,
      rpc_handler: None,
      file_drop_handler: None,
      url: "tauri://localhost".to_string(),
    }
  }

  /// Create a new [`PendingWindow`] from a [`WindowConfig`] with a label and starting url.
  pub fn with_config(
    window_config: WindowConfig,
    webview_attributes: WebviewAttributes,
    label: M::Label,
  ) -> Self {
    // Build the window builder from the config and delegate to `new` so the
    // default field values (no handlers, the `tauri://localhost` url) are
    // defined in exactly one place instead of being duplicated here.
    Self::new(
      <<<M::Runtime as Runtime>::Dispatcher as Dispatch>::WindowBuilder>::with_config(
        window_config,
      ),
      webview_attributes,
      label,
    )
  }
}
/// A webview window that is not yet managed by Tauri.
///
/// Equality and hashing for this type are label-based (see the `PartialEq`
/// and `Hash` impls below), so two detached windows with the same label
/// compare equal regardless of their dispatchers.
pub struct DetachedWindow<M: Params> {
  /// Name of the window
  pub label: M::Label,

  /// The [`Dispatch`](crate::runtime::Dispatch) associated with the window.
  pub dispatcher: <M::Runtime as Runtime>::Dispatcher,
}
// Manual impl: `#[derive(Clone)]` would add an unnecessary `M: Clone` bound.
impl<M: Params> Clone for DetachedWindow<M> {
  fn clone(&self) -> Self {
    Self {
      label: self.label.clone(),
      dispatcher: self.dispatcher.clone(),
    }
  }
}

impl<M: Params> Hash for DetachedWindow<M> {
  /// Only use the [`DetachedWindow`]'s label to represent its hash.
  fn hash<H: Hasher>(&self, state: &mut H) {
    self.label.hash(state)
  }
}

// Label-based equality; consistent with the label-based `Hash` above.
impl<M: Params> Eq for DetachedWindow<M> {}
impl<M: Params> PartialEq for DetachedWindow<M> {
  /// Only use the [`DetachedWindow`]'s label to compare equality.
  fn eq(&self, other: &Self) -> bool {
    self.label.eq(&other.label)
  }
}
/// We want to export the runtime related window at the crate root, but not look like a re-export.
pub(crate) mod export {
  use super::*;
  use crate::runtime::{manager::WindowManager, tag::TagRef};
  use std::borrow::Borrow;

  /// A webview window managed by Tauri.
  ///
  /// This type also implements [`Manager`] which allows you to manage other windows attached to
  /// the same application.
  ///
  /// Like [`DetachedWindow`], equality and hashing are label-based.
  ///
  /// TODO: expand these docs since this is a pretty important type
  pub struct Window<P: Params> {
    /// The webview window created by the runtime.
    window: DetachedWindow<P>,
    /// The manager to associate this webview window with.
    manager: WindowManager<P>,
  }

  // Manual impl: deriving Clone would add an unnecessary `M: Clone` bound.
  impl<M: Params> Clone for Window<M> {
    fn clone(&self) -> Self {
      Self {
        window: self.window.clone(),
        manager: self.manager.clone(),
      }
    }
  }

  impl<P: Params> Hash for Window<P> {
    /// Only use the [`Window`]'s label to represent its hash.
    fn hash<H: Hasher>(&self, state: &mut H) {
      self.window.label.hash(state)
    }
  }

  impl<P: Params> Eq for Window<P> {}
  impl<P: Params> PartialEq for Window<P> {
    /// Only use the [`Window`]'s label to compare equality.
    fn eq(&self, other: &Self) -> bool {
      self.window.label.eq(&other.window.label)
    }
  }

  // `Manager` has no required items here; management behavior comes from
  // the `ManagerBase` impl below.
  impl<P: Params> Manager<P> for Window<P> {}
  impl<P: Params> ManagerBase<P> for Window<P> {
    fn manager(&self) -> &WindowManager<P> {
      &self.manager
    }

    fn runtime(&mut self) -> RuntimeOrDispatch<'_, P> {
      RuntimeOrDispatch::Dispatch(self.dispatcher())
    }
  }

  impl<P: Params> Window<P> {
    /// Create a new window that is attached to the manager.
    pub(crate) fn new(manager: WindowManager<P>, window: DetachedWindow<P>) -> Self {
      Self { window, manager }
    }

    /// The current window's dispatcher (cloned from the detached window).
    pub(crate) fn dispatcher(&self) -> <P::Runtime as Runtime>::Dispatcher {
      self.window.dispatcher.clone()
    }

    /// How to handle this window receiving an [`InvokeMessage`].
    pub(crate) fn on_message(self, command: String, payload: InvokePayload) -> crate::Result<()> {
      let manager = self.manager.clone();
      match command.as_str() {
        // Special message emitted once the page has loaded.
        "__initialized" => {
          let payload: PageLoadPayload = serde_json::from_value(payload.inner)?;
          manager.run_on_page_load(self, payload);
        }
        _ => {
          let message = InvokeMessage::new(self, command.to_string(), payload);
          if let Some(module) = &message.payload.tauri_module {
            // Message targets a built-in Tauri API module.
            let module = module.to_string();
            crate::endpoints::handle(module, message, manager.config(), manager.package_info());
          } else if command.starts_with("plugin:") {
            // Message targets a registered plugin.
            manager.extend_api(message);
          } else {
            // Everything else goes to the application's invoke handler.
            manager.run_invoke_handler(message);
          }
        }
      }

      Ok(())
    }

    /// The label of this window.
    pub fn label(&self) -> &P::Label {
      &self.window.label
    }

    /// Serializes the payload and evaluates the manager's emit function on
    /// the page, passing a salt generated by the manager alongside the event.
    pub(crate) fn emit_internal<E: ?Sized, S>(
      &self,
      event: &E,
      payload: Option<S>,
    ) -> crate::Result<()>
    where
      P::Event: Borrow<E>,
      E: TagRef<P::Event>,
      S: Serialize,
    {
      // A missing payload is sent to JS as `null`.
      let js_payload = match payload {
        Some(payload_value) => serde_json::to_value(payload_value)?,
        None => JsonValue::Null,
      };

      self.eval(&format!(
        "window['{}']({{event: {}, payload: {}}}, '{}')",
        self.manager.event_emit_function_name(),
        event.to_js_string()?,
        js_payload,
        self.manager.generate_salt(),
      ))?;

      Ok(())
    }

    /// Emits an event to the current window.
    pub fn emit<E: ?Sized, S>(&self, event: &E, payload: Option<S>) -> crate::Result<()>
    where
      P::Event: Borrow<E>,
      E: TagRef<P::Event>,
      S: Serialize,
    {
      self.emit_internal(event, payload)
    }

    /// Emits an event on all windows except this one.
    pub fn emit_others<E: ?Sized, S>(&self, event: &E, payload: Option<S>) -> crate::Result<()>
    where
      P::Event: Borrow<E>,
      E: TagRef<P::Event>,
      S: Serialize + Clone,
    {
      self.manager.emit_filter(event, payload, |w| w != self)
    }

    /// Listen to an event on this window.
    pub fn listen<E: Into<P::Event>, F>(&self, event: E, handler: F) -> EventHandler
    where
      F: Fn(Event) + Send + 'static,
    {
      let label = self.window.label.clone();
      self.manager.listen(event.into(), Some(label), handler)
    }

    /// Listen to an event on this window a single time.
    pub fn once<E: Into<P::Event>, F>(&self, event: E, handler: F) -> EventHandler
    where
      F: Fn(Event) + Send + 'static,
    {
      let label = self.window.label.clone();
      self.manager.once(event.into(), Some(label), handler)
    }

    /// Triggers an event on this window.
    pub fn trigger<E: ?Sized>(&self, event: &E, data: Option<String>)
    where
      P::Event: Borrow<E>,
      E: TagRef<P::Event>,
    {
      let label = self.window.label.clone();
      self.manager.trigger(event, Some(label), data)
    }

    /// Evaluates JavaScript on this window.
    pub fn eval(&self, js: &str) -> crate::Result<()> {
      self.window.dispatcher.eval_script(js)
    }

    /// Determines if this window should be resizable.
    pub fn set_resizable(&self, resizable: bool) -> crate::Result<()> {
      self.window.dispatcher.set_resizable(resizable)
    }

    /// Set this window's title.
    pub fn set_title(&self, title: &str) -> crate::Result<()> {
      self.window.dispatcher.set_title(title.to_string())
    }

    /// Maximizes this window.
    pub fn maximize(&self) -> crate::Result<()> {
      self.window.dispatcher.maximize()
    }

    /// Un-maximizes this window.
    pub fn unmaximize(&self) -> crate::Result<()> {
      self.window.dispatcher.unmaximize()
    }

    /// Minimizes this window.
    pub fn minimize(&self) -> crate::Result<()> {
      self.window.dispatcher.minimize()
    }

    /// Un-minimizes this window.
    pub fn unminimize(&self) -> crate::Result<()> {
      self.window.dispatcher.unminimize()
    }

    /// Show this window.
    pub fn show(&self) -> crate::Result<()> {
      self.window.dispatcher.show()
    }

    /// Hide this window.
    pub fn hide(&self) -> crate::Result<()> {
      self.window.dispatcher.hide()
    }

    /// Closes this window.
    pub fn close(&self) -> crate::Result<()> {
      self.window.dispatcher.close()
    }

    /// Determines if this window should be [decorated].
    ///
    /// [decorated]: https://en.wikipedia.org/wiki/Window_(computing)#Window_decoration
    pub fn set_decorations(&self, decorations: bool) -> crate::Result<()> {
      self.window.dispatcher.set_decorations(decorations)
    }

    /// Determines if this window should always be on top of other windows.
    pub fn set_always_on_top(&self, always_on_top: bool) -> crate::Result<()> {
      self.window.dispatcher.set_always_on_top(always_on_top)
    }

    /// Sets this window's width.
    pub fn set_width(&self, width: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.set_width(width.into())
    }

    /// Sets this window's height.
    pub fn set_height(&self, height: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.set_height(height.into())
    }

    /// Resizes this window.
    pub fn resize(&self, width: impl Into<f64>, height: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.resize(width.into(), height.into())
    }

    /// Sets this window's minimum size.
    pub fn set_min_size(
      &self,
      min_width: impl Into<f64>,
      min_height: impl Into<f64>,
    ) -> crate::Result<()> {
      self
        .window
        .dispatcher
        .set_min_size(min_width.into(), min_height.into())
    }

    /// Sets this window's maximum size.
    pub fn set_max_size(
      &self,
      max_width: impl Into<f64>,
      max_height: impl Into<f64>,
    ) -> crate::Result<()> {
      self
        .window
        .dispatcher
        .set_max_size(max_width.into(), max_height.into())
    }

    /// Sets this window's x position.
    pub fn set_x(&self, x: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.set_x(x.into())
    }

    /// Sets this window's y position.
    pub fn set_y(&self, y: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.set_y(y.into())
    }

    /// Sets this window's position.
    pub fn set_position(&self, x: impl Into<f64>, y: impl Into<f64>) -> crate::Result<()> {
      self.window.dispatcher.set_position(x.into(), y.into())
    }

    /// Determines if this window should be fullscreen.
    pub fn set_fullscreen(&self, fullscreen: bool) -> crate::Result<()> {
      self.window.dispatcher.set_fullscreen(fullscreen)
    }

    /// Sets this window' icon.
    pub fn set_icon(&self, icon: Icon) -> crate::Result<()> {
      self.window.dispatcher.set_icon(icon)
    }

    /// Starts dragging the window.
    pub fn start_dragging(&self) -> crate::Result<()> {
      self.window.dispatcher.start_dragging()
    }

    /// Delegates salt verification to the window's manager.
    pub(crate) fn verify_salt(&self, salt: String) -> bool {
      self.manager.verify_salt(salt)
    }
  }
}
| 29.763723 | 98 | 0.617914 |
6287d1b5a76c0925a9f6ac8596c9067471594ab9 | 17,380 | mod resource_id;
mod endpoint;
mod poll;
mod registry;
mod driver;
mod remote_addr;
mod transport;
mod loader;
/// Module that specify the pattern to follow to create adapters.
/// This module is not part of the public API itself,
/// it must be used from the internals to build new adapters.
pub mod adapter;
// Reexports
pub use adapter::{SendStatus};
pub use resource_id::{ResourceId, ResourceType};
pub use endpoint::{Endpoint};
pub use remote_addr::{RemoteAddr, ToRemoteAddr};
pub use transport::{Transport};
pub use driver::{NetEvent};
pub use poll::{Readiness};
use loader::{DriverLoader, ActionControllerList, EventProcessorList};
use poll::{Poll, PollEvent};
use strum::{IntoEnumIterator};
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::{Duration, Instant};
use std::io::{self};
/// Create a network instance giving its controller and processor.
///
/// Mounts the adapter of every known [`Transport`] and then splits the
/// loaded drivers into the action half ([`NetworkController`]) and the
/// polling half ([`NetworkProcessor`]).
pub fn split() -> (NetworkController, NetworkProcessor) {
    let mut loader = DriverLoader::default();
    for transport in Transport::iter() {
        transport.mount_adapter(&mut loader);
    }

    let (poll, controllers, processors) = loader.take();

    (
        NetworkController::new(controllers),
        NetworkProcessor::new(poll, processors),
    )
}
/// Shareable instance in charge of controlling all the connections.
pub struct NetworkController {
    // One action controller per transport adapter, indexed by adapter id.
    controllers: ActionControllerList,
}
impl NetworkController {
    fn new(controllers: ActionControllerList) -> NetworkController {
        Self { controllers }
    }

    /// Creates a connection to the specified address.
    /// The endpoint, an identifier of the new connection, will be returned.
    /// This function will generate a [`NetEvent::Connected`] event with the result of the connection.
    /// This call will **NOT** block to perform the connection.
    ///
    /// Note that this function can return an error in the case the internal socket
    /// could not be binded or open in the OS, but never will return an error an regarding
    /// the connection itself.
    /// If you want to check if the connection has been established or not you have to read the
    /// boolean indicator in the [`NetEvent::Connected`] event.
    ///
    /// Example
    /// ```
    /// use message_io::node::{self, NodeEvent};
    /// use message_io::network::{Transport, NetEvent};
    ///
    /// let (handler, listener) = node::split();
    /// handler.signals().send_with_timer((), std::time::Duration::from_secs(1));
    ///
    /// let (id, addr) = handler.network().listen(Transport::FramedTcp, "127.0.0.1:0").unwrap();
    /// let (conn_endpoint, _) = handler.network().connect(Transport::FramedTcp, addr).unwrap();
    /// // The socket could not be able to send yet.
    ///
    /// listener.for_each(move |event| match event {
    ///     NodeEvent::Network(net_event) => match net_event {
    ///         NetEvent::Connected(endpoint, established) => {
    ///             assert_eq!(conn_endpoint, endpoint);
    ///             if established {
    ///                 println!("Connected!");
    ///                 handler.network().send(endpoint, &[42]);
    ///             }
    ///             else {
    ///                 println!("Could not connect");
    ///             }
    ///         },
    ///         NetEvent::Accepted(endpoint, listening_id) => {
    ///             assert_eq!(id, listening_id);
    ///             println!("New connected endpoint: {}", endpoint.addr());
    ///         },
    ///         _ => (),
    ///     }
    ///     NodeEvent::Signal(_) => handler.stop(),
    /// });
    /// ```
    pub fn connect(
        &self,
        transport: Transport,
        addr: impl ToRemoteAddr,
    ) -> io::Result<(Endpoint, SocketAddr)> {
        // NOTE(review): `to_remote_addr()` is unwrapped, so an address that
        // fails to convert panics instead of returning Err — confirm intended.
        let addr = addr.to_remote_addr().unwrap();
        // Dispatch to the adapter that handles this transport.
        self.controllers[transport.id() as usize].connect(addr).map(|(endpoint, addr)| {
            log::trace!("Connect to {}", endpoint);
            (endpoint, addr)
        })
    }

    /// Creates a connection to the specified address.
    /// This function is similar to [`NetworkController::connect()`] but will block
    /// until for the connection is ready.
    /// If the connection can not be established, a `ConnectionRefused` error will be returned.
    ///
    /// Note that the `Connect` event will be also generated.
    ///
    /// Since this function blocks the current thread, it must NOT be used inside
    /// the network callback because the internal event could not be processed.
    ///
    /// In order to get the best scalability and performance, use the non-blocking
    /// [`NetworkController::connect()`] version.
    ///
    /// Example
    /// ```
    /// use message_io::node::{self, NodeEvent};
    /// use message_io::network::{Transport, NetEvent};
    ///
    /// let (handler, listener) = node::split();
    /// handler.signals().send_with_timer((), std::time::Duration::from_secs(1));
    ///
    /// let (id, addr) = handler.network().listen(Transport::FramedTcp, "127.0.0.1:0").unwrap();
    /// match handler.network().connect_sync(Transport::FramedTcp, addr) {
    ///     Ok((endpoint, _)) => {
    ///         println!("Connected!");
    ///         handler.network().send(endpoint, &[42]);
    ///     }
    ///     Err(err) if err.kind() == std::io::ErrorKind::ConnectionRefused => {
    ///         println!("Could not connect");
    ///     }
    ///     Err(err) => println!("An OS error creating the socket"),
    /// }
    /// ```
    pub fn connect_sync(
        &self,
        transport: Transport,
        addr: impl ToRemoteAddr,
    ) -> io::Result<(Endpoint, SocketAddr)> {
        let (endpoint, addr) = self.connect(transport, addr)?;

        // Poll the resource's readiness every millisecond until it either
        // becomes ready (connected) or disappears (connection refused).
        loop {
            std::thread::sleep(Duration::from_millis(1));
            match self.is_ready(endpoint.resource_id()) {
                Some(true) => return Ok((endpoint, addr)),
                Some(false) => continue,
                None => {
                    return Err(io::Error::new(
                        io::ErrorKind::ConnectionRefused,
                        "Connection refused",
                    ))
                }
            }
        }
    }

    /// Listen messages from specified transport.
    /// The given address will be used as interface and listening port.
    /// If the port can be opened, a [ResourceId] identifying the listener is returned
    /// along with the local address, or an error if not.
    /// The address is returned despite you passed as parameter because
    /// when a `0` port is specified, the OS will choose the value.
    pub fn listen(
        &self,
        transport: Transport,
        addr: impl ToSocketAddrs,
    ) -> io::Result<(ResourceId, SocketAddr)> {
        // NOTE(review): address resolution failures panic on these unwraps
        // rather than returning Err; only the first resolved address is used.
        let addr = addr.to_socket_addrs().unwrap().next().unwrap();
        self.controllers[transport.id() as usize].listen(addr).map(|(resource_id, addr)| {
            log::trace!("Listening at {} by {}", addr, resource_id);
            (resource_id, addr)
        })
    }

    /// Send the data message thought the connection represented by the given endpoint.
    /// This function returns a [`SendStatus`] indicating the status of this send.
    /// There is no guarantee that send over a correct connection generates a [`SendStatus::Sent`]
    /// because any time a connection can be disconnected (even while you are sending).
    /// Except cases where you need to be sure that the message has been sent,
    /// you will want to process a [`NetEvent::Disconnected`] to determine if the connection
    /// is *alive* instead of check if `send()` returned [`SendStatus::ResourceNotFound`].
    pub fn send(&self, endpoint: Endpoint, data: &[u8]) -> SendStatus {
        log::trace!("Sending {} bytes to {}...", data.len(), endpoint);
        // The endpoint's resource id encodes which adapter owns it.
        let status =
            self.controllers[endpoint.resource_id().adapter_id() as usize].send(endpoint, data);
        log::trace!("Send status: {:?}", status);
        status
    }

    /// Remove a network resource.
    /// Returns `false` if the resource id doesn't exists.
    /// This is used to remove resources as connection or listeners.
    /// Resources of endpoints generated by listening in connection oriented transports
    /// can also be removed to close the connection.
    /// Removing an already connected connection implies a disconnection.
    /// Note that non-oriented connections as UDP use its listener resource to manage all
    /// remote endpoints internally, the remotes have no resource for themselves.
    /// It means that all generated `Endpoint`s share the `ResourceId` of the listener and
    /// if you remove this resource you are removing the listener of all of them.
    /// For that cases there is no need to remove the resource because non-oriented connections
    /// have not connection itself to close, 'there is no spoon'.
    pub fn remove(&self, resource_id: ResourceId) -> bool {
        log::trace!("Remove {}", resource_id);
        let value = self.controllers[resource_id.adapter_id() as usize].remove(resource_id);
        log::trace!("Removed: {}", value);
        value
    }

    /// Check a resource specified by `resource_id` is ready.
    /// If the status is `true` means that the resource is ready to use.
    /// In connection oriented transports, it implies the resource is connected.
    /// If the status is `false` it means that the resource is not yet ready to use.
    /// If the resource has been removed, disconnected, or does not exists in the network,
    /// a `None` is returned.
    pub fn is_ready(&self, resource_id: ResourceId) -> Option<bool> {
        self.controllers[resource_id.adapter_id() as usize].is_ready(resource_id)
    }
}
/// Instance in charge of processing input network events.
/// These events are offered to the user as a [`NetEvent`] along with its processing data.
pub struct NetworkProcessor {
    poll: Poll,                     // Event source polled in `process_poll_event`.
    processors: EventProcessorList, // Indexed by adapter id (see `resource_id.adapter_id()`).
}
impl NetworkProcessor {
    // Internal constructor: pairs the poller with the per-adapter processors.
    fn new(poll: Poll, processors: EventProcessorList) -> Self {
        Self { poll, processors }
    }
    /// Process the next poll event.
    /// This method waits the timeout specified until the poll event is generated.
    /// If `None` is passed as timeout, it will wait indefinitely.
    /// Note that there is no 1-1 relation between an internal poll event and a [`NetEvent`].
    /// You need to assume that processing an internal poll event could call 0 or N times to
    /// the callback with different `NetEvent`s.
    pub fn process_poll_event(
        &mut self,
        timeout: Option<Duration>,
        mut event_callback: impl FnMut(NetEvent<'_>),
    ) {
        let processors = &mut self.processors;
        self.poll.process_event(timeout, |poll_event| {
            match poll_event {
                PollEvent::Network(resource_id, interest) => {
                    // Route the readiness notification to the adapter that owns the resource.
                    let processor = &processors[resource_id.adapter_id() as usize];
                    processor.process(resource_id, interest, &mut |net_event| {
                        log::trace!("Processed {:?}", net_event);
                        event_callback(net_event);
                    });
                }
                #[allow(dead_code)] //TODO: remove it with native event support
                PollEvent::Waker => todo!(),
            }
        });
    }
    /// Process poll events until there are no more events during a `timeout` duration.
    /// This method makes successive calls to [`NetworkProcessor::process_poll_event()`].
    /// NOTE(review): each inner call may itself block up to `timeout`, so the total
    /// wall-clock time can approach twice `timeout` in the worst case — confirm intended.
    pub fn process_poll_events_until_timeout(
        &mut self,
        timeout: Duration,
        mut event_callback: impl FnMut(NetEvent<'_>),
    ) {
        loop {
            let now = Instant::now();
            self.process_poll_event(Some(timeout), |e| event_callback(e));
            if now.elapsed() > timeout {
                break
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::time::{Duration};
    use crate::util::thread::{NamespacedThread};
    use test_case::test_case;
    lazy_static::lazy_static! {
        // Generous upper bound for events expected to arrive almost immediately.
        static ref TIMEOUT: Duration = Duration::from_millis(1000);
        // Failed localhost connections can take several seconds to be reported.
        static ref LOCALHOST_CONN_TIMEOUT: Duration = Duration::from_millis(5000);
    }
    // Connecting to a live listener must yield exactly one `Connected(status = true)`
    // on the client side and exactly one `Accepted` on the listener side.
    #[cfg_attr(feature = "tcp", test_case(Transport::Tcp))]
    #[cfg_attr(feature = "tcp", test_case(Transport::FramedTcp))]
    #[cfg_attr(feature = "websocket", test_case(Transport::Ws))]
    fn successful_connection(transport: Transport) {
        let (controller, mut processor) = self::split();
        let (listener_id, addr) = controller.listen(transport, "127.0.0.1:0").unwrap();
        let (endpoint, _) = controller.connect(transport, addr).unwrap();
        let mut was_connected = 0;
        let mut was_accepted = 0;
        processor.process_poll_events_until_timeout(*TIMEOUT, |net_event| match net_event {
            NetEvent::Connected(net_endpoint, status) => {
                assert!(status);
                assert_eq!(endpoint, net_endpoint);
                was_connected += 1;
            }
            NetEvent::Accepted(_, net_listener_id) => {
                assert_eq!(listener_id, net_listener_id);
                was_accepted += 1;
            }
            _ => unreachable!(),
        });
        assert_eq!(was_accepted, 1);
        assert_eq!(was_connected, 1);
    }
    // Same scenario through the blocking `connect_sync` API: the connect happens on a
    // helper thread while this thread drives the event loop.
    #[cfg_attr(feature = "tcp", test_case(Transport::Tcp))]
    #[cfg_attr(feature = "tcp", test_case(Transport::FramedTcp))]
    #[cfg_attr(feature = "websocket", test_case(Transport::Ws))]
    fn successful_connection_sync(transport: Transport) {
        let (controller, mut processor) = self::split();
        let (_, addr) = controller.listen(transport, "127.0.0.1:0").unwrap();
        let mut thread = NamespacedThread::spawn("test", move || {
            let (endpoint, _) = controller.connect_sync(transport, addr).unwrap();
            assert!(controller.is_ready(endpoint.resource_id()).unwrap());
        });
        processor.process_poll_events_until_timeout(*TIMEOUT, |_| ());
        thread.join();
    }
    // Connecting to an address whose listener was just removed must report a failed
    // connection (`Connected` with `status == false`) and refuse sends meanwhile.
    #[cfg_attr(feature = "tcp", test_case(Transport::Tcp))]
    #[cfg_attr(feature = "tcp", test_case(Transport::FramedTcp))]
    #[cfg_attr(feature = "websocket", test_case(Transport::Ws))]
    fn unreachable_connection(transport: Transport) {
        let (controller, mut processor) = self::split();
        // Ensure that addr is not being used by another process,
        // because it takes some seconds to become reusable.
        let (listener_id, addr) = controller.listen(transport, "127.0.0.1:0").unwrap();
        controller.remove(listener_id);
        let (endpoint, _) = controller.connect(transport, addr).unwrap();
        assert_eq!(controller.send(endpoint, &[42]), SendStatus::ResourceNotAvailable);
        assert!(!controller.is_ready(endpoint.resource_id()).unwrap());
        let mut was_disconnected = false;
        processor.process_poll_events_until_timeout(*LOCALHOST_CONN_TIMEOUT, |net_event| {
            match net_event {
                NetEvent::Connected(net_endpoint, status) => {
                    assert!(!status);
                    assert_eq!(endpoint, net_endpoint);
                    was_disconnected = true;
                }
                _ => unreachable!(),
            }
        });
        assert!(was_disconnected);
    }
    // Blocking variant: `connect_sync` against a dead address must return
    // `ConnectionRefused` instead of producing an event.
    #[cfg_attr(feature = "tcp", test_case(Transport::Tcp))]
    #[cfg_attr(feature = "tcp", test_case(Transport::FramedTcp))]
    #[cfg_attr(feature = "websocket", test_case(Transport::Ws))]
    fn unreachable_connection_sync(transport: Transport) {
        let (controller, mut processor) = self::split();
        // Ensure that addr is not being used by another process,
        // because it takes some seconds to become reusable.
        let (listener_id, addr) = controller.listen(transport, "127.0.0.1:0").unwrap();
        controller.remove(listener_id);
        let mut thread = NamespacedThread::spawn("test", move || {
            let err = controller.connect_sync(transport, addr).unwrap_err();
            assert_eq!(err.kind(), io::ErrorKind::ConnectionRefused);
        });
        processor.process_poll_events_until_timeout(*LOCALHOST_CONN_TIMEOUT, |_| ());
        thread.join();
    }
    // Removing a listener twice: first call succeeds, second returns `false`,
    // and neither produces an event (callback is `unreachable!`).
    #[test]
    fn create_remove_listener() {
        let (controller, mut processor) = self::split();
        let (listener_id, _) = controller.listen(Transport::Tcp, "127.0.0.1:0").unwrap();
        assert!(controller.remove(listener_id)); // Do not generate an event
        assert!(!controller.remove(listener_id));
        processor.process_poll_events_until_timeout(*TIMEOUT, |_| unreachable!());
    }
    // A listener can be removed from inside the event callback right after it
    // accepted a connection.
    #[test]
    fn create_remove_listener_with_connection() {
        let (controller, mut processor) = self::split();
        let (listener_id, addr) = controller.listen(Transport::Tcp, "127.0.0.1:0").unwrap();
        controller.connect(Transport::Tcp, addr).unwrap();
        let mut was_accepted = false;
        processor.process_poll_events_until_timeout(*TIMEOUT, |net_event| match net_event {
            NetEvent::Connected(..) => (),
            NetEvent::Accepted(_, _) => {
                assert!(controller.remove(listener_id));
                assert!(!controller.remove(listener_id));
                was_accepted = true;
            }
            _ => unreachable!(),
        });
        assert!(was_accepted);
    }
}
| 41.28266 | 102 | 0.614902 |
ef5083b833d6bc146735e53271b2c85158d98055 | 1,069 | #[doc = "Reader of register SMVAL4"]
pub type R = crate::R<u16, super::SMVAL4>;
#[doc = "Writer for register SMVAL4"]
pub type W = crate::W<u16, super::SMVAL4>;
// NOTE(review): this file appears to be svd2rust-generated register access
// code — regenerate from the SVD rather than hand-editing.
#[doc = "Register SMVAL4 `reset()`'s with value 0"]
impl crate::ResetValue for super::SMVAL4 {
    type Type = u16;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `VAL4`"]
pub type VAL4_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `VAL4`"]
pub struct VAL4_W<'a> {
    w: &'a mut W,
}
impl<'a> VAL4_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // VAL4 spans the full 16 bits: clear the field then set the new value.
        self.w.bits = (self.w.bits & !0xffff) | ((value as u16) & 0xffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - Value Register 4"]
    #[inline(always)]
    pub fn val4(&self) -> VAL4_R {
        VAL4_R::new((self.bits & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - Value Register 4"]
    #[inline(always)]
    pub fn val4(&mut self) -> VAL4_W {
        VAL4_W { w: self }
    }
}
| 26.073171 | 74 | 0.561272 |
f5b17a614955624edd54ac44394ced91cd095e53 | 10,757 | #[doc = "Register `TX_CLR` reader"]
pub struct R(crate::R<TX_CLR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<TX_CLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
// Reader/writer wrappers delegate to the generic register proxies via Deref.
impl From<crate::R<TX_CLR_SPEC>> for R {
    #[inline(always)]
    fn from(reader: crate::R<TX_CLR_SPEC>) -> Self {
        R(reader)
    }
}
#[doc = "Register `TX_CLR` writer"]
pub struct W(crate::W<TX_CLR_SPEC>);
impl core::ops::Deref for W {
    type Target = crate::W<TX_CLR_SPEC>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl core::ops::DerefMut for W {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl From<crate::W<TX_CLR_SPEC>> for W {
    #[inline(always)]
    fn from(writer: crate::W<TX_CLR_SPEC>) -> Self {
        W(writer)
    }
}
// NOTE(review): the field description "nominal 17" looks truncated in the
// source SVD — confirm the intended value upstream.
#[doc = "Decode to trim the nominal 17\n\nValue on reset: 2"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum D_CAL_A {
    #[doc = "0: Maximum current, approximately 19% above nominal."]
    VALUE0 = 0,
    #[doc = "7: Nominal"]
    VALUE7 = 7,
    #[doc = "15: Minimum current, approximately 19% below nominal."]
    VALUE15 = 15,
}
impl From<D_CAL_A> for u8 {
    #[inline(always)]
    fn from(variant: D_CAL_A) -> Self {
        variant as _
    }
}
#[doc = "Field `D_CAL` reader - Decode to trim the nominal 17"]
pub struct D_CAL_R(crate::FieldReader<u8, D_CAL_A>);
impl D_CAL_R {
    #[inline(always)]
    pub(crate) fn new(bits: u8) -> Self {
        D_CAL_R(crate::FieldReader::new(bits))
    }
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> Option<D_CAL_A> {
        // Only 0, 7 and 15 are named values; any other raw value yields `None`.
        match self.bits {
            0 => Some(D_CAL_A::VALUE0),
            7 => Some(D_CAL_A::VALUE7),
            15 => Some(D_CAL_A::VALUE15),
            _ => None,
        }
    }
    #[doc = "Checks if the value of the field is `VALUE0`"]
    #[inline(always)]
    pub fn is_value0(&self) -> bool {
        **self == D_CAL_A::VALUE0
    }
    #[doc = "Checks if the value of the field is `VALUE7`"]
    #[inline(always)]
    pub fn is_value7(&self) -> bool {
        **self == D_CAL_A::VALUE7
    }
    #[doc = "Checks if the value of the field is `VALUE15`"]
    #[inline(always)]
    pub fn is_value15(&self) -> bool {
        **self == D_CAL_A::VALUE15
    }
}
impl core::ops::Deref for D_CAL_R {
    type Target = crate::FieldReader<u8, D_CAL_A>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `D_CAL` writer - Decode to trim the nominal 17"]
pub struct D_CAL_W<'a> {
    w: &'a mut W,
}
impl<'a> D_CAL_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: D_CAL_A) -> &'a mut W {
        // Safe wrapper: every enum variant is a valid 4-bit field value.
        unsafe { self.bits(variant.into()) }
    }
    #[doc = "Maximum current, approximately 19% above nominal."]
    #[inline(always)]
    pub fn value0(self) -> &'a mut W {
        self.variant(D_CAL_A::VALUE0)
    }
    #[doc = "Nominal"]
    #[inline(always)]
    pub fn value7(self) -> &'a mut W {
        self.variant(D_CAL_A::VALUE7)
    }
    #[doc = "Minimum current, approximately 19% below nominal."]
    #[inline(always)]
    pub fn value15(self) -> &'a mut W {
        self.variant(D_CAL_A::VALUE15)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // D_CAL occupies bits 0:3 of the register.
        self.w.bits = (self.w.bits & !0x0f) | (value as u32 & 0x0f);
        self.w
    }
}
#[doc = "Field `TXCAL45DM` reader - Decode to trim the nominal 45ohm series termination resistance to the USB_DM output pin"]
pub struct TXCAL45DM_R(crate::FieldReader<u8, u8>);
impl TXCAL45DM_R {
    #[inline(always)]
    pub(crate) fn new(bits: u8) -> Self {
        TXCAL45DM_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXCAL45DM_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXCAL45DM` writer - Decode to trim the nominal 45ohm series termination resistance to the USB_DM output pin"]
pub struct TXCAL45DM_W<'a> {
    w: &'a mut W,
}
impl<'a> TXCAL45DM_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // TXCAL45DM occupies bits 8:11.
        self.w.bits = (self.w.bits & !(0x0f << 8)) | ((value as u32 & 0x0f) << 8);
        self.w
    }
}
#[doc = "Field `TXENCAL45DN` reader - Enable resistance calibration on DN."]
pub struct TXENCAL45DN_R(crate::FieldReader<bool, bool>);
impl TXENCAL45DN_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        TXENCAL45DN_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXENCAL45DN_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXENCAL45DN` writer - Enable resistance calibration on DN."]
pub struct TXENCAL45DN_W<'a> {
    w: &'a mut W,
}
impl<'a> TXENCAL45DN_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // TXENCAL45DN is bit 13.
        self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13);
        self.w
    }
}
#[doc = "Field `TXCAL45DP` reader - Decode to trim the nominal 45ohm series termination resistance to the USB_DP output pin"]
pub struct TXCAL45DP_R(crate::FieldReader<u8, u8>);
impl TXCAL45DP_R {
    #[inline(always)]
    pub(crate) fn new(bits: u8) -> Self {
        TXCAL45DP_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXCAL45DP_R {
    type Target = crate::FieldReader<u8, u8>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXCAL45DP` writer - Decode to trim the nominal 45ohm series termination resistance to the USB_DP output pin"]
pub struct TXCAL45DP_W<'a> {
    w: &'a mut W,
}
impl<'a> TXCAL45DP_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u8) -> &'a mut W {
        // TXCAL45DP occupies bits 16:19.
        self.w.bits = (self.w.bits & !(0x0f << 16)) | ((value as u32 & 0x0f) << 16);
        self.w
    }
}
#[doc = "Field `TXENCAL45DP` reader - Enable resistance calibration on DP."]
pub struct TXENCAL45DP_R(crate::FieldReader<bool, bool>);
impl TXENCAL45DP_R {
    #[inline(always)]
    pub(crate) fn new(bits: bool) -> Self {
        TXENCAL45DP_R(crate::FieldReader::new(bits))
    }
}
impl core::ops::Deref for TXENCAL45DP_R {
    type Target = crate::FieldReader<bool, bool>;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
#[doc = "Field `TXENCAL45DP` writer - Enable resistance calibration on DP."]
pub struct TXENCAL45DP_W<'a> {
    w: &'a mut W,
}
impl<'a> TXENCAL45DP_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        // TXENCAL45DP is bit 21.
        self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21);
        self.w
    }
}
// Field accessors on the register reader: each extracts the field's bit range.
impl R {
    #[doc = "Bits 0:3 - Decode to trim the nominal 17"]
    #[inline(always)]
    pub fn d_cal(&self) -> D_CAL_R {
        D_CAL_R::new((self.bits & 0x0f) as u8)
    }
    #[doc = "Bits 8:11 - Decode to trim the nominal 45ohm series termination resistance to the USB_DM output pin"]
    #[inline(always)]
    pub fn txcal45dm(&self) -> TXCAL45DM_R {
        TXCAL45DM_R::new(((self.bits >> 8) & 0x0f) as u8)
    }
    #[doc = "Bit 13 - Enable resistance calibration on DN."]
    #[inline(always)]
    pub fn txencal45dn(&self) -> TXENCAL45DN_R {
        TXENCAL45DN_R::new(((self.bits >> 13) & 0x01) != 0)
    }
    #[doc = "Bits 16:19 - Decode to trim the nominal 45ohm series termination resistance to the USB_DP output pin"]
    #[inline(always)]
    pub fn txcal45dp(&self) -> TXCAL45DP_R {
        TXCAL45DP_R::new(((self.bits >> 16) & 0x0f) as u8)
    }
    #[doc = "Bit 21 - Enable resistance calibration on DP."]
    #[inline(always)]
    pub fn txencal45dp(&self) -> TXENCAL45DP_R {
        TXENCAL45DP_R::new(((self.bits >> 21) & 0x01) != 0)
    }
}
// Field write proxies on the register writer, plus raw-bits escape hatch.
impl W {
    #[doc = "Bits 0:3 - Decode to trim the nominal 17"]
    #[inline(always)]
    pub fn d_cal(&mut self) -> D_CAL_W {
        D_CAL_W { w: self }
    }
    #[doc = "Bits 8:11 - Decode to trim the nominal 45ohm series termination resistance to the USB_DM output pin"]
    #[inline(always)]
    pub fn txcal45dm(&mut self) -> TXCAL45DM_W {
        TXCAL45DM_W { w: self }
    }
    #[doc = "Bit 13 - Enable resistance calibration on DN."]
    #[inline(always)]
    pub fn txencal45dn(&mut self) -> TXENCAL45DN_W {
        TXENCAL45DN_W { w: self }
    }
    #[doc = "Bits 16:19 - Decode to trim the nominal 45ohm series termination resistance to the USB_DP output pin"]
    #[inline(always)]
    pub fn txcal45dp(&mut self) -> TXCAL45DP_W {
        TXCAL45DP_W { w: self }
    }
    #[doc = "Bit 21 - Enable resistance calibration on DP."]
    #[inline(always)]
    pub fn txencal45dp(&mut self) -> TXENCAL45DP_W {
        TXENCAL45DP_W { w: self }
    }
    #[doc = "Writes raw bits to the register."]
    #[inline(always)]
    pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
        self.0.bits(bits);
        self
    }
}
#[doc = "USB PHY Transmitter Control Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tx_clr](index.html) module"]
pub struct TX_CLR_SPEC;
// Marker type tying the register's raw width (u32) and capabilities together.
impl crate::RegisterSpec for TX_CLR_SPEC {
    type Ux = u32;
}
#[doc = "`read()` method returns [tx_clr::R](R) reader structure"]
impl crate::Readable for TX_CLR_SPEC {
    type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [tx_clr::W](W) writer structure"]
impl crate::Writable for TX_CLR_SPEC {
    type Writer = W;
}
#[doc = "`reset()` method sets TX_CLR to value 0x0a00_0402"]
impl crate::Resettable for TX_CLR_SPEC {
    #[inline(always)]
    fn reset_value() -> Self::Ux {
        0x0a00_0402
    }
}
| 32.206587 | 423 | 0.596821 |
09bfe981a717b5c11763864f9ffb2bc1653c36bb | 15,204 | use std::net::SocketAddr;
use std::time::Duration;
use futures::channel::mpsc::{channel, Receiver, Sender};
use futures::sink::SinkExt;
use futures::stream::StreamExt;
use crate::discovery;
use crate::internal::commands;
use crate::internal::driver::{Driver, Report};
use crate::internal::messaging::{Msg, OpMsg};
use crate::types::{self, ClusterSettings, OperationError, Settings, StreamMetadata};
/// Represents a connection to a single node. `Client` maintains a full duplex
/// connection to the EventStore server. An EventStore connection operates
/// quite differently than say a SQL connection. Normally when you use an
/// EventStore connection you want to keep the connection open for a much
/// longer period of time than when you use a SQL connection.
///
/// Another difference is that with the EventStore connection, all operations
/// are handled in a full async manner (even if you call the synchronous
/// behaviors). Many threads can use an EventStore connection at the same time
/// or a single thread can make many asynchronous requests. To get the most
/// performance out of the connection, it is generally recommended to use it
/// in this way.
#[derive(Clone)]
pub struct Connection {
    // Channel used to push messages to the connection's state machine task.
    sender: Sender<Msg>,
}
/// Helps constructing a connection to the server.
pub struct ConnectionBuilder {
    // Settings accumulated by the builder methods; consumed when connecting.
    pub settings: Settings,
}
impl ConnectionBuilder {
    /// Maximum delay of inactivity before the client sends a heartbeat request.
    pub fn heartbeat_delay(mut self, delay: Duration) -> Self {
        self.settings.heartbeat_delay = delay;
        self
    }
    /// Maximum delay the server has to issue a heartbeat response.
    pub fn heartbeat_timeout(mut self, timeout: Duration) -> Self {
        self.settings.heartbeat_timeout = timeout;
        self
    }
    /// Delay in which an operation will be retried if no response arrived.
    pub fn operation_timeout(mut self, timeout: Duration) -> Self {
        self.settings.operation_timeout = timeout;
        self
    }
    /// Retry strategy when an operation has timed out.
    pub fn operation_retry(mut self, strategy: types::Retry) -> Self {
        self.settings.operation_retry = strategy;
        self
    }
    /// Retry strategy when failing to connect.
    pub fn connection_retry(mut self, strategy: types::Retry) -> Self {
        self.settings.connection_retry = strategy;
        self
    }
    /// `Credentials` to use if other `Credentials` are not explicitly supplied
    /// when issuing commands.
    pub fn with_default_user(mut self, user: types::Credentials) -> Self {
        self.settings.default_user = Some(user);
        self
    }
    /// Default connection name.
    pub fn with_connection_name<S>(mut self, name: S) -> Self
    where
        S: AsRef<str>,
    {
        self.settings.connection_name = Some(name.as_ref().to_owned());
        self
    }
    /// The period used to check pending commands. Those checks include whether
    /// the connection has timed out or if the command was issued with a
    /// different connection.
    pub fn operation_check_period(mut self, period: Duration) -> Self {
        self.settings.operation_check_period = period;
        self
    }
    /// Maximum delay to create a successful connection to a node.
    pub fn connection_timeout(mut self, period: Duration) -> Self {
        self.settings.connection_timeout = period;
        self
    }
    /// Maximum delay to physically connect to a node. This property differs from
    /// `connection_timeout` by referencing the delay to have a connected socket to a node, whereas
    /// `connection_timeout` refers to the whole connection, validation included.
    pub fn socket_connection_timeout(mut self, period: Duration) -> Self {
        self.settings.socket_connection_timeout = period;
        self
    }
    #[cfg(feature = "tls")]
    /// Enable secure connection with the server/cluster.
    pub fn enable_secure_connection(mut self, config: crate::SecureSettings) -> Self {
        self.settings.tls_client_config = Some(config);
        self
    }
    /// Creates a connection to a single EventStore node. The connection will
    /// start right away.
    pub async fn single_node_connection(self, addr: SocketAddr) -> Connection {
        self.start_common_with_runtime(DiscoveryProcess::Static(addr))
            .await
    }
    /// Creates a connection to a cluster of EventStore nodes.
    pub async fn cluster_nodes_connection(self, setts: ClusterSettings) -> Connection {
        self.start_common_with_runtime(DiscoveryProcess::Cluster(Box::new(setts)))
            .await
    }
    // Shared tail of both connection constructors: build the client and start
    // its state machine before handing it to the caller.
    async fn start_common_with_runtime(self, discovery: DiscoveryProcess) -> Connection {
        let mut client = Connection::make(self.settings, discovery);
        client.start().await;
        client
    }
}
// Capacity of the bounded mpsc channels used between components.
const DEFAULT_BOX_SIZE: usize = 500;
/// Drives the connection lifecycle: processes `Msg`s from the channel while
/// the connection is live, then drains/aborts pending operations while closing.
/// (Fix: the "clearing operations" log message said "uncomplete" — corrected
/// to "incomplete".)
async fn connection_state_machine(
    mut sender: Sender<Msg>,
    mut recv: Receiver<Msg>,
    mut driver: Driver,
) {
    // Transition to the closing state: stop the physical connection and push a
    // `Marker` message delimiting the in-flight messages that remain to drain.
    async fn closing(sender: &mut Sender<Msg>, driver: &mut Driver) {
        driver.close_connection();
        let _ = sender.send(Msg::Marker).await;
        info!("Closing the connection...");
        info!("Start clearing incomplete operations...");
    }
    // Live state
    while let Some(msg) = recv.next().await {
        match msg {
            Msg::Start => driver.start().await,
            Msg::Establish(endpoint) => driver.on_establish(endpoint),
            Msg::Established(id) => driver.on_established(id).await,
            Msg::ConnectionClosed(conn_id, error) => driver.on_connection_closed(conn_id, &error),
            Msg::Arrived(pkg) => driver.on_package_arrived(pkg).await,
            Msg::Transmit(pkg, mailbox) => driver.on_transmit(mailbox, pkg).await,
            Msg::Send(pkg) => driver.on_send_pkg(pkg).await,
            Msg::Tick => {
                if let Report::Quit = driver.on_tick().await {
                    closing(&mut sender, &mut driver).await;
                    break;
                }
            }
            // It's impossible to receive `Msg::Marker` at `State::Live` state.
            // However we can hit two birds with one stone with pattern-matching
            // coverage checker.
            Msg::Shutdown | Msg::Marker => {
                info!("User-shutdown request received.");
                closing(&mut sender, &mut driver).await;
                break;
            }
        }
    }
    // Closing state: fail new transmissions immediately and keep handling
    // inbound packages until the `Marker` sent by `closing` is reached.
    while let Some(msg) = recv.next().await {
        match msg {
            Msg::Transmit(_, mut mailbox) => {
                let _ = mailbox.send(OpMsg::Failed(OperationError::Aborted)).await;
            }
            Msg::Arrived(pkg) => driver.on_package_arrived(pkg).await,
            Msg::Marker => {
                // We've reached the end of our checkpoint; we can properly
                // abort uncompleted operations.
                driver.abort().await;
                info!("Connection closed properly.");
                break;
            }
            _ => {}
        }
    }
}
// Strategy used to locate the node(s) to connect to: either a fixed socket
// address or a cluster discovery configuration.
enum DiscoveryProcess {
    Static(SocketAddr),
    Cluster(Box<ClusterSettings>),
}
impl Connection {
    /// Return a connection builder.
    pub fn builder() -> ConnectionBuilder {
        ConnectionBuilder {
            settings: Default::default(),
        }
    }
    // Build a `Connection` around the message channel created by `initialize`.
    fn make(settings: Settings, discovery: DiscoveryProcess) -> Connection {
        let sender = Self::initialize(settings, discovery);
        Connection { sender }
    }
    // Spawn the discovery task and the connection state machine, returning the
    // channel end used to submit messages to them.
    fn initialize(settings: Settings, discovery: DiscoveryProcess) -> Sender<Msg> {
        let (sender, recv) = channel(DEFAULT_BOX_SIZE);
        let (start_discovery, run_discovery) = futures::channel::mpsc::channel(DEFAULT_BOX_SIZE);
        let cloned_sender = sender.clone();
        match discovery {
            DiscoveryProcess::Static(addr) => {
                let endpoint = types::Endpoint::from_addr(addr);
                let action = discovery::constant::discover(run_discovery, sender.clone(), endpoint);
                tokio::spawn(action);
            }
            DiscoveryProcess::Cluster(setts) => {
                #[cfg(feature = "tls")]
                {
                    // Secure mode follows whether a TLS client config was provided.
                    let secure_mode = settings.tls_client_config.is_some();
                    let action = discovery::cluster::discover(
                        run_discovery,
                        sender.clone(),
                        *setts,
                        secure_mode,
                    );
                    tokio::spawn(action);
                }
                #[cfg(not(feature = "tls"))]
                {
                    let action =
                        discovery::cluster::discover(run_discovery, sender.clone(), *setts, false);
                    tokio::spawn(action);
                }
            }
        };
        let driver = Driver::new(settings, start_discovery, sender.clone());
        tokio::spawn(connection_state_machine(cloned_sender, recv, driver));
        sender
    }
    // Kick the state machine into its live state.
    async fn start(&mut self) {
        let _ = self.sender.send(Msg::Start).await;
    }
    /// Sends events to a given stream.
    pub fn write_events<S>(&self, stream: S) -> commands::WriteEvents
    where
        S: AsRef<str>,
    {
        commands::WriteEvents::new(self.sender.clone(), stream)
    }
    /// Sets the metadata for a stream.
    pub fn write_stream_metadata<S>(
        &self,
        stream: S,
        metadata: StreamMetadata,
    ) -> commands::WriteStreamMetadata
    where
        S: AsRef<str>,
    {
        commands::WriteStreamMetadata::new(self.sender.clone(), stream, metadata)
    }
    /// Reads a single event from a given stream.
    pub fn read_event<S>(&self, stream: S, event_number: i64) -> commands::ReadEvent
    where
        S: AsRef<str>,
    {
        commands::ReadEvent::new(self.sender.clone(), stream, event_number)
    }
    /// Gets the metadata of a stream.
    pub fn read_stream_metadata<S>(&self, stream: S) -> commands::ReadStreamMetadata
    where
        S: AsRef<str>,
    {
        commands::ReadStreamMetadata::new(self.sender.clone(), stream)
    }
    /// Starts a transaction on a given stream.
    pub fn start_transaction<S>(&self, stream: S) -> commands::TransactionStart
    where
        S: AsRef<str>,
    {
        commands::TransactionStart::new(self.sender.clone(), stream)
    }
    /// Reads events from a given stream. The reading can be done forward and
    /// backward.
    pub fn read_stream<S>(&self, stream: S) -> commands::ReadStreamEvents
    where
        S: AsRef<str>,
    {
        commands::ReadStreamEvents::new(self.sender.clone(), stream)
    }
    /// Reads events for the system stream `$all`. The reading can be done
    /// forward and backward.
    pub fn read_all(&self) -> commands::ReadAllEvents {
        commands::ReadAllEvents::new(self.sender.clone())
    }
    /// Deletes a given stream. By default, the server performs a soft delete.
    /// More information can be found on the [Deleting streams and events]
    /// page.
    ///
    /// [Deleting streams and events]: https://eventstore.org/docs/server/deleting-streams-and-events/index.html
    pub fn delete_stream<S>(&self, stream: S) -> commands::DeleteStream
    where
        S: AsRef<str>,
    {
        commands::DeleteStream::new(self.sender.clone(), stream)
    }
    /// Subscribes to a given stream. You will get notified of each new event
    /// written to this stream.
    /// NOTE(review): the method name is missing an 's' (`subcribe`); kept as-is
    /// because renaming would break the public API.
    pub fn subcribe_to_stream<S>(&self, stream_id: S) -> commands::SubscribeToStream
    where
        S: AsRef<str>,
    {
        commands::SubscribeToStream::new(self.sender.clone(), stream_id)
    }
    /// Subscribes to a given stream. This kind of subscription specifies a
    /// starting point (by default, the beginning of a stream). For a regular
    /// stream, that starting point will be an event number. For the system
    /// stream `$all`, it will be a position in the transaction file
    /// (see [`subscribe_to_all_from`]). This subscription will fetch every event
    /// until the end of the stream, then will dispatch subsequently written
    /// events.
    ///
    /// For example, if a starting point of 50 is specified when a stream has
    /// 100 events in it, the subscriber can expect to see events 51 through
    /// 100, and then any subsequently written events until such time
    /// as the subscription is dropped or closed.
    ///
    /// [`subscribe_to_all_from`]: #method.subscribe_to_all_from
    pub fn subscribe_to_stream_from<S>(&self, stream: S) -> commands::RegularCatchupSubscribe
    where
        S: AsRef<str>,
    {
        commands::RegularCatchupSubscribe::new(self.sender.clone(), stream)
    }
    /// Like [`subscribe_to_stream_from`] but specific to system `$all` stream.
    ///
    /// [`subscribe_to_stream_from`]: #method.subscribe_to_stream_from
    pub fn subscribe_to_all_from(&self) -> commands::AllCatchupSubscribe {
        commands::AllCatchupSubscribe::new(self.sender.clone())
    }
    /// Creates a persistent subscription group on a stream.
    ///
    /// Persistent subscriptions are a special kind of subscription where the
    /// server remembers the state of the subscription. This allows for many
    /// different modes of operations compared to a regular or catchup
    /// subscription where the client holds the subscription state.
    pub fn create_persistent_subscription<S>(
        &self,
        stream_id: S,
        group_name: S,
    ) -> commands::CreatePersistentSubscription
    where
        S: AsRef<str>,
    {
        commands::CreatePersistentSubscription::new(stream_id, group_name, self.sender.clone())
    }
    /// Updates a persistent subscription group on a stream.
    pub fn update_persistent_subscription<S>(
        &self,
        stream_id: S,
        group_name: S,
    ) -> commands::UpdatePersistentSubscription
    where
        S: AsRef<str>,
    {
        commands::UpdatePersistentSubscription::new(stream_id, group_name, self.sender.clone())
    }
    /// Deletes a persistent subscription group on a stream.
    pub fn delete_persistent_subscription<S>(
        &self,
        stream_id: S,
        group_name: S,
    ) -> commands::DeletePersistentSubscription
    where
        S: AsRef<str>,
    {
        commands::DeletePersistentSubscription::new(stream_id, group_name, self.sender.clone())
    }
    /// Connects to a persistent subscription group on a stream.
    pub fn connect_persistent_subscription<S>(
        &self,
        stream_id: S,
        group_name: S,
    ) -> commands::ConnectToPersistentSubscription
    where
        S: AsRef<str>,
    {
        commands::ConnectToPersistentSubscription::new(stream_id, group_name, self.sender.clone())
    }
    /// Closes the connection to the server.
    ///
    /// When closing a connection, a `Connection` might have ongoing operations
    /// running. `shutdown` makes sure the `Connection` has handled
    /// everything properly when returning.
    ///
    /// `shutdown` blocks the current thread.
    pub async fn shutdown(mut self) {
        let _ = self.sender.send(Msg::Shutdown).await;
    }
}
| 34.791762 | 111 | 0.628716 |
1820ef7b6c95dc74d65121e67d7a155d8187e168 | 13,245 | use crate::{error::msg_from_errno, GroupSlice};
use libzmq_sys as sys;
use sys::errno;
use libc::size_t;
use log::error;
use serde::{Deserialize, Serialize};
use std::{
ffi::CStr,
fmt,
os::raw::c_void,
ptr, slice,
str::{self, Utf8Error},
};
/// A generated ID used to route messages to the appropriate client.
///
/// A `RoutingId` is an unique temporary identifier for each `Client`
/// connection on a `Server` socket generated by ØMQ.
///
/// # Example
/// ```
/// # fn main() -> Result<(), anyhow::Error> {
/// use libzmq::{prelude::*, *};
///
/// let addr: TcpAddr = "127.0.0.1:*".try_into()?;
///
/// let server = ServerBuilder::new()
///     .bind(addr)
///     .build()?;
///
/// let bound = server.last_endpoint()?;
///
/// let client = ClientBuilder::new()
///     .connect(bound)
///     .build()?;
///
/// // The client initiates the conversation.
/// client.send("")?;
/// // ØMQ generates a `RoutingId` for the client upon reception of the
/// // first message.
/// let msg = server.recv_msg()?;
/// let id = msg.routing_id().unwrap();
///
/// // This `RoutingId` is used to route messages back to the `Client`.
/// server.route("", id)?;
/// #
/// #     Ok(())
/// # }
/// ```
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct RoutingId(pub u32);
/// A handle to a message owned by ØMQ.
///
/// A ØMQ message is a discrete unit of data passed between applications
/// or components of the same application. ØMQ messages have no internal
/// structure and from the point of view of ØMQ itself they are considered
/// to be opaque binary data.
pub struct Msg {
    // Raw FFI message structure owned by this handle.
    msg: sys::zmq_msg_t,
}
impl From<RoutingId> for u32 {
fn from(id: RoutingId) -> u32 {
id.0
}
}
impl From<u32> for RoutingId {
fn from(u: u32) -> Self {
Self(u)
}
}
impl Msg {
    /// Create an empty `Msg`.
    ///
    /// See [`zmq_msg_init`].
    ///
    /// [`zmq_msg_init`]: http://api.zeromq.org/master:zmq-msg-init
    ///
    /// ```
    /// use libzmq::Msg;
    ///
    /// let msg = Msg::new();
    ///
    /// assert!(msg.is_empty());
    /// ```
    pub fn new() -> Self {
        Self::default()
    }
    /// Create a `Msg` preallocated with `size` zeroed bytes.
    /// NOTE(review): per the ØMQ docs `zmq_msg_init_size` leaves the content
    /// uninitialized, not zeroed — confirm whether "zeroed" is guaranteed here.
    ///
    /// See [`zmq_msg_init_size`].
    ///
    /// [`zmq_msg_init_size`]: http://api.zeromq.org/master:zmq-msg-init-size
    ///
    /// ```
    /// use libzmq::Msg;
    ///
    /// let size = 420;
    /// let msg = Msg::with_size(size);
    ///
    /// assert_eq!(msg.len(), size);
    /// ```
    pub fn with_size(size: usize) -> Self {
        unsafe {
            Self::deferred_alloc(|msg| {
                sys::zmq_msg_init_size(msg, size as size_t)
            })
        }
    }
    /// Returns the message content size in bytes.
    ///
    /// See [`zmq_msg_size`].
    ///
    /// [`zmq_msg_size`]: http://api.zeromq.org/master:zmq-msg-size
    pub fn len(&self) -> usize {
        unsafe { sys::zmq_msg_size(self.as_ptr()) }
    }
    /// Returns `true` if the message content has size zero.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Return the message content as a `str` slice if it is valid UTF-8.
    ///
    /// ```
    /// # fn main() -> Result<(), anyhow::Error> {
    /// use libzmq::Msg;
    ///
    /// let text = "blzit";
    /// let msg = Msg::from(text);
    ///
    /// assert_eq!(msg.to_str()?, text);
    /// #
    /// #     Ok(())
    /// # }
    /// ```
    pub fn to_str(&self) -> Result<&str, Utf8Error> {
        str::from_utf8(self.as_bytes())
    }
/// Return the message content as a byte slice.
///
/// ```
/// use libzmq::Msg;
///
/// let bytes: &[u8] = b"blzit";
/// let msg = Msg::from(bytes);
///
/// assert_eq!(msg.as_bytes(), bytes);
/// ```
pub fn as_bytes(&self) -> &[u8] {
// This is safe because we're constraining the slice to the lifetime of
// this message.
unsafe {
let ptr = &self.msg as *const _ as *mut _;
let data = sys::zmq_msg_data(ptr);
slice::from_raw_parts(data as *mut u8, self.len())
}
}
/// Return the message content as a mutable byte slice.
pub fn as_bytes_mut(&mut self) -> &mut [u8] {
// This is safe because we're constraining the slice to the lifetime of
// this message.
unsafe {
let data = sys::zmq_msg_data(self.as_mut_ptr());
slice::from_raw_parts_mut(data as *mut u8, self.len())
}
}
/// Get routing ID property on the message.
///
/// See [`zmq_msg_routing_id`].
///
/// [`zmq_msg_routing_id`]: http://api.zeromq.org/master:zmq-msg-routing-id
pub fn routing_id(&self) -> Option<RoutingId> {
let rc = unsafe {
// This is safe since `zmq_msg_routing_id` has the wrong signature.
// The `msg` pointer should be `*const zmq_msg_t` since
// the it is not modified by the operation.
let ptr = self.as_ptr() as *mut _;
sys::zmq_msg_routing_id(ptr)
};
if rc == 0 {
None
} else {
Some(RoutingId(rc))
}
}
/// Set routing ID property on the message.
///
/// # Usage Contract
/// * Cannot be zero
///
/// # Returned Error Variants
/// * [`InvalidInput`] (if contract is not followed)
///
/// See [`zmq_msg_set_routing_id`].
///
/// [`zmq_msg_set_routing_id`]: http://api.zeromq.org/master:zmq-msg-set-routing-id
/// [`InvalidInput`]: ../enum.Error.html#variant.InvalidInput
pub fn set_routing_id(&mut self, routing_id: RoutingId) {
let rc = unsafe {
sys::zmq_msg_set_routing_id(self.as_mut_ptr(), routing_id.0)
};
// Should never occur.
if rc != 0 {
let errno = unsafe { sys::zmq_errno() };
panic!(msg_from_errno(errno));
}
}
/// The group property on the message.
pub fn group(&self) -> Option<&GroupSlice> {
// This is safe we don't actually mutate the msg.
let mut_msg_ptr = self.as_ptr() as *mut _;
let char_ptr = unsafe { sys::zmq_msg_group(mut_msg_ptr) };
if char_ptr.is_null() {
None
} else {
let c_str = unsafe { CStr::from_ptr(char_ptr) };
Some(GroupSlice::from_c_str_unchecked(c_str))
}
}
/// Set the group property on the message.
///
/// ```
/// # fn main() -> Result<(), anyhow::Error> {
/// use libzmq::{prelude::TryInto, Msg, Group};
///
/// let a: Group = "A".try_into()?;
///
/// let mut msg: Msg = "some msg".into();
/// msg.set_group(&a);
/// assert_eq!(a, msg.group().unwrap());
/// #
/// # Ok(())
/// # }
/// ```
///
/// # Usage Contract
/// * Cannot hold more than 15 characters.
///
/// # Returned Error Variants
/// * [`InvalidInput`] (if contract is not followed)
///
/// [`InvalidInput`]: ../enum.Error.html#variant.InvalidInput
pub fn set_group<G>(&mut self, group: G)
where
G: AsRef<GroupSlice>,
{
let group = group.as_ref();
let rc = unsafe {
sys::zmq_msg_set_group(self.as_mut_ptr(), group.as_c_str().as_ptr())
};
// Should never occur.
if rc == -1 {
let errno = unsafe { sys::zmq_errno() };
panic!(msg_from_errno(errno));
}
}
// Defers the allocation of a zmq_msg_t to the closure.
//
// TODO Consider allocating without zeroing.
// https://doc.rust-lang.org/std/mem/union.MaybeUninit.html
unsafe fn deferred_alloc<F>(f: F) -> Msg
where
F: FnOnce(&mut sys::zmq_msg_t) -> i32,
{
// This calls mem::zeroed().
let mut msg = sys::zmq_msg_t::default();
let rc = f(&mut msg);
if rc == -1 {
panic!(msg_from_errno(sys::zmq_errno()));
}
Msg { msg }
}
pub(crate) fn as_mut_ptr(&mut self) -> *mut sys::zmq_msg_t {
&mut self.msg
}
pub(crate) fn as_ptr(&self) -> *const sys::zmq_msg_t {
&self.msg
}
pub(crate) fn has_more(&self) -> bool {
let rc = unsafe { sys::zmq_msg_more(self.as_ptr()) };
rc != 0
}
}
impl PartialEq for Msg {
/// Compares the two underlying raw C pointers.
fn eq(&self, other: &Self) -> bool {
ptr::eq(self.as_ptr(), other.as_ptr())
}
}
impl Eq for Msg {}
impl fmt::Debug for Msg {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self.as_bytes())
}
}
impl Default for Msg {
/// Initialises an empty ØMQ message.
///
/// See [`zmq_msg_init`].
///
/// [`zmq_msg_init`]: http://api.zeromq.org/master:zmq-msg-init
fn default() -> Self {
unsafe { Self::deferred_alloc(|msg| sys::zmq_msg_init(msg)) }
}
}
impl Clone for Msg {
/// Copy the content of the message into another message.
///
/// See [`zmq_msg_copy`].
///
/// [`zmq_msg_copy`]: http://api.zeromq.org/master:zmq-msg-copy
fn clone(&self) -> Self {
let mut msg = Msg::new();
let rc = unsafe {
// This is safe since `zmq_msg_copy` has the wrong signature.
// The `src_` pointer should be `*const zmq_msg_t` since
// the source message is not modified by the operation.
let ptr = self.as_ptr() as *mut _;
sys::zmq_msg_copy(msg.as_mut_ptr(), ptr)
};
if rc != 0 {
let errno = unsafe { sys::zmq_errno() };
match errno {
errno::EFAULT => panic!("invalid message"),
_ => panic!(msg_from_errno(errno)),
}
}
msg
}
}
impl Drop for Msg {
/// Releases the ØMQ message.
///
/// See [`zmq_msg_close`].
///
/// [`zmq_msg_close`]: http://api.zeromq.org/master:zmq-msg-close
fn drop(&mut self) {
let rc = unsafe { sys::zmq_msg_close(self.as_mut_ptr()) };
if rc != 0 {
let errno = unsafe { sys::zmq_errno() };
error!("error while dropping message: {}", msg_from_errno(errno));
}
}
}
impl From<Box<[u8]>> for Msg {
    /// Converts a box of bytes into a `Msg` without copying.
    ///
    /// ØMQ takes ownership of the buffer and invokes the supplied free
    /// callback once it no longer needs the data.
    fn from(data: Box<[u8]>) -> Msg {
        // Free callback handed to `zmq_msg_init_data`; called by ØMQ when the
        // message is released.
        unsafe extern "C" fn drop_zmq_msg_t(
            data: *mut c_void,
            _hint: *mut c_void,
        ) {
            // Convert the pointer back into a Box and drop it.
            // NOTE(review): the allocation was produced by
            // `Box::<[u8]>::into_raw` but is reconstructed here as `Box<u8>`
            // (a single byte), so the deallocation layout may not match the
            // allocation — confirm soundness / consider carrying the length
            // through the `hint` pointer.
            Box::from_raw(data as *mut u8);
        }
        // An empty box has nothing for ØMQ to own; fall back to an empty Msg.
        if data.is_empty() {
            return Msg::new();
        }
        let size = data.len() as size_t;
        // Leak the box; ownership transfers to ØMQ until the callback fires.
        let data = Box::into_raw(data);
        unsafe {
            Self::deferred_alloc(|msg| {
                sys::zmq_msg_init_data(
                    msg,
                    data as *mut c_void,
                    size,
                    Some(drop_zmq_msg_t),
                    ptr::null_mut(), // hint
                )
            })
        }
    }
}
// The lifetime must appear in the trait reference (`&'a [u8]`); a declared but
// unconstrained `'a` is rejected by the compiler (E0207).
impl<'a> From<&'a [u8]> for Msg {
    /// Converts a byte slice into a `Msg` by copying.
    fn from(slice: &'a [u8]) -> Self {
        unsafe {
            let mut msg = Msg::with_size(slice.len());
            // Copy the caller's bytes into the freshly allocated message body.
            ptr::copy_nonoverlapping(
                slice.as_ptr(),
                msg.as_bytes_mut().as_mut_ptr(),
                slice.len(),
            );
            msg
        }
    }
}
macro_rules! array_impls {
($($N:expr)+) => {
$(
impl From<[u8; $N]> for Msg {
/// Converts an array into a `Msg` without copying.
fn from(array: [u8; $N]) -> Self {
let boxed: Box<[u8]> = Box::new(array);
Msg::from(boxed)
}
}
)+
}
}
array_impls! {
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
30 31 32
}
impl From<Vec<u8>> for Msg {
    /// Converts a byte vector into a `Msg` without copying.
    fn from(bytes: Vec<u8>) -> Self {
        Msg::from(bytes.into_boxed_slice())
    }
}
impl<'a> From<&'a str> for Msg {
    /// Converts a `str` slice into a `Msg` by copying.
    fn from(text: &str) -> Self {
        Msg::from(text.as_bytes())
    }
}
impl From<String> for Msg {
    /// Converts a `String` into a `Msg` without copying.
    fn from(text: String) -> Self {
        Msg::from(text.into_bytes())
    }
}
// Blanket impl: any clonable type convertible to `Msg` can also be converted
// from a reference, at the cost of one clone.
impl<'a, T> From<&'a T> for Msg
where
    T: Into<Msg> + Clone,
{
    fn from(v: &'a T) -> Self {
        v.clone().into()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use std::mem;
#[test]
fn test_cast_routing_id_slice() {
assert_eq!(mem::size_of::<u32>(), mem::size_of::<RoutingId>());
let routing_stack: &[u32] = &[1, 2, 3, 4];
// Cast &[u32] as &[RoutingId].
let cast_stack = unsafe {
slice::from_raw_parts(
routing_stack.as_ptr() as *const RoutingId,
routing_stack.len(),
)
};
for (&i, &j) in routing_stack.iter().zip(cast_stack.iter()) {
assert_eq!(i, j.0);
}
}
}
| 26.279762 | 87 | 0.516723 |
c1de4e4fecba166b8c8810a83b87c8e04083c7a5 | 8,506 | #[cfg(any(
not(unix),
target_os = "macos",
target_os = "android",
target_os = "ios",
target_os = "emscripten"
))]
fn main() {
unimplemented!()
}
#[cfg(all(
    unix,
    not(any(
        target_os = "macos",
        target_os = "android",
        target_os = "ios",
        target_os = "emscripten"
    ))
))]
// On Wayland-capable Unix targets, delegate to the real demo in `mod wayland`;
// all other targets get the `unimplemented!()` stub above.
fn main() {
    wayland::main();
}
#[cfg(all(
unix,
not(any(
target_os = "macos",
target_os = "android",
target_os = "ios",
target_os = "emscripten"
))
))]
mod wayland {
extern crate andrew;
extern crate copypasta;
extern crate smithay_client_toolkit as sctk;
use wayland::copypasta::wayland_clipboard::create_clipboards;
use wayland::copypasta::ClipboardProvider;
use std::io::{Read, Seek, SeekFrom, Write};
use std::sync::{atomic, Arc, Mutex};
use wayland::sctk::keyboard::{map_keyboard_auto, Event as KbEvent, KeyState};
use wayland::sctk::utils::{DoubleMemPool, MemPool};
use wayland::sctk::window::{ConceptFrame, Event as WEvent, Window};
use wayland::sctk::Environment;
use wayland::sctk::reexports::client::protocol::{wl_shm, wl_surface};
use wayland::sctk::reexports::client::{Display, NewProxy};
use wayland::andrew::shapes::rectangle;
use wayland::andrew::text;
use wayland::andrew::text::fontconfig;
pub fn main() {
let (display, mut event_queue) =
Display::connect_to_env().expect("Failed to connect to the wayland server.");
let env = Environment::from_display(&*display, &mut event_queue).unwrap();
let (mut ctx, _) = create_clipboards(&display);
let cb_contents = Arc::new(Mutex::new(String::new()));
let seat = env.manager.instantiate_range(2, 6, NewProxy::implement_dummy).unwrap();
let need_redraw = Arc::new(atomic::AtomicBool::new(false));
let need_redraw_clone = need_redraw.clone();
let cb_contents_clone = cb_contents.clone();
map_keyboard_auto(&seat, move |event: KbEvent, _| {
if let KbEvent::Key { state: KeyState::Pressed, utf8: Some(text), .. } = event {
if text == " " {
*cb_contents_clone.lock().unwrap() = ctx.get_contents().unwrap();
need_redraw_clone.store(true, atomic::Ordering::Relaxed)
} else if text == "s" {
ctx.set_contents(
"This is an example text thats been copied to the wayland clipboard :)"
.to_string(),
)
.unwrap();
} else if text == "t" {
ctx.set_contents("Alternative text :)".to_string()).unwrap();
}
}
})
.unwrap();
let mut dimensions = (320u32, 240u32);
let surface = env.compositor.create_surface(NewProxy::implement_dummy).unwrap();
let next_action = Arc::new(Mutex::new(None::<WEvent>));
let waction = next_action.clone();
let mut window =
Window::<ConceptFrame>::init_from_env(&env, surface, dimensions, move |evt| {
let mut next_action = waction.lock().unwrap();
// Keep last event in priority order : Close > Configure > Refresh
let replace = match (&evt, &*next_action) {
(_, &None)
| (_, &Some(WEvent::Refresh))
| (&WEvent::Configure { .. }, &Some(WEvent::Configure { .. }))
| (&WEvent::Close, _) => true,
_ => false,
};
if replace {
*next_action = Some(evt);
}
})
.expect("Failed to create a window !");
window.new_seat(&seat);
window.set_title("Clipboard".to_string());
let mut pools =
DoubleMemPool::new(&env.shm, || {}).expect("Failed to create a memory pool !");
let mut font_data = Vec::new();
std::fs::File::open(
&fontconfig::FontConfig::new().unwrap().get_regular_family_fonts("sans").unwrap()[0],
)
.unwrap()
.read_to_end(&mut font_data)
.unwrap();
if !env.shell.needs_configure() {
// initial draw to bootstrap on wl_shell
if let Some(pool) = pools.pool() {
redraw(pool, window.surface(), dimensions, &font_data, "".to_string());
}
window.refresh();
}
loop {
match next_action.lock().unwrap().take() {
Some(WEvent::Close) => break,
Some(WEvent::Refresh) => {
window.refresh();
window.surface().commit();
},
Some(WEvent::Configure { new_size, .. }) => {
if let Some((w, h)) = new_size {
window.resize(w, h);
dimensions = (w, h)
}
window.refresh();
if let Some(pool) = pools.pool() {
redraw(
pool,
window.surface(),
dimensions,
&font_data,
cb_contents.lock().unwrap().clone(),
);
}
},
None => {},
}
if need_redraw.swap(false, atomic::Ordering::Relaxed) {
if let Some(pool) = pools.pool() {
redraw(
pool,
window.surface(),
dimensions,
&font_data,
cb_contents.lock().unwrap().clone(),
);
}
window.surface().damage_buffer(0, 0, dimensions.0 as i32, dimensions.1 as i32);
window.surface().commit();
}
event_queue.dispatch().unwrap();
}
}
fn redraw(
pool: &mut MemPool,
surface: &wl_surface::WlSurface,
dimensions: (u32, u32),
font_data: &[u8],
cb_contents: String,
) {
let (buf_x, buf_y) = (dimensions.0 as usize, dimensions.1 as usize);
pool.resize(4 * buf_x * buf_y).expect("Failed to resize the memory pool.");
let mut buf = vec![0; 4 * buf_x * buf_y];
let mut canvas =
andrew::Canvas::new(&mut buf, buf_x, buf_y, 4 * buf_x, andrew::Endian::native());
let bg = rectangle::Rectangle::new((0, 0), (buf_x, buf_y), None, Some([255, 170, 20, 45]));
canvas.draw(&bg);
let text_box = rectangle::Rectangle::new(
(buf_x / 30, buf_y / 35),
(buf_x - 2 * (buf_x / 30), (buf_x as f32 / 14.) as usize),
Some((3, [255, 255, 255, 255], rectangle::Sides::ALL, Some(4))),
None,
);
canvas.draw(&text_box);
let helper_text = text::Text::new(
(buf_x / 25, buf_y / 30),
[255, 255, 255, 255],
font_data,
buf_x as f32 / 40.,
2.0,
"Press space to draw clipboard contents",
);
canvas.draw(&helper_text);
let helper_text = text::Text::new(
(buf_x / 25, buf_y / 15),
[255, 255, 255, 255],
font_data,
buf_x as f32 / 40.,
2.0,
"Press 's' to store example text to clipboard",
);
canvas.draw(&helper_text);
for i in (0..cb_contents.len()).step_by(36) {
let content = if cb_contents.len() < i + 36 {
cb_contents[i..].to_string()
} else {
cb_contents[i..i + 36].to_string()
};
let text = text::Text::new(
(buf_x / 10, buf_y / 8 + (i as f32 * buf_y as f32 / 1000.) as usize),
[255, 255, 255, 255],
font_data,
buf_x as f32 / 40.,
2.0,
content,
);
canvas.draw(&text);
}
pool.seek(SeekFrom::Start(0)).unwrap();
pool.write_all(canvas.buffer).unwrap();
pool.flush().unwrap();
let new_buffer =
pool.buffer(0, buf_x as i32, buf_y as i32, 4 * buf_x as i32, wl_shm::Format::Argb8888);
surface.attach(Some(&new_buffer), 0, 0);
surface.commit();
}
}
| 34.024 | 99 | 0.487068 |
b92e1b6162b9cffd4a407c2b30fe4f2add324c24 | 14,053 | use std::{
fmt::{self, Display, Formatter},
io::{self, Read, Seek, SeekFrom},
};
use thiserror::Error;
/// Errors produced while tokenizing the input stream.
#[derive(Debug, Error)]
pub enum TokenizeError {
    /// Underlying reader failure (read or seek).
    #[error("generic IO error: {0}")]
    Io(#[from] io::Error),
    /// A character that no token rule accepts.
    #[error("unexpected character {0}")]
    UnexpectedCharacter(char),
    /// The collected digits could not be parsed as an `f64`.
    #[error("error parsing number")]
    NumberParseError,
}
/// An operator.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Op {
// Arithmetic
Add,
Sub,
Mul,
Div,
Mod,
// Miscellaneous
Assign,
RangeExclusive,
ArrayPush,
// Comparison
Eq,
Neq,
Lt,
Gt,
LtEq,
GtEq,
// Logic
Not,
And,
Or,
}
/// A separator.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Sep {
Comma,
Colon,
BraceOpen,
BraceClose,
ParensOpen,
ParensClose,
BracketOpen,
BracketClose,
}
/// A token, some significant piece of data to be parsed by the AST.
#[derive(Debug, Clone, PartialEq)]
pub enum Token {
/// A separator.
Sep(Sep),
/// An operator.
Op(Op),
/// An identifier. Not escaped, quoted, etc.
Identifier(String),
/// A string. Any text that is surrounded by quotes. Supports quote escaping.
String(String),
/// A number. Decimals optional.
Number(f64),
/// A boolean.
Boolean(bool),
}
impl Display for Token {
    /// Renders the token back to its surface syntax; strings are re-quoted.
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match self {
            // Payload-carrying tokens format their payload directly.
            Self::Identifier(ident) => write!(f, "{}", ident),
            Self::String(str) => write!(f, "\"{}\"", str),
            Self::Number(num) => write!(f, "{}", num),
            Self::Boolean(bool) => write!(f, "{}", bool),
            // Fixed tokens come from a static symbol table.
            Self::Sep(sep) => f.write_str(match sep {
                Sep::Comma => ",",
                Sep::Colon => ":",
                Sep::BraceOpen => "{",
                Sep::BraceClose => "}",
                Sep::ParensOpen => "(",
                Sep::ParensClose => ")",
                Sep::BracketOpen => "[",
                Sep::BracketClose => "]",
            }),
            Self::Op(op) => f.write_str(match op {
                Op::Add => "+",
                Op::Sub => "-",
                Op::Mul => "*",
                Op::Div => "/",
                Op::Mod => "%",
                Op::Assign => "=",
                Op::RangeExclusive => "..",
                Op::ArrayPush => "<<",
                Op::Lt => "<",
                Op::Gt => ">",
                Op::Eq => "==",
                Op::Neq => "!=",
                Op::LtEq => "<=",
                Op::GtEq => ">=",
                Op::And => "&&",
                Op::Or => "||",
                Op::Not => "!",
            }),
        }
    }
}
impl From<Op> for Token {
fn from(op: Op) -> Self {
Self::Op(op)
}
}
impl From<Sep> for Token {
fn from(sep: Sep) -> Self {
Self::Sep(sep)
}
}
/// A tokenizer. Wraps around a `Read + Seek` and reads out a stream of tokens, to be
/// parsed by the AST.
pub struct Tokenizer<R: Read + Seek> {
reader: R,
}
impl<R: Read + Seek> Tokenizer<R> {
pub fn new(reader: R) -> Self {
Tokenizer { reader }
}
/// Tokenize from the reader, converting to a `Result<Vec<Token>, TokenizeError>`.
pub fn tokenize(mut self) -> Result<Vec<Token>, TokenizeError> {
let mut tokens = vec![];
while let Ok(c) = self.peek_next() {
match c {
// whitespace: ignore
_ if c.is_whitespace() => self.skip()?,
// alphabetical characters: identifier
'A'..='Z' | 'a'..='z' => {
let ident = self.read_identifier()?;
match ident.as_str() {
// handle keywords
"true" | "yes" => tokens.push(Token::Boolean(true)),
"false" | "no" => tokens.push(Token::Boolean(false)),
// otherwise, this is a normal identifier
_ => tokens.push(Token::Identifier(ident)),
}
}
// a quote: string
'"' => tokens.push(Token::String(self.read_string()?)),
// a number: number
'0'..='9' => tokens.push(Token::Number(self.read_number(false)?)),
// a number sign: comment
'#' => {
self.read_while(|c| c != '\n')?;
}
// arithmetic operators
'+' => {
tokens.push(Token::Op(Op::Add));
self.skip()?;
}
'-' => {
// this could be two things:
// a number (there must be a numerical character IMMEDIATELY after this one)
// anything else
self.skip()?;
match self.peek_next()? {
'0'..='9' => tokens.push(Token::Number(self.read_number(true)?)),
_ => tokens.push(Token::Op(Op::Sub)),
}
}
'*' => {
tokens.push(Token::Op(Op::Mul));
self.skip()?;
}
'/' => {
tokens.push(Token::Op(Op::Div));
self.skip()?;
}
'%' => {
tokens.push(Token::Op(Op::Mod));
self.skip()?;
}
// other operators/separators
'<' => {
self.skip()?;
match self.peek_next() {
Ok('=') => {
tokens.push(Token::Op(Op::LtEq));
self.skip()?;
}
Ok('<') => {
tokens.push(Token::Op(Op::ArrayPush));
self.skip()?;
}
_ => tokens.push(Token::Op(Op::Lt)),
}
}
'>' => {
self.skip()?;
match self.peek_next() {
Ok('=') => {
tokens.push(Token::Op(Op::GtEq));
self.skip()?;
}
_ => tokens.push(Token::Op(Op::Gt)),
}
}
',' => {
tokens.push(Sep::Comma.into());
self.skip()?;
}
'{' => {
tokens.push(Sep::BraceOpen.into());
self.skip()?;
}
'}' => {
tokens.push(Sep::BraceClose.into());
self.skip()?;
}
'(' => {
tokens.push(Sep::ParensOpen.into());
self.skip()?;
}
')' => {
tokens.push(Sep::ParensClose.into());
self.skip()?;
}
'[' => {
tokens.push(Sep::BracketOpen.into());
self.skip()?;
}
']' => {
tokens.push(Sep::BracketClose.into());
self.skip()?;
}
':' => {
tokens.push(Sep::Colon.into());
self.skip()?;
}
'=' => {
self.skip()?;
match self.peek_next() {
Ok('=') => {
tokens.push(Token::Op(Op::Eq));
self.skip()?;
}
_ => tokens.push(Token::Op(Op::Assign)),
}
}
'!' => {
self.skip()?;
match self.peek_next() {
Ok('=') => {
tokens.push(Token::Op(Op::Neq));
self.skip()?;
}
_ => tokens.push(Token::Op(Op::Not)),
}
}
'&' => {
self.skip()?;
match self.peek_next()? {
'&' => {
tokens.push(Token::Op(Op::And));
self.skip()?;
}
x => return Err(TokenizeError::UnexpectedCharacter(x)),
}
}
'|' => {
self.skip()?;
match self.peek_next()? {
'|' => {
tokens.push(Token::Op(Op::And));
self.skip()?;
}
x => return Err(TokenizeError::UnexpectedCharacter(x)),
}
}
'.' => {
self.skip()?;
match self.next()? {
'.' => tokens.push(Op::RangeExclusive.into()),
x => return Err(TokenizeError::UnexpectedCharacter(x)),
}
}
x => return Err(TokenizeError::UnexpectedCharacter(x)),
}
}
Ok(tokens)
}
/// Read an identifier, which is just an alphanumeric bit of text.
fn read_identifier(&mut self) -> Result<String, TokenizeError> {
Ok(self
.read_while(|c| c.is_alphanumeric() || c == '_')?
.into_iter()
.collect())
}
/// Read a string, which is two quotations surrounding any amount of text.
fn read_string(&mut self) -> Result<String, TokenizeError> {
// skip the initial quotation
self.skip()?;
let mut escape = false;
let mut string = String::new();
while let Ok(c) = self.next() {
match c {
'"' if !escape => break,
'\\' if !escape => escape = true,
'n' if escape => string.push('\n'),
c => {
string.push(c);
escape = false;
}
}
}
Ok(string)
}
/// Read a number, which is an f64. Decimal optional.
fn read_number(&mut self, def_negative: bool) -> Result<f64, TokenizeError> {
let negative = if def_negative {
true
} else {
if let Ok('-') = self.peek_next() {
self.next()?;
true
} else {
false
}
};
let mut pre_dec = String::new(); // chars before the .
let mut post_dec = String::new(); // chars after the .
let mut dec_seen = false;
loop {
let c = match self.next() {
Ok(c) => c,
Err(_) => break,
};
match c {
'.' if dec_seen => return Err(TokenizeError::UnexpectedCharacter('.')),
'.' if !dec_seen => dec_seen = true,
'0'..='9' => {
if dec_seen {
post_dec.push(c);
} else {
pre_dec.push(c);
}
}
_ => {
self.back()?;
break;
}
}
}
match (pre_dec.is_empty(), post_dec.is_empty()) {
(true, true) => return Err(TokenizeError::NumberParseError),
(true, false) => pre_dec.push('0'),
(false, true) => post_dec.push('0'),
_ => (),
}
format!("{}.{}", pre_dec, post_dec)
.parse()
.map(|n: f64| if negative { -n } else { n })
.map_err(|_| TokenizeError::NumberParseError)
}
/// Read the next character in the reader, an `Option<char>`.
fn next(&mut self) -> Result<char, io::Error> {
let mut byte = [0u8];
self.reader.read_exact(&mut byte).map(|_| byte[0] as char)
}
/// Skips the next character in the reader.
fn skip(&mut self) -> Result<(), io::Error> {
self.reader.seek(SeekFrom::Current(1)).map(|_| ())
}
/// Goes back to the last character in the reader.
fn back(&mut self) -> Result<(), io::Error> {
self.reader.seek(SeekFrom::Current(-1)).map(|_| ())
}
/// Peeks ahead at the next character in the reader. This works by reading and then seeking back one on success.
fn peek_next(&mut self) -> Result<char, io::Error> {
self.next().map(|c| {
self.reader.seek(SeekFrom::Current(-1)).unwrap();
c
})
}
/// Reads bytes until the predicate returns false.
fn read_while<F>(&mut self, f: F) -> Result<Vec<char>, io::Error>
where
F: Fn(char) -> bool,
{
let mut v = vec![];
loop {
// get the next character and check if it passes the predicate
let (ch, ok) = match self.next() {
Ok(c) => (Some(c), f(c)),
Err(_) => (None, false),
};
// if it does,
if ok {
// add it to the array
v.push(ch.unwrap());
} else {
// otherwise seek back one and break out of the loop
if let Some(_) = ch {
self.reader.seek(SeekFrom::Current(-1))?;
}
break;
}
}
Ok(v)
}
}
| 30.55 | 116 | 0.391304 |
714fcf1d5786ab91f44e8efb17d370f2af85d094 | 4,544 | #![deny(warnings)]
//! # Sqlite support for the `r2d2` connection pool.
//!
//! Library crate: [r2d2-sqlite](https://crates.io/crates/r2d2-sqlite/)
//!
//! Integrated with: [r2d2](https://crates.io/crates/r2d2)
//! and [rusqlite](https://crates.io/crates/rusqlite)
//!
//! ## Example
//!
//! ```rust,no_run
//! extern crate r2d2;
//! extern crate r2d2_sqlite;
//! extern crate rusqlite;
//!
//! use std::thread;
//! use r2d2_sqlite::SqliteConnectionManager;
//! use rusqlite::params;
//!
//! fn main() {
//! let manager = SqliteConnectionManager::file("file.db");
//! let pool = r2d2::Pool::new(manager).unwrap();
//! pool.get()
//! .unwrap()
//! .execute("CREATE TABLE IF NOT EXISTS foo (bar INTEGER)", params![])
//! .unwrap();
//!
//! (0..10)
//! .map(|i| {
//! let pool = pool.clone();
//! thread::spawn(move || {
//! let conn = pool.get().unwrap();
//! conn.execute("INSERT INTO foo (bar) VALUES (?)", &[&i])
//! .unwrap();
//! })
//! })
//! .collect::<Vec<_>>()
//! .into_iter()
//! .map(thread::JoinHandle::join)
//! .collect::<Result<_, _>>()
//! .unwrap()
//! }
//! ```
extern crate r2d2;
extern crate rusqlite;
use rusqlite::{Connection, Error, OpenFlags};
use std::fmt;
use std::path::{Path, PathBuf};
#[derive(Debug)]
enum Source {
File(PathBuf),
Memory,
}
type InitFn = dyn Fn(&mut Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static;
/// An `r2d2::ManageConnection` for `rusqlite::Connection`s.
pub struct SqliteConnectionManager {
source: Source,
flags: OpenFlags,
init: Option<Box<InitFn>>,
}
impl fmt::Debug for SqliteConnectionManager {
    /// Debug output names the source, the open flags, and whether an init
    /// hook is installed (the closure itself is not printable).
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("SqliteConnectionManager");
        let _ = builder.field("source", &self.source);
        // BUGFIX: this previously printed `&self.source` twice; report the
        // actual open flags under the "flags" key.
        let _ = builder.field("flags", &self.flags);
        let _ = builder.field("init", &self.init.as_ref().map(|_| "InitFn"));
        builder.finish()
    }
}
impl SqliteConnectionManager {
/// Creates a new `SqliteConnectionManager` from file.
///
/// See `rusqlite::Connection::open`
pub fn file<P: AsRef<Path>>(path: P) -> Self {
Self {
source: Source::File(path.as_ref().to_path_buf()),
flags: OpenFlags::default(),
init: None,
}
}
/// Creates a new `SqliteConnectionManager` from memory.
pub fn memory() -> Self {
Self {
source: Source::Memory,
flags: OpenFlags::default(),
init: None,
}
}
/// Converts `SqliteConnectionManager` into one that sets OpenFlags upon
/// connection creation.
///
/// See `rustqlite::OpenFlags` for a list of available flags.
pub fn with_flags(self, flags: OpenFlags) -> Self {
Self { flags, ..self }
}
/// Converts `SqliteConnectionManager` into one that calls an initialization
/// function upon connection creation. Could be used to set PRAGMAs, for
/// example.
///
/// ### Example
///
/// Make a `SqliteConnectionManager` that sets the `foreign_keys` pragma to
/// true for every connection.
///
/// ```rust,no_run
/// # use r2d2_sqlite::{SqliteConnectionManager};
/// let manager = SqliteConnectionManager::file("app.db")
/// .with_init(|c| c.execute_batch("PRAGMA foreign_keys=1;"));
/// ```
pub fn with_init<F>(self, init: F) -> Self
where
F: Fn(&mut Connection) -> Result<(), rusqlite::Error> + Send + Sync + 'static,
{
let init: Option<Box<InitFn>> = Some(Box::new(init));
Self { init, ..self }
}
}
impl r2d2::ManageConnection for SqliteConnectionManager {
    type Connection = Connection;
    type Error = rusqlite::Error;
    /// Opens a new connection (file-backed or in-memory, per `source`) with
    /// the configured flags, then runs the optional `init` hook on it before
    /// handing it to the pool.
    fn connect(&self) -> Result<Connection, Error> {
        match self.source {
            Source::File(ref path) => Connection::open_with_flags(path, self.flags),
            Source::Memory => Connection::open_in_memory_with_flags(self.flags),
        }
        .map_err(Into::into)
        .and_then(|mut c| match self.init {
            None => Ok(c),
            Some(ref init) => init(&mut c).map(|_| c),
        })
    }
    /// Liveness check: executing an empty batch errors iff the connection is
    /// unusable.
    fn is_valid(&self, conn: &mut Connection) -> Result<(), Error> {
        conn.execute_batch("").map_err(Into::into)
    }
    /// Connections are never flagged broken without an explicit `is_valid`
    /// check.
    fn has_broken(&self, _: &mut Connection) -> bool {
        false
    }
}
| 29.894737 | 93 | 0.567121 |
fffd4bd28eb9be10add7972b291241bbda16ced2 | 1,139 | #[doc = "Reader of register PB_OVTDIS"]
// Register accessor boilerplate (reader/writer aliases plus field proxies) for
// PB_OVTDIS. NOTE(review): this appears to be machine-generated (svd2rust
// style) — prefer regenerating from the SVD over hand-editing.
pub type R = crate::R<u32, super::PB_OVTDIS>;
#[doc = "Writer for register PB_OVTDIS"]
pub type W = crate::W<u32, super::PB_OVTDIS>;
#[doc = "Register PB_OVTDIS `reset()`'s with value 0"]
impl crate::ResetValue for super::PB_OVTDIS {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
#[doc = "Reader of field `OVTDIS`"]
pub type OVTDIS_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `OVTDIS`"]
pub struct OVTDIS_W<'a> {
    w: &'a mut W,
}
impl<'a> OVTDIS_W<'a> {
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub unsafe fn bits(self, value: u16) -> &'a mut W {
        // Clear the low 16 field bits, then splice in the new value.
        self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
        self.w
    }
}
impl R {
    #[doc = "Bits 0:15 - Disable Over Voltage Capability"]
    #[inline(always)]
    pub fn ovtdis(&self) -> OVTDIS_R {
        OVTDIS_R::new((self.bits & 0xffff) as u16)
    }
}
impl W {
    #[doc = "Bits 0:15 - Disable Over Voltage Capability"]
    #[inline(always)]
    pub fn ovtdis(&mut self) -> OVTDIS_W {
        OVTDIS_W { w: self }
    }
}
b9c38546717ff311ef6e0906523c45965a2d5571 | 1,635 | use structopt::StructOpt;
use structopt::clap::AppSettings::*;
use super::Commands;
#[derive(Debug, StructOpt, Default, Clone)]
#[structopt(
global_settings = &[DisableVersion, DeriveDisplayOrder, VersionlessSubcommands],
about = "\nKubectl wrapper for running bash commands on a container, it's SSH!"
)]
pub struct CmdCtl {
/// Pod target.
///
pub pod: Option<String>,
/// Container target.
///
pub container: Option<String>,
/// Filter container list. ( ie: my-deployment-name )
#[structopt(short = "f", long = "filter")]
pub filter: Option<String>,
/// Namespace target. ( ie: Environment )
#[structopt(short = "n", long = "namespace")]
pub namespace: Option<String>,
/// Cluster target.
#[structopt(short = "c", long = "context")]
pub context: Option<String>,
/// Update token for eks using aws profile.
#[structopt(long = "eks")]
pub eks: Option<String>,
/// Enable verbose logging.
#[structopt(long = "verbose", short = "v")]
pub verbose: bool,
/// Perform dry-run analysis.
#[structopt(long = "dry-run")]
pub dry_run: bool,
#[structopt(subcommand)]
pub commands: Option<Commands>,
}
impl CmdCtl {
    /// Runs the subcommand, if one was given, and returns `self` so calls can
    /// be chained.
    pub fn run_command_process(self) -> CmdCtl {
        // Both arms of the old match returned `self`; only the side effect
        // depends on the Option.
        if let Some(commands) = &self.commands {
            commands.process();
        }
        self
    }
    /// Effective verbosity: the subcommand's setting wins when a subcommand
    /// is present; otherwise the top-level `-v` flag applies.
    pub fn is_verbose(&self) -> bool {
        // NOTE(review): the clone mirrors the original code; if
        // `Commands::is_verbose` takes `&self`, the clone can be dropped in
        // favour of `self.commands.as_ref()`.
        self.commands
            .clone()
            .map_or(self.verbose, |commands| commands.is_verbose())
    }
}
| 24.402985 | 84 | 0.582875 |
1c24028d7614e1378be6f3a4c333c887c0550dd8 | 2,143 | extern crate rocket;
extern crate rocket_contrib;
use std::env;
use std::path::PathBuf;
use rocket::Rocket;
use rocket::config::{Config, Environment};
use rocket_contrib::Template;
fn template_root() -> PathBuf {
let cwd = env::current_dir().expect("current working directory");
cwd.join("tests").join("templates")
}
fn rocket() -> Rocket {
let config = Config::build(Environment::Development)
.extra("template_dir", template_root().to_str().expect("template directory"))
.expect("valid configuration");
rocket::custom(config, true).attach(Template::fairing())
}
#[cfg(feature = "tera_templates")]
mod tera_tests {
    use super::*;
    use std::collections::HashMap;
    // NOTE(review): the "escaped" and "unescaped" expectations are currently
    // byte-identical; an HTML-escaping template would normally expect
    // entity-escaped output (e.g. `&lt;script /&gt;`) — confirm the escaped
    // constant wasn't corrupted in transit.
    const UNESCAPED_EXPECTED: &'static str
        = "\nh_start\ntitle: _test_\nh_end\n\n\n<script />\n\nfoot\n";
    const ESCAPED_EXPECTED: &'static str
        = "\nh_start\ntitle: _test_\nh_end\n\n\n<script />\n\nfoot\n";
    #[test]
    fn test_tera_templates() {
        let rocket = rocket();
        let mut map = HashMap::new();
        map.insert("title", "_test_");
        map.insert("content", "<script />");
        // Test with a txt file, which shouldn't escape.
        let template = Template::show(&rocket, "tera/txt_test", &map);
        assert_eq!(template, Some(UNESCAPED_EXPECTED.into()));
        // Now with an HTML file, which should.
        let template = Template::show(&rocket, "tera/html_test", &map);
        assert_eq!(template, Some(ESCAPED_EXPECTED.into()));
    }
}
#[cfg(feature = "handlebars_templates")]
mod handlebars_tests {
    use super::*;
    use std::collections::HashMap;
    // NOTE(review): EXPECTED embeds the raw, unescaped `<script />` markup —
    // confirm this matches the template's escaping behaviour.
    const EXPECTED: &'static str
        = "Hello _test_!\n\n<main> <script /> hi </main>\nDone.\n\n";
    #[test]
    fn test_handlebars_templates() {
        let rocket = rocket();
        let mut map = HashMap::new();
        map.insert("title", "_test_");
        map.insert("content", "<script /> hi");
        // Test with a txt file, which shouldn't escape.
        let template = Template::show(&rocket, "hbs/test", &map);
        assert_eq!(template, Some(EXPECTED.into()));
    }
}
| 29.763889 | 85 | 0.623425 |
75294cf7ee2ab275209f9c1adff15acf579034f2 | 206 | pub mod pcalc_keywords;
pub mod pcalc_value;
pub mod pcalc_binary_ops;
pub mod pcalc_unary_ops;
pub mod pcalc_environment;
pub mod pcalc_code;
pub mod pcalc_lexer;
pub mod pcalc_parser;
pub mod pcalc_repl;
| 20.6 | 26 | 0.825243 |
149c08c793ab365cdb7b5d3bcc5f7d7f4f05c844 | 3,682 | use e2d2::operators::*;
use e2d2::scheduler::*;
use e2d2::utils::*;
use fnv::FnvHasher;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::hash::{BuildHasher, Hash, Hasher};
use twox_hash::XxHash;
use uuid::Uuid;
type FnvHash = BuildHasherDefault<FnvHasher>;
type XxHashFactory = BuildHasherDefault<XxHash>;
/// Maglev consistent-hashing lookup table mapping flow hashes to backend
/// indices (after Eisenbud et al., "Maglev: A Fast and Reliable Software
/// Network Load Balancer", NSDI 2016).
struct Maglev {
    // permutation: Box<Vec<Vec<usize>>>,
    /// Lookup table: slot index -> index into the backend list.
    lut: Box<Vec<usize>>,
    /// Number of slots in `lut`; callers pass a prime (65537 below) so each
    /// backend's permutation visits every slot.
    lut_size: usize,
}
impl Maglev {
    /// Hash `name` with two independent hash functions (FNV and XxHash) and
    /// derive this backend's (offset, skip) pair, which seeds its preference
    /// permutation over the `lsize` lookup-table slots.
    pub fn offset_skip_for_name(name: &str, h1: &FnvHash, h2: &XxHashFactory, lsize: usize) -> (usize, usize) {
        let mut fnv_state = h1.build_hasher();
        name.hash(&mut fnv_state);
        let hash1 = fnv_state.finish() as usize;
        let mut xx_state = h2.build_hasher();
        name.hash(&mut xx_state);
        let hash2 = xx_state.finish() as usize;
        let offset = hash2 % lsize;
        // skip is in 1..lsize; with a prime lsize the walk below visits
        // every slot exactly once.
        let skip = hash1 % (lsize - 1) + 1;
        (offset, skip)
    }
    /// For every backend, build its preference order over the table slots:
    /// element j is (offset + j * skip) mod lsize.
    pub fn generate_permutations(backends: &[&str], lsize: usize) -> Vec<Vec<usize>> {
        println!("Generating permutations");
        let fnv_hasher: FnvHash = Default::default();
        let xx_hasher: XxHashFactory = Default::default();
        backends
            .iter()
            .map(|n| Maglev::offset_skip_for_name(n, &fnv_hasher, &xx_hasher, lsize))
            .map(|(offset, skip)| (0..lsize).map(|j| (offset + j * skip) % lsize).collect())
            .collect()
    }
    /// Fill the lookup table by letting backends claim slots round-robin,
    /// each following its own permutation. 0x8000 marks a still-empty slot.
    ///
    /// Takes a slice rather than `&Vec<Vec<usize>>` (borrow-of-owned-
    /// collection anti-pattern in the original).
    fn generate_lut(permutations: &[Vec<usize>], size: usize) -> Box<Vec<usize>> {
        // next[i]: how far backend i has advanced through its permutation.
        let mut next: Vec<_> = permutations.iter().map(|_| 0).collect();
        // `box` syntax was unstable and has since been removed from the
        // language; Box::new is the stable equivalent.
        let mut entry: Box<Vec<usize>> = Box::new((0..size).map(|_| 0x8000).collect());
        let mut n = 0;
        println!("Generating LUT");
        while n < size {
            for i in 0..next.len() {
                // Advance backend i to its next unclaimed slot; the loop
                // terminates because each permutation covers all slots and
                // n < size guarantees an empty one remains.
                let mut c = permutations[i][next[i]];
                while entry[c] != 0x8000 {
                    next[i] += 1;
                    c = permutations[i][next[i]];
                }
                // entry[c] == 0x8000 is guaranteed here (the original's
                // re-check was always true), so claim the slot directly.
                entry[c] = i;
                next[i] += 1;
                n += 1;
                if n >= size {
                    break;
                }
            }
        }
        println!("Done Generating LUT");
        entry
    }
    /// Build a Maglev table for the given backends. `lsize` should be prime
    /// (e.g. 65537) so every per-backend permutation covers all slots.
    pub fn new(name: &[&str], lsize: usize) -> Maglev {
        let permutations = Maglev::generate_permutations(name, lsize);
        Maglev {
            lut: Maglev::generate_lut(&permutations, lsize),
            lut_size: lsize,
        }
    }
    /// Map a flow hash to the index of the backend owning its slot.
    pub fn lookup(&self, hash: usize) -> usize {
        let idx = hash % self.lut_size;
        self.lut[idx]
    }
}
/// Build a Maglev load-balancing pipeline stage: swap MAC addresses on each
/// packet, hash its IPv4 flow to pick a backend through the Maglev LUT,
/// split the batch into one group per backend, then merge the groups back
/// into a single composed batch.
pub fn maglev<T: 'static + Batch, S: Scheduler + Sized>(
    parent: T,
    s: &mut S,
    backends: &[&str],
) -> CompositionBatch {
    let ct = backends.len();
    // 65537 slots — a prime table size, as the Maglev construction expects.
    let lut = Maglev::new(backends, 65537);
    // Memoize LUT results per flow hash so each flow is resolved once
    // instead of once per packet.
    let mut cache = HashMap::<usize, usize, FnvHash>::with_hasher(Default::default());
    let uuid = Uuid::new_v4();
    let mut groups = parent
        .transform(box move |pkt| {
            // Assumes exclusive ownership of the packet buffer.
            assert!(pkt.refcnt() == 1);
            let hdr = pkt.headers_mut().mac_mut(0);
            hdr.swap_addresses();
        })
        .group_by(
            ct,
            box move |pkt| {
                // Group index == backend index chosen by the Maglev table.
                let payload = pkt.get_payload(0);
                let hash = ipv4_flow_hash(payload, 0);
                let out = cache.entry(hash).or_insert_with(|| lut.lookup(hash));
                *out
            },
            s,
            "GroupBy".to_string(),
            uuid,
        );
    let pipeline = merge((0..ct).map(|i| groups.get_group(i).unwrap()).collect());
    pipeline.compose()
}
| 32.298246 | 111 | 0.5239 |
d615b0022f5d4a471d4afc0148834128ad28eff8 | 4,014 | // Copyright (c) 2018-2022 The MobileCoin Foundation
//! A Peer-to-Peer networking error.
use crate::ConsensusMsgError;
use displaydoc::Display;
use grpcio::Error as GrpcError;
use mc_connection::AttestationError;
use mc_consensus_api::ConversionError;
use mc_consensus_enclave_api::Error as EnclaveError;
use mc_transaction_core::tx::TxHash;
use mc_util_serial::{
decode::Error as RmpDecodeError, encode::Error as RmpEncodeError,
DecodeError as ProstDecodeError, EncodeError as ProstEncodeError,
};
use retry::Error as RetryError;
use std::{array::TryFromSliceError, result::Result as StdResult};
/// A convenience wrapper for a [std::result::Result] object which contains a
/// peer [Error].
pub type Result<T> = StdResult<T, Error>;
/// A convenience wrapper for an [std::result::Result] whose error side is a
/// [RetryError] wrapping a peer [Error].
pub type RetryResult<T> = StdResult<T, RetryError<Error>>;
/// An enumeration of errors which can occur as the result of a peer connection
/// issue
///
/// The variant doc comments double as their `Display` output via `displaydoc`.
#[derive(Debug, Display)]
pub enum Error {
    /// Attestation failure: {0}
    Attestation(PeerAttestationError),
    /// Resource not found
    NotFound,
    /// Channel disconnected, could not send
    ChannelSend,
    /// Request range too large
    RequestTooLarge,
    /// gRPC failure: {0}
    Grpc(GrpcError),
    /// Internal retry failure: {0}
    RetryInternal(String),
    /// Conversion failure: {0}
    Conversion(ConversionError),
    /// Serialization
    Serialization,
    /// Enclave error: {0}
    Enclave(EnclaveError),
    /// Consensus message: {0}
    ConsensusMsg(ConsensusMsgError),
    /// Tx hashes not in cache: {0:?}
    TxHashesNotInCache(Vec<TxHash>),
    /// Unknown peering issue
    Other,
}
impl Error {
    /// True for failures that are plausibly transient — transport (gRPC) or
    /// attestation problems — and therefore worth retrying.
    pub fn should_retry(&self) -> bool {
        matches!(
            self,
            Self::Grpc(_) | Self::Attestation(_) | Self::Enclave(EnclaveError::Attest(_))
        )
    }
}
impl From<ConversionError> for Error {
fn from(src: ConversionError) -> Self {
Error::Conversion(src)
}
}
impl From<PeerAttestationError> for Error {
fn from(src: PeerAttestationError) -> Self {
Error::Attestation(src)
}
}
impl From<GrpcError> for Error {
fn from(src: GrpcError) -> Self {
Error::Grpc(src)
}
}
impl From<ProstDecodeError> for Error {
fn from(_src: ProstDecodeError) -> Self {
Error::Serialization
}
}
impl From<ProstEncodeError> for Error {
fn from(_src: ProstEncodeError) -> Self {
Error::Serialization
}
}
impl From<RetryError<Self>> for Error {
    /// Unwrap a retry wrapper: keep the underlying peer error when the
    /// operation itself failed, otherwise record the retry-internal message.
    fn from(err: RetryError<Self>) -> Self {
        match err {
            RetryError::Operation { error, .. } => error,
            RetryError::Internal(msg) => Self::RetryInternal(msg),
        }
    }
}
impl From<RmpDecodeError> for Error {
fn from(_src: RmpDecodeError) -> Self {
Error::Serialization
}
}
impl From<RmpEncodeError> for Error {
fn from(_src: RmpEncodeError) -> Self {
Error::Serialization
}
}
impl From<TryFromSliceError> for Error {
    /// A bad slice-to-array cast is reported as a conversion failure.
    fn from(_err: TryFromSliceError) -> Self {
        Self::Conversion(ConversionError::ArrayCastError)
    }
}
impl From<EnclaveError> for Error {
    /// Wrap a local enclave failure.
    fn from(err: EnclaveError) -> Self {
        Self::Enclave(err)
    }
}
impl From<ConsensusMsgError> for Error {
    /// Wrap a consensus-message validation failure.
    fn from(err: ConsensusMsgError) -> Self {
        Self::ConsensusMsg(err)
    }
}
/// An error which can occur during the peer-to-peer attestation handshake.
///
/// The variant doc comments double as their `Display` output via `displaydoc`.
#[derive(Debug, Display)]
pub enum PeerAttestationError {
    /// gRPC failure during attestation: {0}
    Grpc(GrpcError),
    /// Local enclave failure during attestation: {0}
    Enclave(EnclaveError),
}
impl From<GrpcError> for PeerAttestationError {
fn from(src: GrpcError) -> Self {
PeerAttestationError::Grpc(src)
}
}
impl From<EnclaveError> for PeerAttestationError {
fn from(src: EnclaveError) -> Self {
PeerAttestationError::Enclave(src)
}
}
impl AttestationError for PeerAttestationError {
    // Any attestation failure against a peer warrants re-running the
    // attestation handshake, so this is unconditionally true.
    fn should_reattest(&self) -> bool {
        true
    }
}
| 25.0875 | 92 | 0.659691 |
38ecee4eed58f8cddce44c5d80bc5045d520e6b1 | 27,139 | // Generated from definition io.k8s.api.storage.v1beta1.StorageClass
/// StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.
///
/// StorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.
///
/// NOTE(review): this type (and the rest of this file) is machine-generated
/// from the Kubernetes OpenAPI spec; prefer regenerating over hand-editing.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct StorageClass {
    /// AllowVolumeExpansion shows whether the storage class allow volume expand
    pub allow_volume_expansion: Option<bool>,
    /// Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.
    pub allowed_topologies: Vec<crate::api::core::v1::TopologySelectorTerm>,
    /// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
    pub metadata: crate::apimachinery::pkg::apis::meta::v1::ObjectMeta,
    /// Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. \["ro", "soft"\]. Not validated - mount of the PVs will simply fail if one is invalid.
    pub mount_options: Vec<String>,
    /// Parameters holds the parameters for the provisioner that should create volumes of this storage class.
    pub parameters: std::collections::BTreeMap<String, String>,
    /// Provisioner indicates the type of the provisioner.
    pub provisioner: String,
    /// Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.
    pub reclaim_policy: Option<String>,
    /// VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.
    pub volume_binding_mode: Option<String>,
}
// Begin storage.k8s.io/v1beta1/StorageClass
// Generated from operation createStorageV1beta1StorageClass
// NOTE(review): machine-generated client code; these functions only build an
// HTTP request + response parser — no I/O is performed here.
impl StorageClass {
    /// create a StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::CreateResponse`]`<Self>>` constructor, or [`crate::CreateResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn create_storage_class(
        body: &crate::api::storage::v1beta1::StorageClass,
        optional: crate::CreateOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::CreateResponse<Self>>), crate::RequestError> {
        let __url = "/apis/storage.k8s.io/v1beta1/storageclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::post(__url);
        // The StorageClass itself travels as a JSON request body.
        let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation deleteStorageV1beta1CollectionStorageClass
impl StorageClass {
    /// delete collection of StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>>` constructor, or [`crate::DeleteResponse`]`<`[`crate::List`]`<Self>>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `delete_optional`
    ///
    ///     Delete options. Use `Default::default()` to not pass any.
    ///
    /// * `list_optional`
    ///
    ///     List options. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_collection_storage_class(
        delete_optional: crate::DeleteOptional<'_>,
        list_optional: crate::ListOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<crate::List<Self>>>), crate::RequestError> {
        let __url = "/apis/storage.k8s.io/v1beta1/storageclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // List options select which objects to delete (query string); delete
        // options travel in the JSON request body.
        list_optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::delete(__url);
        let __body = crate::serde_json::to_vec(&delete_optional).map_err(crate::RequestError::Json)?;
        let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation deleteStorageV1beta1StorageClass
impl StorageClass {
    /// delete a StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::DeleteResponse`]`<Self>>` constructor, or [`crate::DeleteResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the StorageClass
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn delete_storage_class(
        name: &str,
        optional: crate::DeleteOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::DeleteResponse<Self>>), crate::RequestError> {
        // The object name is percent-encoded into the path segment.
        let __url = format!("/apis/storage.k8s.io/v1beta1/storageclasses/{name}",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let __request = crate::http::Request::delete(__url);
        let __body = crate::serde_json::to_vec(&optional).map_err(crate::RequestError::Json)?;
        let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation listStorageV1beta1StorageClass
impl StorageClass {
    /// list or watch objects of kind StorageClass
    ///
    /// This operation only supports listing all items of this type.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::ListResponse`]`<Self>>` constructor, or [`crate::ListResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn list_storage_class(
        optional: crate::ListOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ListResponse<Self>>), crate::RequestError> {
        let __url = "/apis/storage.k8s.io/v1beta1/storageclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::get(__url);
        // GET requests carry no body.
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation patchStorageV1beta1StorageClass
impl StorageClass {
    /// partially update the specified StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::PatchResponse`]`<Self>>` constructor, or [`crate::PatchResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the StorageClass
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn patch_storage_class(
        name: &str,
        body: &crate::apimachinery::pkg::apis::meta::v1::Patch,
        optional: crate::PatchOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::PatchResponse<Self>>), crate::RequestError> {
        let __url = format!("/apis/storage.k8s.io/v1beta1/storageclasses/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::patch(__url);
        let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        // The Content-Type depends on the patch strategy the caller chose.
        let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static(match body {
            crate::apimachinery::pkg::apis::meta::v1::Patch::Json(_) => "application/json-patch+json",
            crate::apimachinery::pkg::apis::meta::v1::Patch::Merge(_) => "application/merge-patch+json",
            crate::apimachinery::pkg::apis::meta::v1::Patch::StrategicMerge(_) => "application/strategic-merge-patch+json",
        }));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation readStorageV1beta1StorageClass
impl StorageClass {
    /// read the specified StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`ReadStorageClassResponse`]`>` constructor, or [`ReadStorageClassResponse`] directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the StorageClass
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn read_storage_class(
        name: &str,
        optional: ReadStorageClassOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<ReadStorageClassResponse>), crate::RequestError> {
        let ReadStorageClassOptional {
            exact,
            export,
            pretty,
        } = optional;
        let __url = format!("/apis/storage.k8s.io/v1beta1/storageclasses/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        // Each optional parameter becomes a query pair only when supplied.
        if let Some(exact) = exact {
            __query_pairs.append_pair("exact", &exact.to_string());
        }
        if let Some(export) = export {
            __query_pairs.append_pair("export", &export.to_string());
        }
        if let Some(pretty) = pretty {
            __query_pairs.append_pair("pretty", pretty);
        }
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
/// Optional parameters of [`StorageClass::read_storage_class`]
#[cfg(feature = "api")]
#[derive(Clone, Copy, Debug, Default)]
pub struct ReadStorageClassOptional<'a> {
    /// Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.
    pub exact: Option<bool>,
    /// Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.
    pub export: Option<bool>,
    /// If 'true', then the output is pretty printed.
    pub pretty: Option<&'a str>,
}
/// Use `<ReadStorageClassResponse as Response>::try_from_parts` to parse the HTTP response body of [`StorageClass::read_storage_class`]
#[cfg(feature = "api")]
#[derive(Debug)]
pub enum ReadStorageClassResponse {
    /// HTTP 200 with the requested object.
    Ok(crate::api::storage::v1beta1::StorageClass),
    /// Any other status; payload is the raw JSON body when one was present.
    Other(Result<Option<crate::serde_json::Value>, crate::serde_json::Error>),
}
#[cfg(feature = "api")]
impl crate::Response for ReadStorageClassResponse {
    fn try_from_parts(status_code: crate::http::StatusCode, buf: &[u8]) -> Result<(Self, usize), crate::ResponseError> {
        match status_code {
            crate::http::StatusCode::OK => {
                let result = match crate::serde_json::from_slice(buf) {
                    Ok(value) => value,
                    // Truncated JSON means the caller must feed more bytes,
                    // not that the response is malformed.
                    Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                    Err(err) => return Err(crate::ResponseError::Json(err)),
                };
                Ok((ReadStorageClassResponse::Ok(result), buf.len()))
            },
            _ => {
                let (result, read) =
                    if buf.is_empty() {
                        (Ok(None), 0)
                    }
                    else {
                        match crate::serde_json::from_slice(buf) {
                            Ok(value) => (Ok(Some(value)), buf.len()),
                            Err(ref err) if err.is_eof() => return Err(crate::ResponseError::NeedMoreData),
                            Err(err) => (Err(err), 0),
                        }
                    };
                Ok((ReadStorageClassResponse::Other(result), read))
            },
        }
    }
}
// Generated from operation replaceStorageV1beta1StorageClass
impl StorageClass {
    /// replace the specified StorageClass
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::ReplaceResponse`]`<Self>>` constructor, or [`crate::ReplaceResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `name`
    ///
    ///     name of the StorageClass
    ///
    /// * `body`
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn replace_storage_class(
        name: &str,
        body: &crate::api::storage::v1beta1::StorageClass,
        optional: crate::ReplaceOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::ReplaceResponse<Self>>), crate::RequestError> {
        let __url = format!("/apis/storage.k8s.io/v1beta1/storageclasses/{name}?",
            name = crate::percent_encoding::percent_encode(name.as_bytes(), crate::percent_encoding2::PATH_SEGMENT_ENCODE_SET),
        );
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        // Replacement is a PUT of the full object as JSON.
        let __request = crate::http::Request::put(__url);
        let __body = crate::serde_json::to_vec(body).map_err(crate::RequestError::Json)?;
        let __request = __request.header(crate::http::header::CONTENT_TYPE, crate::http::header::HeaderValue::from_static("application/json"));
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// Generated from operation watchStorageV1beta1StorageClass
impl StorageClass {
    /// list or watch objects of kind StorageClass
    ///
    /// This operation only supports watching one item, or a list of items, of this type for changes.
    ///
    /// Use the returned [`crate::ResponseBody`]`<`[`crate::WatchResponse`]`<Self>>` constructor, or [`crate::WatchResponse`]`<Self>` directly, to parse the HTTP response.
    ///
    /// # Arguments
    ///
    /// * `optional`
    ///
    ///     Optional parameters. Use `Default::default()` to not pass any.
    #[cfg(feature = "api")]
    pub fn watch_storage_class(
        optional: crate::WatchOptional<'_>,
    ) -> Result<(crate::http::Request<Vec<u8>>, fn(crate::http::StatusCode) -> crate::ResponseBody<crate::WatchResponse<Self>>), crate::RequestError> {
        let __url = "/apis/storage.k8s.io/v1beta1/storageclasses?".to_owned();
        let mut __query_pairs = crate::url::form_urlencoded::Serializer::new(__url);
        optional.__serialize(&mut __query_pairs);
        let __url = __query_pairs.finish();
        let __request = crate::http::Request::get(__url);
        let __body = vec![];
        match __request.body(__body) {
            Ok(request) => Ok((request, crate::ResponseBody::new)),
            Err(err) => Err(crate::RequestError::Http(err)),
        }
    }
}
// End storage.k8s.io/v1beta1/StorageClass
// Static resource identification for the storage.k8s.io/v1beta1 StorageClass
// kind, used when building URLs and validating apiVersion/kind fields.
impl crate::Resource for StorageClass {
    const API_VERSION: &'static str = "storage.k8s.io/v1beta1";
    const GROUP: &'static str = "storage.k8s.io";
    const KIND: &'static str = "StorageClass";
    const VERSION: &'static str = "v1beta1";
}
impl crate::ListableResource for StorageClass {
    const LIST_KIND: &'static str = concat!("StorageClass", "List");
}
// Accessors for the standard Kubernetes object metadata.
impl crate::Metadata for StorageClass {
    type Ty = crate::apimachinery::pkg::apis::meta::v1::ObjectMeta;
    fn metadata(&self) -> &<Self as crate::Metadata>::Ty {
        &self.metadata
    }
    fn metadata_mut(&mut self) -> &mut<Self as crate::Metadata>::Ty {
        &mut self.metadata
    }
}
// Hand-rolled (generated) deserializer: validates apiVersion/kind against the
// Resource constants, ignores unknown fields, and applies defaults for
// optional collections.
impl<'de> crate::serde::Deserialize<'de> for StorageClass {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
        #[allow(non_camel_case_types)]
        enum Field {
            Key_api_version,
            Key_kind,
            Key_allow_volume_expansion,
            Key_allowed_topologies,
            Key_metadata,
            Key_mount_options,
            Key_parameters,
            Key_provisioner,
            Key_reclaim_policy,
            Key_volume_binding_mode,
            Other,
        }
        impl<'de> crate::serde::Deserialize<'de> for Field {
            fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
                struct Visitor;
                impl<'de> crate::serde::de::Visitor<'de> for Visitor {
                    type Value = Field;
                    fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                        f.write_str("field identifier")
                    }
                    fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
                        Ok(match v {
                            "apiVersion" => Field::Key_api_version,
                            "kind" => Field::Key_kind,
                            "allowVolumeExpansion" => Field::Key_allow_volume_expansion,
                            "allowedTopologies" => Field::Key_allowed_topologies,
                            "metadata" => Field::Key_metadata,
                            "mountOptions" => Field::Key_mount_options,
                            "parameters" => Field::Key_parameters,
                            "provisioner" => Field::Key_provisioner,
                            "reclaimPolicy" => Field::Key_reclaim_policy,
                            "volumeBindingMode" => Field::Key_volume_binding_mode,
                            // Unknown keys are tolerated and skipped below.
                            _ => Field::Other,
                        })
                    }
                }
                deserializer.deserialize_identifier(Visitor)
            }
        }
        struct Visitor;
        impl<'de> crate::serde::de::Visitor<'de> for Visitor {
            type Value = StorageClass;
            fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.write_str(<Self::Value as crate::Resource>::KIND)
            }
            fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
                let mut value_allow_volume_expansion: Option<bool> = None;
                let mut value_allowed_topologies: Option<Vec<crate::api::core::v1::TopologySelectorTerm>> = None;
                let mut value_metadata: Option<crate::apimachinery::pkg::apis::meta::v1::ObjectMeta> = None;
                let mut value_mount_options: Option<Vec<String>> = None;
                let mut value_parameters: Option<std::collections::BTreeMap<String, String>> = None;
                let mut value_provisioner: Option<String> = None;
                let mut value_reclaim_policy: Option<String> = None;
                let mut value_volume_binding_mode: Option<String> = None;
                while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
                    match key {
                        // apiVersion/kind are not stored; they are checked
                        // against the Resource constants and rejected on
                        // mismatch.
                        Field::Key_api_version => {
                            let value_api_version: String = crate::serde::de::MapAccess::next_value(&mut map)?;
                            if value_api_version != <Self::Value as crate::Resource>::API_VERSION {
                                return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_api_version), &<Self::Value as crate::Resource>::API_VERSION));
                            }
                        },
                        Field::Key_kind => {
                            let value_kind: String = crate::serde::de::MapAccess::next_value(&mut map)?;
                            if value_kind != <Self::Value as crate::Resource>::KIND {
                                return Err(crate::serde::de::Error::invalid_value(crate::serde::de::Unexpected::Str(&value_kind), &<Self::Value as crate::Resource>::KIND));
                            }
                        },
                        Field::Key_allow_volume_expansion => value_allow_volume_expansion = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_allowed_topologies => value_allowed_topologies = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_metadata => value_metadata = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
                        Field::Key_mount_options => value_mount_options = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_parameters => value_parameters = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_provisioner => value_provisioner = Some(crate::serde::de::MapAccess::next_value(&mut map)?),
                        Field::Key_reclaim_policy => value_reclaim_policy = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Key_volume_binding_mode => value_volume_binding_mode = crate::serde::de::MapAccess::next_value(&mut map)?,
                        Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
                    }
                }
                // metadata and provisioner are required; collections default
                // to empty and scalars stay None when absent.
                Ok(StorageClass {
                    allow_volume_expansion: value_allow_volume_expansion,
                    allowed_topologies: value_allowed_topologies.unwrap_or_default(),
                    metadata: value_metadata.ok_or_else(|| crate::serde::de::Error::missing_field("metadata"))?,
                    mount_options: value_mount_options.unwrap_or_default(),
                    parameters: value_parameters.unwrap_or_default(),
                    provisioner: value_provisioner.ok_or_else(|| crate::serde::de::Error::missing_field("provisioner"))?,
                    reclaim_policy: value_reclaim_policy,
                    volume_binding_mode: value_volume_binding_mode,
                })
            }
        }
        deserializer.deserialize_struct(
            <Self as crate::Resource>::KIND,
            &[
                "apiVersion",
                "kind",
                "allowVolumeExpansion",
                "allowedTopologies",
                "metadata",
                "mountOptions",
                "parameters",
                "provisioner",
                "reclaimPolicy",
                "volumeBindingMode",
            ],
            Visitor,
        )
    }
}
// Hand-rolled (generated) serializer: always emits apiVersion/kind from the
// Resource constants and skips optional/empty fields.
impl crate::serde::Serialize for StorageClass {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
        // 4 = apiVersion + kind + metadata + provisioner, which are always
        // serialized; each optional/non-empty field adds one.
        let mut state = serializer.serialize_struct(
            <Self as crate::Resource>::KIND,
            4 +
            self.allow_volume_expansion.as_ref().map_or(0, |_| 1) +
            usize::from(!self.allowed_topologies.is_empty()) +
            usize::from(!self.mount_options.is_empty()) +
            usize::from(!self.parameters.is_empty()) +
            self.reclaim_policy.as_ref().map_or(0, |_| 1) +
            self.volume_binding_mode.as_ref().map_or(0, |_| 1),
        )?;
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", <Self as crate::Resource>::API_VERSION)?;
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", <Self as crate::Resource>::KIND)?;
        if let Some(value) = &self.allow_volume_expansion {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "allowVolumeExpansion", value)?;
        }
        if !self.allowed_topologies.is_empty() {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "allowedTopologies", &self.allowed_topologies)?;
        }
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "metadata", &self.metadata)?;
        if !self.mount_options.is_empty() {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "mountOptions", &self.mount_options)?;
        }
        if !self.parameters.is_empty() {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "parameters", &self.parameters)?;
        }
        crate::serde::ser::SerializeStruct::serialize_field(&mut state, "provisioner", &self.provisioner)?;
        if let Some(value) = &self.reclaim_policy {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "reclaimPolicy", value)?;
        }
        if let Some(value) = &self.volume_binding_mode {
            crate::serde::ser::SerializeStruct::serialize_field(&mut state, "volumeBindingMode", value)?;
        }
        crate::serde::ser::SerializeStruct::end(state)
    }
}
| 46.953287 | 307 | 0.607723 |
337b54fa76185a80910e553f82a61d71ff733c85 | 49 | pub mod enums;
pub mod events;
pub mod types;
| 12.25 | 16 | 0.693878 |
8f048f8de30eeb533eb145fe288a6f682007f8d1 | 619 | // DO NOT EDIT THIS FILE. IT WAS AUTOMATICALLY GENERATED BY:
//
// ucd-generate property-bool ucd-13.0.0 --chars --include whitespace
//
// ucd-generate 0.2.7 is available on crates.io.
// (name, ranges) pairs for the boolean properties in this generated table;
// only White_Space is included here.
pub const BY_NAME: &'static [(&'static str, &'static [(char, char)])] =
    &[("White_Space", WHITE_SPACE)];
// Inclusive codepoint ranges of characters with the Unicode 13.0
// White_Space property (generated by ucd-generate — do not hand-edit).
pub const WHITE_SPACE: &'static [(char, char)] = &[
    ('\t', '\r'),
    (' ', ' '),
    ('\u{85}', '\u{85}'),
    ('\u{a0}', '\u{a0}'),
    ('\u{1680}', '\u{1680}'),
    ('\u{2000}', '\u{200a}'),
    ('\u{2028}', '\u{2029}'),
    ('\u{202f}', '\u{202f}'),
    ('\u{205f}', '\u{205f}'),
    ('\u{3000}', '\u{3000}'),
];
| 28.136364 | 71 | 0.494346 |
218e31e8720b779601f12697496e29b4c39ef63e | 12,039 | use geom::{Distance, Duration};
use map_gui::tools::{
cmp_dist, cmp_duration, InputWaypoints, TripManagement, TripManagementState, WaypointID,
};
use map_model::{PathfinderCaching, NORMAL_LANE_THICKNESS};
use synthpop::{TripEndpoint, TripMode};
use widgetry::mapspace::{ObjectID, ToggleZoomed, World};
use widgetry::{
Color, EventCtx, GfxCtx, Line, Outcome, Panel, RoundedF64, Spinner, State, Text, Widget,
};
use super::per_neighborhood::{FilterableObj, Tab};
use super::{Neighborhood, NeighborhoodID};
use crate::{App, Transition};
/// Interactive route planner for a neighborhood: the user places waypoints,
/// trips are persisted via `TripManagement`, and the resulting routes are
/// drawn on the map.
pub struct RoutePlanner {
    panel: Panel,
    waypoints: InputWaypoints,
    files: TripManagement<App, RoutePlanner>,
    // Clickable map objects: waypoints plus the neighborhood's filterable
    // roads/intersections (see `Obj`).
    world: World<Obj>,
    draw_routes: ToggleZoomed,
    neighborhood: Neighborhood,
}
// Hooks that let the shared trip-management widget read/write this state.
impl TripManagementState<App> for RoutePlanner {
    fn mut_files(&mut self) -> &mut TripManagement<App, Self> {
        &mut self.files
    }
    // Where the currently-selected trip name is persisted across states.
    fn app_session_current_trip_name(app: &mut App) -> &mut Option<String> {
        &mut app.session.current_trip_name
    }
    // Called when the selected trip changes: replace the waypoints with the
    // loaded trip's and rebuild the panel/world/routes.
    fn sync_from_file_management(&mut self, ctx: &mut EventCtx, app: &mut App) {
        self.waypoints
            .overwrite(app, self.files.current.waypoints.clone())
        self.update_everything(ctx, app);
    }
}
/// Everything clickable in this state's `World`: either a draggable trip
/// waypoint or one of the neighborhood's filterable objects.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
enum Obj {
    Waypoint(WaypointID),
    Filterable(FilterableObj),
}
impl ObjectID for Obj {}
impl RoutePlanner {
    /// Build the route-planning state for one neighborhood, restoring the
    /// user's previously-selected trip (if any) from the app session.
    pub fn new_state(ctx: &mut EventCtx, app: &mut App, id: NeighborhoodID) -> Box<dyn State<App>> {
        let neighborhood = Neighborhood::new(ctx, app, id);
        let mut rp = RoutePlanner {
            panel: Panel::empty(ctx),
            waypoints: InputWaypoints::new(app),
            files: TripManagement::new(app),
            world: World::unbounded(),
            draw_routes: ToggleZoomed::empty(ctx),
            neighborhood,
        };
        if let Some(current_name) = &app.session.current_trip_name {
            rp.files.set_current(current_name);
        }
        // Populates waypoints, panel, world, and draw_routes from the
        // restored (or empty) trip.
        rp.sync_from_file_management(ctx, app);
        Box::new(rp)
    }
// Updates the panel and draw_routes
fn update_everything(&mut self, ctx: &mut EventCtx, app: &mut App) {
self.files.autosave(app);
let results_widget = self.recalculate_paths(ctx, app);
let contents = Widget::col(vec![
self.files.get_panel_widget(ctx),
Widget::horiz_separator(ctx, 1.0),
self.waypoints.get_panel_widget(ctx),
Widget::row(vec![
Line("Slow-down factor for main roads:")
.into_widget(ctx)
.centered_vert(),
Spinner::f64_widget(
ctx,
"main road penalty",
(1.0, 10.0),
app.session.main_road_penalty,
0.5,
),
]),
Text::from_multiline(vec![
Line("1 means free-flow traffic conditions").secondary(),
Line("Increase to see how vehicles may try to detour in heavy traffic").secondary(),
])
.into_widget(ctx),
results_widget,
]);
let mut panel = Tab::Pathfinding
.panel_builder(ctx, app, contents)
// Hovering on waypoint cards
.ignore_initial_events()
.build(ctx);
panel.restore(ctx, &self.panel);
self.panel = panel;
let mut world = World::bounded(app.map.get_bounds());
super::per_neighborhood::populate_world(
ctx,
app,
&self.neighborhood,
&mut world,
Obj::Filterable,
0,
);
self.waypoints
.rebuild_world(ctx, &mut world, Obj::Waypoint, 1);
world.initialize_hover(ctx);
world.rebuilt_during_drag(&self.world);
self.world = world;
}
// Returns a widget to display
fn recalculate_paths(&mut self, ctx: &mut EventCtx, app: &App) -> Widget {
let map = &app.map;
let mut results = Text::new();
let mut draw = ToggleZoomed::builder();
// First the route respecting the filters
let (total_time_after, total_dist_after) = {
let mut params = map.routing_params().clone();
app.session.modal_filters.update_routing_params(&mut params);
params.main_road_penalty = app.session.main_road_penalty;
let mut total_time = Duration::ZERO;
let mut total_dist = Distance::ZERO;
let color = Color::RED;
for pair in self.waypoints.get_waypoints().windows(2) {
if let Some((path, pl)) =
TripEndpoint::path_req(pair[0], pair[1], TripMode::Drive, map)
.and_then(|req| {
map.pathfind_with_params(req, ¶ms, PathfinderCaching::CacheDijkstra)
.ok()
})
.and_then(|path| path.trace(map).map(|pl| (path, pl)))
{
let shape = pl.make_polygons(5.0 * NORMAL_LANE_THICKNESS);
draw.unzoomed.push(color.alpha(0.8), shape.clone());
draw.zoomed.push(color.alpha(0.5), shape);
// We use PathV1 (lane-based) for tracing. It doesn't preserve the cost
// calculated while pathfinding, so just estimate_duration.
//
// The original reason for using estimate_duration here was to exclude the large
// penalty if the route crossed a filter. But now that's impossible at the
// pathfinding layer.
total_time += path.estimate_duration(map, None);
total_dist += path.total_length();
}
}
if total_dist != Distance::ZERO {
results.add_line(Line("Route respecting modal filters").fg(color));
results.add_line(Line(format!("Time: {}", total_time)));
results.add_line(Line(format!("Distance: {}", total_dist)));
}
(total_time, total_dist)
};
// Then the one ignoring filters
{
let mut draw_old_route = ToggleZoomed::builder();
let mut total_time = Duration::ZERO;
let mut total_dist = Distance::ZERO;
let color = Color::BLUE;
let mut params = map.routing_params().clone();
params.main_road_penalty = app.session.main_road_penalty;
for pair in self.waypoints.get_waypoints().windows(2) {
if let Some((path, pl)) =
TripEndpoint::path_req(pair[0], pair[1], TripMode::Drive, map)
.and_then(|req| {
map.pathfind_with_params(req, ¶ms, PathfinderCaching::CacheDijkstra)
.ok()
})
.and_then(|path| path.trace(map).map(|pl| (path, pl)))
{
let shape = pl.make_polygons(5.0 * NORMAL_LANE_THICKNESS);
draw_old_route
.unzoomed
.push(color.alpha(0.8), shape.clone());
draw_old_route.zoomed.push(color.alpha(0.5), shape);
total_time += path.estimate_duration(map, None);
total_dist += path.total_length();
}
}
if total_dist != Distance::ZERO {
// If these two stats are the same, assume the two paths are equivalent
if total_time == total_time_after && total_dist == total_dist_after {
draw = draw_old_route;
results = Text::new();
results.add_line(Line("The route is the same before/after modal filters"));
results.add_line(Line(format!("Time: {}", total_time)));
results.add_line(Line(format!("Distance: {}", total_dist)));
} else {
draw.append(draw_old_route);
results.add_line(
Line("Route before any modal filters (existing or new)").fg(color),
);
cmp_duration(
&mut results,
app,
total_time - total_time_after,
"shorter",
"longer",
);
// Remove formatting -- red/green gets confusing with the blue/red of the two
// routes
results.remove_colors_from_last_line();
cmp_dist(
&mut results,
app,
total_dist - total_dist_after,
"shorter",
"longer",
);
results.remove_colors_from_last_line();
}
}
}
self.draw_routes = draw.build(ctx);
results.into_widget(ctx)
}
}
impl State<App> for RoutePlanner {
    fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
        let world_outcome = self.world.event(ctx);
        // TODO map_id can only extract one case. Do a bit of a hack to handle filter management
        // first.
        if let Some(outcome) = world_outcome.clone().maybe_map_id(|id| match id {
            Obj::Filterable(id) => Some(id),
            _ => None,
        }) {
            if super::per_neighborhood::handle_world_outcome(ctx, app, outcome) {
                // A filter changed; recompute the neighborhood and both routes.
                self.neighborhood = Neighborhood::new(ctx, app, self.neighborhood.id);
                self.update_everything(ctx, app);
                return Transition::Keep;
            }
            // Fall through. Clicking free space and other ID-less outcomes will match here, but we
            // don't want them to.
        }
        // NOTE(review): assumes any outcome still carrying a Filterable ID was
        // consumed by handle_world_outcome above, else this panics -- verify.
        let world_outcome_for_waypoints = world_outcome.map_id(|id| match id {
            Obj::Waypoint(id) => id,
            _ => unreachable!(),
        });
        let panel_outcome = self.panel.event(ctx);
        if let Outcome::Clicked(ref x) = panel_outcome {
            if let Some(t) = Tab::Pathfinding.handle_action(ctx, app, x, self.neighborhood.id) {
                return t;
            }
            if let Some(t) = self.files.on_click(ctx, app, x) {
                // Bit hacky...
                if matches!(t, Transition::Keep) {
                    self.sync_from_file_management(ctx, app);
                }
                return t;
            }
        }
        if let Outcome::Changed(ref x) = panel_outcome {
            if x == "main road penalty" {
                app.session.main_road_penalty =
                    self.panel.spinner::<RoundedF64>("main road penalty").0;
                self.update_everything(ctx, app);
            }
        }
        if self
            .waypoints
            .event(app, panel_outcome, world_outcome_for_waypoints)
        {
            // Sync from waypoints to file management
            // TODO Maaaybe this directly live in the InputWaypoints system?
            self.files.current.waypoints = self.waypoints.get_waypoints();
            self.update_everything(ctx, app);
        }
        Transition::Keep
    }
    fn draw(&self, g: &mut GfxCtx, app: &App) {
        self.panel.draw(g);
        g.redraw(&self.neighborhood.fade_irrelevant);
        self.draw_routes.draw(g);
        app.session.draw_all_filters.draw(g);
        // Street labels only make sense when unzoomed.
        if g.canvas.is_unzoomed() {
            self.neighborhood.labels.draw(g, app);
        }
        self.world.draw(g);
    }
    fn on_destroy(&mut self, _: &mut EventCtx, app: &mut App) {
        // We'll cache a custom pathfinder per set of avoided roads. Avoid leaking memory by
        // clearing this out
        app.map.clear_custom_pathfinder_cache();
    }
}
| 38.586538 | 100 | 0.534845 |
4a3fd08a28839e20daba02de685cb2fce52a478f | 14,522 | // Copyright (c) The Starcoin Core Contributors
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module implements an in-memory Merkle Accumulator that is similar to what we use in
//! storage. This accumulator will only store a small portion of the tree -- for any subtree that
//! is full, we store only the root. Also we only store the frozen nodes, therefore this structure
//! will always store up to `Log(n)` number of nodes, where `n` is the total number of leaves in
//! the tree.
//!
//! This accumulator is immutable once constructed. If we append new leaves to the tree we will
//! obtain a new accumulator instance and the old one remains unchanged.
#[cfg(test)]
mod accumulator_test;
use crate::{LeafCount, MAX_ACCUMULATOR_LEAVES};
use anyhow::{ensure, format_err, Result};
use starcoin_crypto::{hash::ACCUMULATOR_PLACEHOLDER_HASH, HashValue};
/// The Accumulator implementation.
///
/// Immutable once constructed: `append`/`append_subtrees` return new instances.
/// Only the frozen subtree roots are stored, so memory use is O(log n) in the
/// number of leaves.
pub struct InMemoryAccumulator {
    /// Represents the roots of all the full subtrees from left to right in this accumulator. For
    /// example, if we have the following accumulator, this vector will have two hashes that
    /// correspond to `X` and `e`.
    /// ```text
    ///                 root
    ///                /    \
    ///              /        \
    ///            /            \
    ///           X              o
    ///         /   \           / \
    ///        /     \         /   \
    ///       o       o       o     placeholder
    ///      / \     / \     / \
    ///     a   b   c   d   e   placeholder
    /// ```
    /// Invariant (checked in `new`): length equals `num_leaves.count_ones()`.
    frozen_subtree_roots: Vec<HashValue>,
    /// The total number of leaves in this accumulator.
    num_leaves: LeafCount,
    /// The root hash of this accumulator.
    root_hash: HashValue,
}
/// An internal (non-leaf) Merkle tree node; its hash is SHA3-256 over the
/// concatenation of the two child hashes (see `hash`).
pub struct MerkleTreeInternalNode {
    left: HashValue,
    right: HashValue,
}
impl MerkleTreeInternalNode {
pub fn new(left: HashValue, right: HashValue) -> Self {
Self { left, right }
}
pub fn hash(&self) -> HashValue {
let mut bytes = self.left.to_vec();
bytes.extend(self.right.to_vec());
HashValue::sha3_256_of(bytes.as_slice())
}
}
impl InMemoryAccumulator {
    /// Constructs a new accumulator with roots of existing frozen subtrees. Returns error if the
    /// number of frozen subtree roots does not match the number of leaves.
    pub fn new(frozen_subtree_roots: Vec<HashValue>, num_leaves: u64) -> Result<Self> {
        ensure!(
            frozen_subtree_roots.len() == num_leaves.count_ones() as usize,
            "The number of frozen subtrees does not match the number of leaves. \
             frozen_subtree_roots.len(): {}. num_leaves: {}.",
            frozen_subtree_roots.len(),
            num_leaves,
        );
        let root_hash = Self::compute_root_hash(&frozen_subtree_roots, num_leaves);
        Ok(Self {
            frozen_subtree_roots,
            num_leaves,
            root_hash,
        })
    }

    /// Constructs a new accumulator with given leaves.
    pub fn from_leaves(leaves: &[HashValue]) -> Self {
        Self::default().append(leaves)
    }

    /// Appends a list of new leaves to an existing accumulator. Since the accumulator is
    /// immutable, the existing one remains unchanged and a new one representing the result is
    /// returned.
    pub fn append(&self, leaves: &[HashValue]) -> Self {
        let mut frozen_subtree_roots = self.frozen_subtree_roots.clone();
        let mut num_leaves = self.num_leaves;
        for leaf in leaves {
            Self::append_one(&mut frozen_subtree_roots, num_leaves, *leaf);
            num_leaves += 1;
        }
        Self::new(frozen_subtree_roots, num_leaves).expect(
            "Appending leaves to a valid accumulator should create another valid accumulator.",
        )
    }

    /// Appends one leaf. This will update `frozen_subtree_roots` to store new frozen root nodes
    /// and remove old nodes if they are now part of a larger frozen subtree.
    fn append_one(
        frozen_subtree_roots: &mut Vec<HashValue>,
        num_existing_leaves: u64,
        leaf: HashValue,
    ) {
        // Appending a leaf is like binary increment: with N = 0b0111 leaves,
        // N + 1 = 0b1000, and every carried bit merges the two rightmost
        // frozen subtrees into their parent.
        //
        // First just append the leaf.
        frozen_subtree_roots.push(leaf);
        // Next, merge the last two subtrees into one. If `num_existing_leaves` has N trailing
        // ones, the carry will happen N times.
        let num_trailing_ones = (!num_existing_leaves).trailing_zeros();
        for _i in 0..num_trailing_ones {
            let right_hash = frozen_subtree_roots.pop().expect("Invalid accumulator.");
            let left_hash = frozen_subtree_roots.pop().expect("Invalid accumulator.");
            let parent_hash = MerkleTreeInternalNode::new(left_hash, right_hash).hash();
            frozen_subtree_roots.push(parent_hash);
        }
    }

    /// Appends a list of new subtrees to the existing accumulator. This is similar to
    /// [`append`](InMemoryAccumulator::append) except that the new leaves themselves are not
    /// known; they are represented by `subtrees`, ordered left to right (so largest first, each
    /// subtree as large as the remaining alignment allows). For example, appending 11 new leaves
    /// `A..K` to a 10-leaf accumulator takes four subtrees: `subtrees[0]` covering `A,B`,
    /// `subtrees[1]` covering `C..F`, `subtrees[2]` covering `G..J`, and `subtrees[3]` the single
    /// leaf `K`. Returns an error if the subtree count is inconsistent with `num_new_leaves` or
    /// the total would exceed `MAX_ACCUMULATOR_LEAVES`.
    pub fn append_subtrees(&self, subtrees: &[HashValue], num_new_leaves: u64) -> Result<Self> {
        ensure!(
            num_new_leaves <= MAX_ACCUMULATOR_LEAVES - self.num_leaves,
            "Too many new leaves. self.num_leaves: {}. num_new_leaves: {}.",
            self.num_leaves,
            num_new_leaves,
        );
        if self.num_leaves == 0 {
            return Self::new(subtrees.to_vec(), num_new_leaves);
        }
        let mut current_subtree_roots = self.frozen_subtree_roots.clone();
        let mut current_num_leaves = self.num_leaves;
        let mut remaining_new_leaves = num_new_leaves;
        let mut subtree_iter = subtrees.iter();
        // Check if we want to combine a new subtree with the rightmost frozen subtree. To do that
        // this new subtree needs to represent `rightmost_frozen_subtree_size` leaves, so we need
        // to have at least this many new leaves remaining.
        let mut rightmost_frozen_subtree_size = 1 << current_num_leaves.trailing_zeros();
        while remaining_new_leaves >= rightmost_frozen_subtree_size {
            // Note that after combining the rightmost frozen subtree of size X with a new subtree,
            // we obtain a subtree of size 2X. If there was already a frozen subtree of size 2X, we
            // need to carry this process further.
            let mut mask = rightmost_frozen_subtree_size;
            let mut current_hash = *subtree_iter
                .next()
                .ok_or_else(|| format_err!("Too few subtrees."))?;
            while current_num_leaves & mask != 0 {
                let left_hash = current_subtree_roots
                    .pop()
                    .expect("This frozen subtree must exist.");
                current_hash = MerkleTreeInternalNode::new(left_hash, current_hash).hash();
                mask <<= 1;
            }
            current_subtree_roots.push(current_hash);
            current_num_leaves += rightmost_frozen_subtree_size;
            remaining_new_leaves -= rightmost_frozen_subtree_size;
            rightmost_frozen_subtree_size = mask;
        }
        // Now all the new subtrees are smaller than the rightmost frozen subtree. We just append
        // all of them. Note that if the number of new subtrees does not actually match the number
        // of new leaves, `Self::new` below will raise an error.
        current_num_leaves += remaining_new_leaves;
        current_subtree_roots.extend(subtree_iter);
        // `Self::new` already returns `Result`; no need to re-wrap with `Ok(..?)`.
        Self::new(current_subtree_roots, current_num_leaves)
    }

    /// Returns the root hash of the accumulator.
    pub fn root_hash(&self) -> HashValue {
        self.root_hash
    }

    /// Returns the version of the last leaf: `num_leaves - 1`, or 0 when the
    /// accumulator is empty.
    pub fn version(&self) -> u64 {
        // Saturating subtraction expresses the "empty stays 0" case branchlessly.
        self.num_leaves().saturating_sub(1)
    }

    /// Computes the root hash of an accumulator given the frozen subtree roots and the number of
    /// leaves in this accumulator.
    fn compute_root_hash(frozen_subtree_roots: &[HashValue], num_leaves: u64) -> HashValue {
        match frozen_subtree_roots.len() {
            0 => return *ACCUMULATOR_PLACEHOLDER_HASH,
            1 => return frozen_subtree_roots[0],
            _ => (),
        }
        // The trailing zeros do not matter since anything below the lowest frozen subtree is
        // already represented by the subtree roots.
        let mut bitmap = num_leaves >> num_leaves.trailing_zeros();
        let mut current_hash = *ACCUMULATOR_PLACEHOLDER_HASH;
        let mut frozen_subtree_iter = frozen_subtree_roots.iter().rev();
        // Walk the bitmap from the lowest frozen subtree upwards, combining
        // with a sibling root (bit set) or a placeholder (bit clear).
        while bitmap > 0 {
            current_hash = if bitmap & 1 != 0 {
                MerkleTreeInternalNode::new(
                    *frozen_subtree_iter
                        .next()
                        .expect("This frozen subtree should exist."),
                    current_hash,
                )
            } else {
                MerkleTreeInternalNode::new(current_hash, *ACCUMULATOR_PLACEHOLDER_HASH)
            }
            .hash();
            bitmap >>= 1;
        }
        current_hash
    }

    /// Returns the set of frozen subtree roots in this accumulator.
    pub fn frozen_subtree_roots(&self) -> &Vec<HashValue> {
        &self.frozen_subtree_roots
    }

    /// Returns the total number of leaves in this accumulator.
    pub fn num_leaves(&self) -> u64 {
        self.num_leaves
    }
}
impl std::fmt::Debug for InMemoryAccumulator {
    /// Formats as `Accumulator { frozen_subtree_roots: …, num_leaves: … }`
    /// (the derived root hash is intentionally omitted).
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "Accumulator {{ frozen_subtree_roots: {roots:?}, num_leaves: {leaves:?} }}",
            roots = self.frozen_subtree_roots,
            leaves = self.num_leaves
        )
    }
}
impl Default for InMemoryAccumulator {
    /// An empty accumulator: zero leaves, no frozen subtrees, placeholder root.
    fn default() -> Self {
        Self::new(Vec::new(), 0).expect("Constructing empty accumulator should work.")
    }
}
| 47.149351 | 134 | 0.468668 |
91fa2099ab33c457aeca79a5ee6d766a75afec25 | 189 | pub mod builder;
pub mod data;
pub mod pack;
pub mod transport;
// Re-export the primary types at this module's root for convenience.
pub use self::builder::DataBuilder;
pub use self::data::{Data, Storage};
pub use self::pack::{new_pack_stream, PackStream};
| 21 | 50 | 0.740741 |
7a5d4d37020ff6c28605d7a21bd6cba5fda0ba75 | 25,682 | //! The equality sigma proof system.
//!
//! An equality proof is defined with respect to two cryptographic objects: a twisted ElGamal
//! ciphertext and a Pedersen commitment. The proof certifies that a given ciphertext and
//! commitment pair encrypts/encodes the same message. To generate the proof, a prover must provide
//! the decryption key for the ciphertext and the Pedersen opening for the commitment.
//!
//! TODO: verify with respect to ciphertext
//!
//! The protocol guarantees computationally soundness (by the hardness of discrete log) and perfect
//! zero-knowledge in the random oracle model.
#[cfg(not(target_arch = "bpf"))]
use {
crate::encryption::{
elgamal::{ElGamalCiphertext, ElGamalKeypair, ElGamalPubkey},
pedersen::{PedersenCommitment, PedersenOpening, G, H},
},
curve25519_dalek::traits::MultiscalarMul,
rand::rngs::OsRng,
zeroize::Zeroize,
};
use {
crate::{sigma_proofs::errors::EqualityProofError, transcript::TranscriptProtocol},
arrayref::{array_ref, array_refs},
curve25519_dalek::{
ristretto::{CompressedRistretto, RistrettoPoint},
scalar::Scalar,
traits::{IsIdentity, VartimeMultiscalarMul},
},
merlin::Transcript,
};
/// Equality proof.
///
/// Contains all the elliptic curve and scalar components that make up the sigma protocol.
#[allow(non_snake_case)]
#[derive(Clone)]
pub struct CtxtCommEqualityProof {
    // Commitments to the prover's random masking factors (`y_s`, `y_x`, `y_r`).
    Y_0: CompressedRistretto,
    Y_1: CompressedRistretto,
    Y_2: CompressedRistretto,
    // Challenge-masked responses for the secret key, the amount, and the
    // Pedersen opening respectively.
    z_s: Scalar,
    z_x: Scalar,
    z_r: Scalar,
}
#[allow(non_snake_case)]
#[cfg(not(target_arch = "bpf"))]
impl CtxtCommEqualityProof {
    /// Equality proof constructor.
    ///
    /// The function does *not* hash the public key, ciphertext, or commitment into the transcript.
    /// For security, the caller (the main protocol) should hash these public components prior to
    /// invoking this constructor.
    ///
    /// This function is randomized. It uses `OsRng` internally to generate random scalars.
    ///
    /// Note that the proof constructor does not take the actual Pedersen commitment as input; it
    /// takes the associated Pedersen opening instead.
    ///
    /// * `source_keypair` - The ElGamal keypair associated with the first to be proved
    /// * `source_ciphertext` - The main ElGamal ciphertext to be proved
    /// * `amount` - The message associated with the ElGamal ciphertext and Pedersen commitment
    /// * `opening` - The opening associated with the main Pedersen commitment to be proved
    /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic
    pub fn new(
        source_keypair: &ElGamalKeypair,
        source_ciphertext: &ElGamalCiphertext,
        amount: u64,
        opening: &PedersenOpening,
        transcript: &mut Transcript,
    ) -> Self {
        transcript.equality_proof_domain_sep();
        // extract the relevant scalar and Ristretto points from the inputs
        let P_source = source_keypair.public.get_point();
        let D_source = source_ciphertext.handle.get_point();
        let s = source_keypair.secret.get_scalar();
        let x = Scalar::from(amount);
        let r = opening.get_scalar();
        // generate random masking factors that also serves as nonces
        let mut y_s = Scalar::random(&mut OsRng);
        let mut y_x = Scalar::random(&mut OsRng);
        let mut y_r = Scalar::random(&mut OsRng);
        let Y_0 = (&y_s * P_source).compress();
        let Y_1 =
            RistrettoPoint::multiscalar_mul(vec![&y_x, &y_s], vec![&(*G), D_source]).compress();
        let Y_2 = RistrettoPoint::multiscalar_mul(vec![&y_x, &y_r], vec![&(*G), &(*H)]).compress();
        // record masking factors in the transcript
        transcript.append_point(b"Y_0", &Y_0);
        transcript.append_point(b"Y_1", &Y_1);
        transcript.append_point(b"Y_2", &Y_2);
        let c = transcript.challenge_scalar(b"c");
        // draw and discard `w` so the prover's transcript stays in sync with
        // the verifier, which uses `w` for batched verification
        transcript.challenge_scalar(b"w");
        // compute the masked values
        let z_s = &(&c * s) + &y_s;
        let z_x = &(&c * &x) + &y_x;
        let z_r = &(&c * r) + &y_r;
        // zeroize random scalars
        y_s.zeroize();
        y_x.zeroize();
        y_r.zeroize();
        CtxtCommEqualityProof {
            Y_0,
            Y_1,
            Y_2,
            z_s,
            z_x,
            z_r,
        }
    }
    /// Equality proof verifier. TODO: wrt commitment
    ///
    /// * `source_pubkey` - The ElGamal pubkey associated with the ciphertext to be proved
    /// * `source_ciphertext` - The main ElGamal ciphertext to be proved
    /// * `destination_commitment` - The main Pedersen commitment to be proved
    /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic
    pub fn verify(
        self,
        source_pubkey: &ElGamalPubkey,
        source_ciphertext: &ElGamalCiphertext,
        destination_commitment: &PedersenCommitment,
        transcript: &mut Transcript,
    ) -> Result<(), EqualityProofError> {
        transcript.equality_proof_domain_sep();
        // extract the relevant scalar and Ristretto points from the inputs
        let P_source = source_pubkey.get_point();
        let C_source = source_ciphertext.commitment.get_point();
        let D_source = source_ciphertext.handle.get_point();
        let C_destination = destination_commitment.get_point();
        // include Y_0, Y_1, Y_2 to transcript and extract challenges
        transcript.validate_and_append_point(b"Y_0", &self.Y_0)?;
        transcript.validate_and_append_point(b"Y_1", &self.Y_1)?;
        transcript.validate_and_append_point(b"Y_2", &self.Y_2)?;
        let c = transcript.challenge_scalar(b"c");
        let w = transcript.challenge_scalar(b"w"); // w used for batch verification
        let ww = &w * &w;
        let w_negated = -&w;
        let ww_negated = -&ww;
        // check that the required algebraic condition holds: the three sigma
        // equations are collapsed into a single multiscalar multiplication,
        // randomized by powers of `w`, that must evaluate to the identity
        let Y_0 = self.Y_0.decompress().ok_or(EqualityProofError::Format)?;
        let Y_1 = self.Y_1.decompress().ok_or(EqualityProofError::Format)?;
        let Y_2 = self.Y_2.decompress().ok_or(EqualityProofError::Format)?;
        let check = RistrettoPoint::vartime_multiscalar_mul(
            vec![
                &self.z_s,           // z_s
                &(-&c),              // -c
                &(-&Scalar::one()),  // -identity
                &(&w * &self.z_x),   // w * z_x
                &(&w * &self.z_s),   // w * z_s
                &(&w_negated * &c),  // -w * c
                &w_negated,          // -w
                &(&ww * &self.z_x),  // ww * z_x
                &(&ww * &self.z_r),  // ww * z_r
                &(&ww_negated * &c), // -ww * c
                &ww_negated,         // -ww
            ],
            vec![
                P_source,      // P_source
                &(*H),         // H
                &Y_0,          // Y_0
                &(*G),         // G
                D_source,      // D_source
                C_source,      // C_source
                &Y_1,          // Y_1
                &(*G),         // G
                &(*H),         // H
                C_destination, // C_destination
                &Y_2,          // Y_2
            ],
        );
        if check.is_identity() {
            Ok(())
        } else {
            Err(EqualityProofError::AlgebraicRelation)
        }
    }
    /// Serializes the proof into 192 bytes: `Y_0 || Y_1 || Y_2 || z_s || z_x || z_r`.
    pub fn to_bytes(&self) -> [u8; 192] {
        let mut buf = [0_u8; 192];
        buf[..32].copy_from_slice(self.Y_0.as_bytes());
        buf[32..64].copy_from_slice(self.Y_1.as_bytes());
        buf[64..96].copy_from_slice(self.Y_2.as_bytes());
        buf[96..128].copy_from_slice(self.z_s.as_bytes());
        buf[128..160].copy_from_slice(self.z_x.as_bytes());
        buf[160..192].copy_from_slice(self.z_r.as_bytes());
        buf
    }
    /// Deserializes a proof from the `to_bytes` layout. Returns
    /// `EqualityProofError::Format` if any scalar is non-canonical; panics
    /// (via `array_ref!`) if `bytes` is shorter than 192 bytes.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, EqualityProofError> {
        let bytes = array_ref![bytes, 0, 192];
        let (Y_0, Y_1, Y_2, z_s, z_x, z_r) = array_refs![bytes, 32, 32, 32, 32, 32, 32];
        let Y_0 = CompressedRistretto::from_slice(Y_0);
        let Y_1 = CompressedRistretto::from_slice(Y_1);
        let Y_2 = CompressedRistretto::from_slice(Y_2);
        let z_s = Scalar::from_canonical_bytes(*z_s).ok_or(EqualityProofError::Format)?;
        let z_x = Scalar::from_canonical_bytes(*z_x).ok_or(EqualityProofError::Format)?;
        let z_r = Scalar::from_canonical_bytes(*z_r).ok_or(EqualityProofError::Format)?;
        Ok(CtxtCommEqualityProof {
            Y_0,
            Y_1,
            Y_2,
            z_s,
            z_x,
            z_r,
        })
    }
}
/// Equality proof.
///
/// Contains all the elliptic curve and scalar components that make up the sigma protocol.
#[allow(non_snake_case)]
#[derive(Clone)]
pub struct CtxtCtxtEqualityProof {
    // Commitments to the prover's random masking factors (`y_s`, `y_x`, `y_r`);
    // `Y_3` binds the destination pubkey's decryption handle.
    Y_0: CompressedRistretto,
    Y_1: CompressedRistretto,
    Y_2: CompressedRistretto,
    Y_3: CompressedRistretto,
    // Challenge-masked responses for the secret key, the amount, and the opening.
    z_s: Scalar,
    z_x: Scalar,
    z_r: Scalar,
}
#[allow(non_snake_case)]
#[cfg(not(target_arch = "bpf"))]
impl CtxtCtxtEqualityProof {
    /// Equality proof constructor.
    ///
    /// The function does *not* hash the public key, ciphertext, or commitment into the transcript.
    /// For security, the caller (the main protocol) should hash these public components prior to
    /// invoking this constructor.
    ///
    /// This function is randomized. It uses `OsRng` internally to generate random scalars.
    ///
    /// Note that the proof constructor does not take the actual Pedersen commitment as input; it
    /// takes the associated Pedersen opening instead.
    ///
    /// * `source_keypair` - The ElGamal keypair associated with the first ciphertext to be proved
    /// * `destination_pubkey` - The ElGamal pubkey associated with the second ElGamal ciphertext
    /// * `source_ciphertext` - The first ElGamal ciphertext
    /// * `amount` - The message associated with the ElGamal ciphertext and Pedersen commitment
    /// * `destination_opening` - The opening associated with the second ElGamal ciphertext
    /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic
    pub fn new(
        source_keypair: &ElGamalKeypair,
        destination_pubkey: &ElGamalPubkey,
        source_ciphertext: &ElGamalCiphertext,
        amount: u64,
        destination_opening: &PedersenOpening,
        transcript: &mut Transcript,
    ) -> Self {
        transcript.equality_proof_domain_sep();
        // extract the relevant scalar and Ristretto points from the inputs
        let P_source = source_keypair.public.get_point();
        let D_source = source_ciphertext.handle.get_point();
        let P_destination = destination_pubkey.get_point();
        let s = source_keypair.secret.get_scalar();
        let x = Scalar::from(amount);
        let r = destination_opening.get_scalar();
        // generate random masking factors that also serves as nonces
        let mut y_s = Scalar::random(&mut OsRng);
        let mut y_x = Scalar::random(&mut OsRng);
        let mut y_r = Scalar::random(&mut OsRng);
        let Y_0 = (&y_s * P_source).compress();
        let Y_1 =
            RistrettoPoint::multiscalar_mul(vec![&y_x, &y_s], vec![&(*G), D_source]).compress();
        let Y_2 = RistrettoPoint::multiscalar_mul(vec![&y_x, &y_r], vec![&(*G), &(*H)]).compress();
        let Y_3 = (&y_r * P_destination).compress();
        // record masking factors in the transcript
        transcript.append_point(b"Y_0", &Y_0);
        transcript.append_point(b"Y_1", &Y_1);
        transcript.append_point(b"Y_2", &Y_2);
        transcript.append_point(b"Y_3", &Y_3);
        let c = transcript.challenge_scalar(b"c");
        // draw and discard `w` so the prover's transcript stays in sync with
        // the verifier, which uses `w` for batched verification
        transcript.challenge_scalar(b"w");
        // compute the masked values
        let z_s = &(&c * s) + &y_s;
        let z_x = &(&c * &x) + &y_x;
        let z_r = &(&c * r) + &y_r;
        // zeroize random scalars
        y_s.zeroize();
        y_x.zeroize();
        y_r.zeroize();
        CtxtCtxtEqualityProof {
            Y_0,
            Y_1,
            Y_2,
            Y_3,
            z_s,
            z_x,
            z_r,
        }
    }
    /// Equality proof verifier.
    ///
    /// * `source_pubkey` - The ElGamal pubkey associated with the first ciphertext to be proved
    /// * `destination_pubkey` - The ElGamal pubkey associated with the second ciphertext to be proved
    /// * `source_ciphertext` - The first ElGamal ciphertext to be proved
    /// * `destination_ciphertext` - The second ElGamal ciphertext to be proved
    /// * `transcript` - The transcript that does the bookkeeping for the Fiat-Shamir heuristic
    pub fn verify(
        self,
        source_pubkey: &ElGamalPubkey,
        destination_pubkey: &ElGamalPubkey,
        source_ciphertext: &ElGamalCiphertext,
        destination_ciphertext: &ElGamalCiphertext,
        transcript: &mut Transcript,
    ) -> Result<(), EqualityProofError> {
        transcript.equality_proof_domain_sep();
        // extract the relevant scalar and Ristretto points from the inputs
        let P_source = source_pubkey.get_point();
        let C_source = source_ciphertext.commitment.get_point();
        let D_source = source_ciphertext.handle.get_point();
        let P_destination = destination_pubkey.get_point();
        let C_destination = destination_ciphertext.commitment.get_point();
        let D_destination = destination_ciphertext.handle.get_point();
        // include Y_0, Y_1, Y_2 to transcript and extract challenges
        transcript.validate_and_append_point(b"Y_0", &self.Y_0)?;
        transcript.validate_and_append_point(b"Y_1", &self.Y_1)?;
        transcript.validate_and_append_point(b"Y_2", &self.Y_2)?;
        transcript.validate_and_append_point(b"Y_3", &self.Y_3)?;
        let c = transcript.challenge_scalar(b"c");
        let w = transcript.challenge_scalar(b"w"); // w used for batch verification
        let ww = &w * &w;
        let www = &w * &ww;
        let w_negated = -&w;
        let ww_negated = -&ww;
        let www_negated = -&www;
        // check that the required algebraic condition holds: the four sigma
        // equations are collapsed into a single multiscalar multiplication,
        // randomized by powers of `w`, that must evaluate to the identity
        let Y_0 = self.Y_0.decompress().ok_or(EqualityProofError::Format)?;
        let Y_1 = self.Y_1.decompress().ok_or(EqualityProofError::Format)?;
        let Y_2 = self.Y_2.decompress().ok_or(EqualityProofError::Format)?;
        let Y_3 = self.Y_3.decompress().ok_or(EqualityProofError::Format)?;
        let check = RistrettoPoint::vartime_multiscalar_mul(
            vec![
                &self.z_s,            // z_s
                &(-&c),               // -c
                &(-&Scalar::one()),   // -identity
                &(&w * &self.z_x),    // w * z_x
                &(&w * &self.z_s),    // w * z_s
                &(&w_negated * &c),   // -w * c
                &w_negated,           // -w
                &(&ww * &self.z_x),   // ww * z_x
                &(&ww * &self.z_r),   // ww * z_r
                &(&ww_negated * &c),  // -ww * c
                &ww_negated,          // -ww
                &(&www * &self.z_r),  // www * z_r
                &(&www_negated * &c), // -www * c
                &www_negated,
            ],
            vec![
                P_source,      // P_source
                &(*H),         // H
                &Y_0,          // Y_0
                &(*G),         // G
                D_source,      // D_source
                C_source,      // C_source
                &Y_1,          // Y_1
                &(*G),         // G
                &(*H),         // H
                C_destination, // C_destination
                &Y_2,          // Y_2
                P_destination, // P_destination
                D_destination, // D_destination
                &Y_3,          // Y_3
            ],
        );
        if check.is_identity() {
            Ok(())
        } else {
            Err(EqualityProofError::AlgebraicRelation)
        }
    }
    /// Serializes the proof into 224 bytes: `Y_0 || Y_1 || Y_2 || Y_3 || z_s || z_x || z_r`.
    pub fn to_bytes(&self) -> [u8; 224] {
        let mut buf = [0_u8; 224];
        buf[..32].copy_from_slice(self.Y_0.as_bytes());
        buf[32..64].copy_from_slice(self.Y_1.as_bytes());
        buf[64..96].copy_from_slice(self.Y_2.as_bytes());
        buf[96..128].copy_from_slice(self.Y_3.as_bytes());
        buf[128..160].copy_from_slice(self.z_s.as_bytes());
        buf[160..192].copy_from_slice(self.z_x.as_bytes());
        buf[192..224].copy_from_slice(self.z_r.as_bytes());
        buf
    }
    /// Deserializes a proof from the `to_bytes` layout. Returns
    /// `EqualityProofError::Format` if any scalar is non-canonical; panics
    /// (via `array_ref!`) if `bytes` is shorter than 224 bytes.
    pub fn from_bytes(bytes: &[u8]) -> Result<Self, EqualityProofError> {
        let bytes = array_ref![bytes, 0, 224];
        let (Y_0, Y_1, Y_2, Y_3, z_s, z_x, z_r) = array_refs![bytes, 32, 32, 32, 32, 32, 32, 32];
        let Y_0 = CompressedRistretto::from_slice(Y_0);
        let Y_1 = CompressedRistretto::from_slice(Y_1);
        let Y_2 = CompressedRistretto::from_slice(Y_2);
        let Y_3 = CompressedRistretto::from_slice(Y_3);
        let z_s = Scalar::from_canonical_bytes(*z_s).ok_or(EqualityProofError::Format)?;
        let z_x = Scalar::from_canonical_bytes(*z_x).ok_or(EqualityProofError::Format)?;
        let z_r = Scalar::from_canonical_bytes(*z_r).ok_or(EqualityProofError::Format)?;
        Ok(CtxtCtxtEqualityProof {
            Y_0,
            Y_1,
            Y_2,
            Y_3,
            z_s,
            z_x,
            z_r,
        })
    }
}
#[cfg(test)]
mod test {
    use {
        super::*,
        crate::encryption::{elgamal::ElGamalSecretKey, pedersen::Pedersen},
    };

    #[test]
    fn test_ciphertext_commitment_equality_proof_correctness() {
        // success case: ciphertext and commitment encode the same message.
        let source_keypair = ElGamalKeypair::new_rand();
        let message: u64 = 55;
        let source_ciphertext = source_keypair.public.encrypt(message);
        let (destination_commitment, destination_opening) = Pedersen::new(message);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCommEqualityProof::new(
            &source_keypair,
            &source_ciphertext,
            message,
            &destination_opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &source_keypair.public,
                &source_ciphertext,
                &destination_commitment,
                &mut verifier_transcript
            )
            .is_ok());
        // fail case: encrypted and committed messages are different
        let source_keypair = ElGamalKeypair::new_rand();
        let encrypted_message: u64 = 55;
        let committed_message: u64 = 77;
        let source_ciphertext = source_keypair.public.encrypt(encrypted_message);
        let (destination_commitment, destination_opening) = Pedersen::new(committed_message);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        // Previously this passed the shadowed `message` binding from the
        // success case above (same value); use `encrypted_message` explicitly
        // so this fail case does not silently depend on an unrelated binding.
        let proof = CtxtCommEqualityProof::new(
            &source_keypair,
            &source_ciphertext,
            encrypted_message,
            &destination_opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &source_keypair.public,
                &source_ciphertext,
                &destination_commitment,
                &mut verifier_transcript
            )
            .is_err());
    }

    #[test]
    fn test_ciphertext_commitment_equality_proof_edge_cases() {
        // if ElGamal public key zero (public key is invalid), then the proof should always reject
        let public = ElGamalPubkey::from_bytes(&[0u8; 32]).unwrap();
        let secret = ElGamalSecretKey::new_rand();
        let elgamal_keypair = ElGamalKeypair { public, secret };
        let message: u64 = 55;
        let ciphertext = elgamal_keypair.public.encrypt(message);
        let (commitment, opening) = Pedersen::new(message);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCommEqualityProof::new(
            &elgamal_keypair,
            &ciphertext,
            message,
            &opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &elgamal_keypair.public,
                &ciphertext,
                &commitment,
                &mut verifier_transcript
            )
            .is_err());
        // if ciphertext is all-zero (valid commitment of 0) and commitment is also all-zero, then
        // the proof should still accept
        let elgamal_keypair = ElGamalKeypair::new_rand();
        let message: u64 = 0;
        let ciphertext = ElGamalCiphertext::from_bytes(&[0u8; 64]).unwrap();
        let commitment = PedersenCommitment::from_bytes(&[0u8; 32]).unwrap();
        let opening = PedersenOpening::from_bytes(&[0u8; 32]).unwrap();
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCommEqualityProof::new(
            &elgamal_keypair,
            &ciphertext,
            message,
            &opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &elgamal_keypair.public,
                &ciphertext,
                &commitment,
                &mut verifier_transcript
            )
            .is_ok());
        // if commitment is all-zero and the ciphertext is a correct encryption of 0, then the
        // proof should still accept
        let elgamal_keypair = ElGamalKeypair::new_rand();
        let message: u64 = 0;
        let ciphertext = elgamal_keypair.public.encrypt(message);
        let commitment = PedersenCommitment::from_bytes(&[0u8; 32]).unwrap();
        let opening = PedersenOpening::from_bytes(&[0u8; 32]).unwrap();
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCommEqualityProof::new(
            &elgamal_keypair,
            &ciphertext,
            message,
            &opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &elgamal_keypair.public,
                &ciphertext,
                &commitment,
                &mut verifier_transcript
            )
            .is_ok());
        // if ciphertext is all zero and commitment correctly encodes 0, then the proof should
        // still accept
        let elgamal_keypair = ElGamalKeypair::new_rand();
        let message: u64 = 0;
        let ciphertext = ElGamalCiphertext::from_bytes(&[0u8; 64]).unwrap();
        let (commitment, opening) = Pedersen::new(message);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCommEqualityProof::new(
            &elgamal_keypair,
            &ciphertext,
            message,
            &opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &elgamal_keypair.public,
                &ciphertext,
                &commitment,
                &mut verifier_transcript
            )
            .is_ok());
    }

    #[test]
    fn test_ciphertext_ciphertext_equality_proof_correctness() {
        // success case: both ciphertexts encrypt the same message.
        let source_keypair = ElGamalKeypair::new_rand();
        let destination_keypair = ElGamalKeypair::new_rand();
        let message: u64 = 55;
        let source_ciphertext = source_keypair.public.encrypt(message);
        let destination_opening = PedersenOpening::new_rand();
        let destination_ciphertext = destination_keypair
            .public
            .encrypt_with(message, &destination_opening);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        let proof = CtxtCtxtEqualityProof::new(
            &source_keypair,
            &destination_keypair.public,
            &source_ciphertext,
            message,
            &destination_opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &source_keypair.public,
                &destination_keypair.public,
                &source_ciphertext,
                &destination_ciphertext,
                &mut verifier_transcript
            )
            .is_ok());
        // fail case: encrypted and committed messages are different
        let source_message: u64 = 55;
        let destination_message: u64 = 77;
        let source_ciphertext = source_keypair.public.encrypt(source_message);
        let destination_opening = PedersenOpening::new_rand();
        let destination_ciphertext = destination_keypair
            .public
            .encrypt_with(destination_message, &destination_opening);
        let mut prover_transcript = Transcript::new(b"Test");
        let mut verifier_transcript = Transcript::new(b"Test");
        // Previously this passed the outer `message` binding (same value);
        // use `source_message` explicitly so the intent of the fail case is
        // clear and independent of the success case above.
        let proof = CtxtCtxtEqualityProof::new(
            &source_keypair,
            &destination_keypair.public,
            &source_ciphertext,
            source_message,
            &destination_opening,
            &mut prover_transcript,
        );
        assert!(proof
            .verify(
                &source_keypair.public,
                &destination_keypair.public,
                &source_ciphertext,
                &destination_ciphertext,
                &mut verifier_transcript
            )
            .is_err());
    }
}
| 36.428369 | 102 | 0.581808 |
283a1148a9a911283aac01d82738c38aee5362b1 | 4,966 | use std::collections::HashMap;
use std::error::Error;
use std::iter;
use std::rc::Rc;
use super::{Route, RouteBuffer, StopRecord, TripBuffer, TripId, TripRecord};
use crate::line::LineId;
use crate::location::{Location, LocationId};
use crate::service::{Service, ServiceId};
use crate::shape::{Shape, ShapeId};
use crate::utils::{Action, Dataset};
/// Builds the per-line `Route` lists by combining `trips.txt` and
/// `stop_times.txt` records with the already-imported services, locations and
/// shapes.
pub(crate) struct Importer<'a> {
    // Calendar services keyed by id; trip records reference these.
    services: &'a HashMap<ServiceId, Rc<Service>>,
    // Stop locations keyed by id; stop-time records reference these.
    locations: &'a HashMap<LocationId, Rc<Location>>,
    // Trip shapes keyed by id.
    shapes: &'a HashMap<ShapeId, Shape>,
    // Maps a GTFS line id to its index in the output vector.
    id_mapping: &'a HashMap<LineId, usize>,
    // Total number of lines; determines the length of the output vector.
    line_count: usize,
}
impl<'a> Importer<'a> {
pub(crate) fn new(
services: &'a HashMap<ServiceId, Rc<Service>>,
locations: &'a HashMap<LocationId, Rc<Location>>,
shapes: &'a HashMap<ShapeId, Shape>,
id_mapping: &'a HashMap<LineId, usize>,
line_count: usize,
) -> Importer<'a> {
Importer {
services,
locations,
shapes,
id_mapping,
line_count,
}
}
fn import_trip_buffers(
&self,
dataset: &mut impl Dataset,
) -> Result<HashMap<TripId, TripBuffer>, Box<dyn Error>> {
let mut buffers = HashMap::new();
let action = Action::start("Importing trips");
for result in action.read_csv(dataset, "trips.txt")? {
let record: TripRecord = result?;
record.import(self.id_mapping, self.services, &mut buffers);
}
action.complete(&format!("Imported {} trips", buffers.len()));
Ok(buffers)
}
fn add_trip_stops(
&self,
dataset: &mut impl Dataset,
buffers: &mut HashMap<TripId, TripBuffer>,
) -> Result<(), Box<dyn Error>> {
let action = Action::start("Importing trip stops");
for result in action.read_csv(dataset, "stop_times.txt")? {
let record: StopRecord = result?;
record.import(self.locations, buffers);
}
action.complete("Imported trip stops");
Ok(())
}
fn combine_into_routes(&self, buffers: HashMap<TripId, TripBuffer>) -> Vec<Vec<Route>> {
let mut action = Action::start("Assigning trips to their lines");
let mut route_buffers = iter::repeat_with(RouteBuffer::new)
.take(self.line_count)
.collect();
for (_, buffer) in action.wrap_iter(buffers) {
buffer.create_and_place_trip(&self.shapes, &mut route_buffers);
}
action.complete("Assigned trips to their lines");
let mut action = Action::start("Merging trips into routes");
let routes = action
.wrap_iter(route_buffers)
.map(|route_buffer| route_buffer.into_routes())
.collect();
action.complete("Merged trips into routes");
routes
}
pub(crate) fn import(
self,
dataset: &mut impl Dataset,
) -> Result<Vec<Vec<Route>>, Box<dyn Error>> {
let mut buffers = self.import_trip_buffers(dataset)?;
self.add_trip_stops(dataset, &mut buffers)?;
Ok(self.combine_into_routes(buffers))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::dataset;
    use crate::fixtures::{locations, routes, services, shapes};
    use test_utils::{assert_eq_alternate, map};

    // End-to-end check: two opposite-direction trips of tram 12 are imported
    // from an inline dataset and merged into the expected single route set.
    #[test]
    fn test_import_trip_buffers() {
        let mut dataset = dataset!(
            trips:
                trip_id, route_id, service_id, shape_id, direction_id;
                1, tram_12, mon_fri, tram_12::oranienburger_tor_am_kupfergraben, 0;
                2, tram_12, mon_fri, tram_12::am_kupfergraben_oranienburger_tor, 1
            stop_times:
                trip_id, stop_id, arrival_time, departure_time;
                1, oranienburger_tor, "9:02:00", "9:02:00";
                1, friedrichstr, "9:04:00", "9:04:00";
                1, universitaetsstr, "9:06:00", "9:06:00";
                1, am_kupfergraben, "9:07:00", "9:07:00";
                2, am_kupfergraben, "8:34:00", "8:34:00";
                2, georgenstr_am_kupfergraben, "8:35:00", "8:35:00";
                2, friedrichstr, "8:38:00", "8:38:00";
                2, oranienburger_tor, "8:40:00", "8:40:00"
        );
        // Single line ("tram_12") mapped to output index 0.
        let id_mapping = map! {
            "tram_12" => 0,
        };
        let services = services::by_id();
        let locations = locations::by_id();
        let shapes = shapes::by_id();
        let importer = Importer::new(&services, &locations, &shapes, &id_mapping, 1);
        let routes = importer.import(&mut dataset).unwrap();
        assert_eq!(routes.len(), 1);
        assert_eq_alternate!(
            routes[0],
            vec![routes::tram_12::oranienburger_tor_am_kupfergraben()],
        );
    }
}
| 35.985507 | 93 | 0.555779 |
f7bd8cc711064e2bb362da5ed1c0b8bff97fed22 | 25,380 | use crate::types;
use actix_web::{get, web, HttpResponse};
use chrono::prelude::*;
use chrono_utilities::naive::DateTransitions;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
/// Aggregated COVID-19 case counts for one period (a month in the responses;
/// a single day while the upstream records are being folded).
#[derive(Serialize, Debug)]
struct ResponseStructure {
    // Period label: "YYYY-MM" in responses, "YYYY-MM-DD" for daily records.
    month: String,
    positive: i32,
    recovered: i32,
    deaths: i32,
    active: i32,
}
/// Mirrors the `update` object of the upstream API payload; `harian` holds
/// the daily records.
#[derive(Serialize, Deserialize, Debug)]
struct Update {
    harian: Vec<types::source_api::Harian>,
}
/// Path parameters for the `/{year}` route.
#[derive(Deserialize)]
pub struct YearPath {
    year: u16,
}
/// Path parameters for the `/{year}/{month}` route.
#[derive(Deserialize)]
pub struct YearMonthPath {
    year: u16,
    month: u8,
}
/// Optional query-string bounds in `YYYY.M` form (e.g. `?since=2021.3`).
#[derive(Deserialize)]
pub struct QueryParams {
    since: Option<String>,
    upto: Option<String>,
}
/// Numeric form of a `YYYY.M` query parameter after parsing/validation.
#[derive(Debug)]
struct ParsedQueryParam {
    year: u16,
    month: u8,
}
// NOTE(review): every field is `Clone` (the numeric ones are `Copy`), so this
// hand-written impl could be replaced by adding `Clone` to the struct's
// `#[derive(...)]`; kept as-is here to avoid touching the struct definition.
impl std::clone::Clone for ResponseStructure {
    fn clone(&self) -> ResponseStructure {
        ResponseStructure {
            month: self.month.clone(),
            active: self.active,
            recovered: self.recovered,
            deaths: self.deaths,
            positive: self.positive,
        }
    }
}
/// `GET /` — monthly aggregates of the national COVID-19 numbers.
///
/// Optional `since`/`upto` query params use the form `YYYY.M` (e.g. `2021.5`)
/// and bound the aggregated months inclusively. Invalid params yield
/// `400 Bad Request`; upstream fetch/decode failures yield `500`.
#[get("")]
pub async fn index(params: web::Query<QueryParams>) -> HttpResponse {
    // Parse "YYYY.M" into [year, month]; unparsable pieces become 0, which the
    // validity checks below reject.
    let parsed_since_param = params
        .since
        .clone()
        .unwrap_or_else(|| String::from("0.0"))
        .split('.')
        .map(|x| x.parse::<u16>().unwrap_or(0))
        .collect::<Vec<u16>>();
    let parsed_upto_param = params
        .upto
        .clone()
        .unwrap_or_else(|| String::from("0.0"))
        .split('.')
        .map(|x| x.parse::<u16>().unwrap_or(0))
        .collect::<Vec<u16>>();
    // Reject malformed params (non-numeric pieces or not exactly "Y.M").
    if (params.since.is_some()
        && (parsed_since_param.contains(&0) || parsed_since_param.len() != 2))
        || (params.upto.is_some()
            && (parsed_upto_param.contains(&0) || parsed_upto_param.len() != 2))
    {
        return HttpResponse::BadRequest()
            .status(reqwest::StatusCode::BAD_REQUEST)
            .body("Invalid query parameter(s)");
    }
    let valid_years = HashSet::from(crate::constants::YEARS_LIST);
    let valid_months = HashSet::from(crate::constants::MONTHS_LIST);
    let parsed_since_param = ParsedQueryParam {
        year: parsed_since_param[0],
        month: parsed_since_param[1] as u8,
    };
    let parsed_upto_param = ParsedQueryParam {
        year: parsed_upto_param[0],
        month: parsed_upto_param[1] as u8,
    };
    // Params must also fall inside the supported year/month ranges.
    let since_param_is_valid = valid_years.contains(&parsed_since_param.year)
        && valid_months.contains(&parsed_since_param.month);
    let upto_param_is_valid = valid_years.contains(&parsed_upto_param.year)
        && valid_months.contains(&parsed_upto_param.month);
    if (params.since.is_some() && !since_param_is_valid)
        || (params.upto.is_some() && !upto_param_is_valid)
    {
        return HttpResponse::BadRequest()
            .status(reqwest::StatusCode::BAD_REQUEST)
            .body("Invalid query parameter(s)");
    }
    type HandlerResponse = types::HandlerResponseTemplate<Vec<ResponseStructure>>;
    #[derive(Serialize, Deserialize, Debug)]
    struct APIResponse {
        update: Update,
    }
    let resp = reqwest::get(crate::constants::COVID_API_ENDPOINT).await;
    match resp {
        Err(_) => HttpResponse::InternalServerError()
            .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
            .body("Could not get the data, please retry in a few minutes"),
        Ok(raw_response) => match raw_response.json::<APIResponse>().await {
            Err(_) => HttpResponse::InternalServerError()
                .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
                .body("There's something wrong with us, hang tight"),
            Ok(json_response) => {
                // Convert each upstream daily record into a ResponseStructure
                // keyed by its "YYYY-MM-DD" date, keeping only days inside the
                // optional [since, upto] month range.
                let new_harian: Vec<ResponseStructure> = json_response
                    .update
                    .harian
                    .into_iter()
                    .map(|daily_item| ResponseStructure {
                        month: DateTime::parse_from_rfc3339(&daily_item.key_as_string)
                            .unwrap()
                            .format("%Y-%m-%d")
                            .to_string(),
                        positive: daily_item.jumlah_positif.value as i32,
                        recovered: daily_item.jumlah_sembuh.value as i32,
                        deaths: daily_item.jumlah_meninggal.value as i32,
                        active: daily_item.jumlah_dirawat.value as i32,
                    })
                    .filter(|daily| {
                        let parsed_daily_month =
                            NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d").unwrap();
                        // Both bounds given: keep days between the first day of
                        // the `since` month and the last day of the `upto` month.
                        if params.since.is_some() && params.upto.is_some() {
                            let since_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-01",
                                    parsed_since_param.year, parsed_since_param.month
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let upto_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-{}",
                                    parsed_upto_param.year,
                                    parsed_upto_param.month,
                                    NaiveDate::from_ymd(
                                        parsed_upto_param.year as i32,
                                        parsed_upto_param.month as u32,
                                        1
                                    )
                                    .last_day_of_month()
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let since_compared = parsed_daily_month
                                .signed_duration_since(since_date)
                                .num_days();
                            let upto_compared = parsed_daily_month
                                .signed_duration_since(upto_date)
                                .num_days();
                            return since_compared >= 0 && upto_compared <= 0;
                        }
                        // Only `since` given.
                        if params.since.is_some() {
                            let since_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-01",
                                    parsed_since_param.year, parsed_since_param.month
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let since_compared = parsed_daily_month
                                .signed_duration_since(since_date)
                                .num_days();
                            if since_compared < 0 {
                                return false;
                            }
                            return true;
                        }
                        // Only `upto` given.
                        if params.upto.is_some() {
                            let upto_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-{}",
                                    parsed_upto_param.year,
                                    parsed_upto_param.month,
                                    NaiveDate::from_ymd(
                                        parsed_upto_param.year as i32,
                                        parsed_upto_param.month as u32,
                                        1
                                    )
                                    .last_day_of_month()
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let upto_compared = parsed_daily_month
                                .signed_duration_since(upto_date)
                                .num_days();
                            if upto_compared > 0 {
                                return false;
                            }
                            return true;
                        }
                        true
                    })
                    .collect();
                // Distinct years present in the filtered data, ascending.
                let mut years_list: Vec<u16> = new_harian
                    .iter()
                    .map(|daily_item| {
                        NaiveDate::parse_from_str(&daily_item.month, "%Y-%m-%d")
                            .unwrap()
                            .year() as u16
                    })
                    .collect::<HashSet<u16>>()
                    .into_iter()
                    .collect();
                years_list.sort_unstable();
                let mut to_return: Vec<ResponseStructure> = Vec::new();
                // For every (year, month) pair present, fold the matching
                // daily records into one aggregate row.
                years_list.iter().for_each(|year| {
                    let cloned_harian: Vec<ResponseStructure> = new_harian.clone();
                    let current_year_harian: Vec<ResponseStructure> = cloned_harian
                        .into_iter()
                        .filter(|daily| {
                            NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d")
                                .unwrap()
                                .year() as u16
                                == *year
                        })
                        .collect();
                    let mut months_list: Vec<u32> = current_year_harian
                        .iter()
                        .map(|daily_item| {
                            NaiveDate::parse_from_str(&daily_item.month, "%Y-%m-%d")
                                .unwrap()
                                .month()
                        })
                        .collect::<HashSet<_>>()
                        .into_iter()
                        .collect();
                    months_list.sort_unstable();
                    months_list.iter().for_each(|month| {
                        let folded = new_harian
                            .iter()
                            .filter(|daily| {
                                let parsed =
                                    NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d").unwrap();
                                parsed.year().to_string() == *year.to_string()
                                    && parsed.month().to_string() == *month.to_string()
                            })
                            .fold(
                                ResponseStructure {
                                    month: format!("{}-{:0>2}", *year, month),
                                    positive: 0,
                                    recovered: 0,
                                    deaths: 0,
                                    active: 0,
                                },
                                |mut acc, next| {
                                    acc.positive += next.positive;
                                    acc.recovered += next.recovered;
                                    acc.deaths += next.deaths;
                                    acc.active += next.active;
                                    acc
                                },
                            );
                        to_return.push(folded);
                    });
                });
                HttpResponse::Ok().status(reqwest::StatusCode::OK).body(
                    serde_json::to_string(&HandlerResponse {
                        ok: true,
                        data: to_return,
                        message: String::from("success"),
                    })
                    .unwrap(),
                )
            }
        },
    }
}
/// `GET /{year}` — monthly aggregates restricted to one year.
///
/// `since`/`upto` behave as in `index`, with the extra requirement that their
/// year component equals the path year.
#[get("/{year}")]
pub async fn specific_year(
    path: web::Path<YearPath>,
    params: web::Query<QueryParams>,
) -> HttpResponse {
    let selected_year = path.year;
    // Parse "YYYY.M" into [year, month]; unparsable pieces become 0, which the
    // validity checks below reject.
    let parsed_since_param = params
        .since
        .clone()
        .unwrap_or_else(|| String::from("0.0"))
        .split('.')
        .map(|x| x.parse::<u16>().unwrap_or(0))
        .collect::<Vec<u16>>();
    let parsed_upto_param = params
        .upto
        .clone()
        .unwrap_or_else(|| String::from("0.0"))
        .split('.')
        .map(|x| x.parse::<u16>().unwrap_or(0))
        .collect::<Vec<u16>>();
    if (params.since.is_some()
        && (parsed_since_param.contains(&0) || parsed_since_param.len() != 2))
        || (params.upto.is_some()
            && (parsed_upto_param.contains(&0) || parsed_upto_param.len() != 2))
    {
        return HttpResponse::BadRequest()
            .status(reqwest::StatusCode::BAD_REQUEST)
            .body("Invalid query parameter(s)");
    }
    let valid_years = HashSet::from(crate::constants::YEARS_LIST);
    let valid_months = HashSet::from(crate::constants::MONTHS_LIST);
    let parsed_since_param = ParsedQueryParam {
        year: parsed_since_param[0],
        month: parsed_since_param[1] as u8,
    };
    let parsed_upto_param = ParsedQueryParam {
        year: parsed_upto_param[0],
        month: parsed_upto_param[1] as u8,
    };
    // Unlike `index`, the bounds must also match the year in the path.
    let since_param_is_valid = parsed_since_param.year == selected_year
        && valid_years.contains(&parsed_since_param.year)
        && valid_months.contains(&parsed_since_param.month);
    let upto_param_is_valid = parsed_upto_param.year == selected_year
        && valid_years.contains(&parsed_upto_param.year)
        && valid_months.contains(&parsed_upto_param.month);
    if (params.since.is_some() && !since_param_is_valid)
        || (params.upto.is_some() && !upto_param_is_valid)
    {
        return HttpResponse::BadRequest()
            .status(reqwest::StatusCode::BAD_REQUEST)
            .body("Invalid query parameter(s)");
    }
    type HandlerResponse = types::HandlerResponseTemplate<Vec<ResponseStructure>>;
    #[derive(Serialize, Deserialize, Debug)]
    struct APIResponse {
        update: Update,
    }
    let resp = reqwest::get(crate::constants::COVID_API_ENDPOINT).await;
    match resp {
        Err(_) => HttpResponse::InternalServerError()
            .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
            .body("Could not get the data, please retry in a few minutes"),
        Ok(raw_response) => match raw_response.json::<APIResponse>().await {
            Err(_) => HttpResponse::InternalServerError()
                .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
                .body("There's something wrong with us, hang tight"),
            Ok(json_response) => {
                // Keep only days of the selected year, then apply the optional
                // [since, upto] month range, as in `index`.
                let new_harian: Vec<ResponseStructure> = json_response
                    .update
                    .harian
                    .into_iter()
                    .filter(|daily_item| {
                        DateTime::parse_from_rfc3339(&daily_item.key_as_string)
                            .unwrap()
                            .year()
                            == selected_year as i32
                    })
                    .map(|daily_item| ResponseStructure {
                        month: DateTime::parse_from_rfc3339(&daily_item.key_as_string)
                            .unwrap()
                            .format("%Y-%m-%d")
                            .to_string(),
                        positive: daily_item.jumlah_positif.value as i32,
                        recovered: daily_item.jumlah_sembuh.value as i32,
                        deaths: daily_item.jumlah_meninggal.value as i32,
                        active: daily_item.jumlah_dirawat.value as i32,
                    })
                    .filter(|daily| {
                        let parsed_daily_month =
                            NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d").unwrap();
                        if params.since.is_some() && params.upto.is_some() {
                            let since_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-01",
                                    parsed_since_param.year, parsed_since_param.month
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let upto_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-{}",
                                    parsed_upto_param.year,
                                    parsed_upto_param.month,
                                    NaiveDate::from_ymd(
                                        parsed_upto_param.year as i32,
                                        parsed_upto_param.month as u32,
                                        1
                                    )
                                    .last_day_of_month()
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let since_compared = parsed_daily_month
                                .signed_duration_since(since_date)
                                .num_days();
                            let upto_compared = parsed_daily_month
                                .signed_duration_since(upto_date)
                                .num_days();
                            return since_compared >= 0 && upto_compared <= 0;
                        }
                        if params.since.is_some() {
                            let since_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-01",
                                    parsed_since_param.year, parsed_since_param.month
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let since_compared = parsed_daily_month
                                .signed_duration_since(since_date)
                                .num_days();
                            if since_compared < 0 {
                                return false;
                            }
                            return true;
                        }
                        if params.upto.is_some() {
                            let upto_date = NaiveDate::parse_from_str(
                                &format!(
                                    "{}-{:0>2}-{}",
                                    parsed_upto_param.year,
                                    parsed_upto_param.month,
                                    NaiveDate::from_ymd(
                                        parsed_upto_param.year as i32,
                                        parsed_upto_param.month as u32,
                                        1
                                    )
                                    .last_day_of_month()
                                ),
                                "%Y-%m-%d",
                            )
                            .unwrap();
                            let upto_compared = parsed_daily_month
                                .signed_duration_since(upto_date)
                                .num_days();
                            if upto_compared > 0 {
                                return false;
                            }
                            return true;
                        }
                        true
                    })
                    .collect();
                // Distinct months present in the filtered data, ascending.
                let mut months_list: Vec<u32> = new_harian
                    .iter()
                    .map(|daily_item| {
                        NaiveDate::parse_from_str(&daily_item.month, "%Y-%m-%d")
                            .unwrap()
                            .month()
                    })
                    .collect::<HashSet<_>>()
                    .into_iter()
                    .collect();
                months_list.sort_unstable();
                let mut to_return: Vec<ResponseStructure> = Vec::new();
                // Fold each month's daily rows into one aggregate row.
                // (Comparing only the month is safe: `new_harian` was already
                // restricted to `selected_year` above.)
                months_list.iter().for_each(|month| {
                    let folded = new_harian
                        .iter()
                        .filter(|daily| {
                            let parsed =
                                NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d").unwrap();
                            parsed.month().to_string() == *month.to_string()
                        })
                        .fold(
                            ResponseStructure {
                                month: format!("{}-{:0>2}", selected_year, month),
                                positive: 0,
                                recovered: 0,
                                deaths: 0,
                                active: 0,
                            },
                            |mut acc, next| {
                                acc.positive += next.positive;
                                acc.recovered += next.recovered;
                                acc.deaths += next.deaths;
                                acc.active += next.active;
                                acc
                            },
                        );
                    to_return.push(folded);
                });
                HttpResponse::Ok().status(reqwest::StatusCode::OK).body(
                    serde_json::to_string(&HandlerResponse {
                        ok: true,
                        data: to_return,
                        message: String::from("success"),
                    })
                    .unwrap(),
                )
            }
        },
    }
}
/// `GET /{year}/{month}` — one aggregate row for a single month.
///
/// NOTE(review): unlike the other handlers, the path values are not validated
/// against YEARS_LIST/MONTHS_LIST; an out-of-range month simply yields an
/// all-zero row — confirm whether a 400 would be preferable.
#[get("/{year}/{month}")]
pub async fn specific_month(path: web::Path<YearMonthPath>) -> HttpResponse {
    type HandlerResponse = types::HandlerResponseTemplate<ResponseStructure>;
    #[derive(Serialize, Deserialize, Debug)]
    struct APIResponse {
        update: Update,
    }
    let selected_year = path.year;
    let selected_month = path.month;
    let resp = reqwest::get(crate::constants::COVID_API_ENDPOINT).await;
    match resp {
        Err(_) => HttpResponse::InternalServerError()
            .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
            .body("Could not get the data, please retry in a few minutes"),
        Ok(raw_response) => match raw_response.json::<APIResponse>().await {
            Err(_) => HttpResponse::InternalServerError()
                .status(reqwest::StatusCode::INTERNAL_SERVER_ERROR)
                .body("There's something wrong with us, hang tight"),
            Ok(json_response) => {
                // Keep only the daily records of the requested year/month.
                let new_harian: Vec<ResponseStructure> = json_response
                    .update
                    .harian
                    .into_iter()
                    .filter(|daily_item| {
                        let parsed_date =
                            DateTime::parse_from_rfc3339(&daily_item.key_as_string).unwrap();
                        parsed_date.year() == selected_year as i32
                            && parsed_date.month() == selected_month as u32
                    })
                    .map(|daily_item| ResponseStructure {
                        month: DateTime::parse_from_rfc3339(&daily_item.key_as_string)
                            .unwrap()
                            .format("%Y-%m-%d")
                            .to_string(),
                        positive: daily_item.jumlah_positif.value as i32,
                        recovered: daily_item.jumlah_sembuh.value as i32,
                        deaths: daily_item.jumlah_meninggal.value as i32,
                        active: daily_item.jumlah_dirawat.value as i32,
                    })
                    .collect();
                // Fold the month's daily rows into a single aggregate row.
                let to_return = new_harian
                    .iter()
                    .filter(|daily| {
                        let parsed = NaiveDate::parse_from_str(&daily.month, "%Y-%m-%d").unwrap();
                        parsed.month().to_string() == selected_month.to_string()
                    })
                    .fold(
                        ResponseStructure {
                            month: format!("{}-{:0>2}", selected_year, selected_month),
                            positive: 0,
                            recovered: 0,
                            deaths: 0,
                            active: 0,
                        },
                        |mut acc, next| {
                            acc.positive += next.positive;
                            acc.recovered += next.recovered;
                            acc.deaths += next.deaths;
                            acc.active += next.active;
                            acc
                        },
                    );
                HttpResponse::Ok().status(reqwest::StatusCode::OK).body(
                    serde_json::to_string(&HandlerResponse {
                        ok: true,
                        data: to_return,
                        message: String::from("success"),
                    })
                    .unwrap(),
                )
            }
        },
    }
}
| 39.287926 | 98 | 0.411466 |
9c55119a449844c56e2dc72940f93562b40abcde | 574 | use {super::menu::Menu, screen_13::prelude_rc::*};
/// Initial screen shown while the runtime content pack is being loaded.
pub struct Load;
impl Screen<RcK> for Load {
    /// Renders a cleared (blank) frame while loading is in progress.
    fn render(&self, gpu: &Gpu, dims: Extent) -> Render {
        let mut frame = gpu.render(dims);
        frame.clear().record();
        frame
    }
    /// Opens the content pack, loads the menu font, and transitions to `Menu`.
    fn update(self: Box<Self>, gpu: &Gpu, _: &Input) -> DynScreen {
        let mut pak = Pak::open("wasm.pak")
            .expect("ERROR: You must first pack the runtime content - See README.md");
        let font_h1 = gpu.read_bitmap_font(&mut pak, "font/permanent-marker");
        Box::new(Menu { font_h1 })
    }
}
| 27.333333 | 86 | 0.585366 |
7a536ab4fb7fbdd68b7acca9025ff5e67ec0d83e | 3,265 | use std::ops::AddAssign;
use std::collections::BTreeMap;
use crate::hand_stats::HandStats;
use crate::shoe::CardShoe;
use crate::round_factory::RoundFactory;
use crate::running_stats::RunningStats;
/// Plays a fixed number of blackjack rounds against a card shoe and collects
/// the outcome statistics.
pub struct Simulator<'a>
{
    // Number of rounds played by `run`.
    round_count: u64,
    // Card source; also tracks the running/true count.
    shoe: Box<dyn CardShoe>,
    // Builds one round per iteration from the shoe.
    round_factory: &'a RoundFactory<'a>,
    // If set, the shoe's true count is forced to this value before each round.
    force_tc: Option<f32>,
    // If set, the running count is adjusted by this amount after each round.
    adjust_rc: Option<i32>,
    // Emit per-round diagnostics on stderr.
    verbose: bool,
    // Emit a textual progress indicator on stderr.
    print_progress: bool,
}
/// Aggregated outcome of a simulation run; mergeable via `+=`.
#[derive(Debug, Default)]
pub struct SimulationResult {
    // Running statistics over per-round winnings.
    pub winnings: RunningStats,
    // Accumulated per-hand statistics.
    pub hand_stats: HandStats,
    // Histogram of round results, keyed by winnings in half-unit buckets
    // (key = (result * 2).round()).
    pub winning_distrib: BTreeMap<i32, u64>,
}
impl AddAssign for SimulationResult {
    /// Merges another result into this one: winnings and hand statistics are
    /// accumulated, and the winning distributions are combined by summing the
    /// count of each bucket.
    fn add_assign(&mut self, rhs: Self) {
        self.winnings += rhs.winnings;
        self.hand_stats += rhs.hand_stats;
        for (bucket, count) in rhs.winning_distrib {
            *self.winning_distrib.entry(bucket).or_insert(0) += count;
        }
    }
}
impl<'a> Simulator<'a>
{
    /// Bundles the configuration for a simulation run.
    pub fn new(round_count: u64,
               shoe: Box<dyn CardShoe>,
               round_factory: &'a RoundFactory<'a>,
               force_tc: Option<f32>,
               adjust_rc: Option<i32>,
               verbose: bool,
               print_progress: bool) -> Simulator<'a> {
        Simulator {
            round_count,
            shoe,
            round_factory,
            force_tc,
            adjust_rc,
            verbose,
            print_progress,
        }
    }

    /// Plays all rounds and returns the aggregated results.
    ///
    /// Only the first player's result (`player_results[0]`) feeds the winnings
    /// statistics and the distribution histogram.
    pub fn run(mut self) -> SimulationResult {
        let mut winnings = RunningStats::default();
        let mut hand_stats = HandStats::default();
        let mut winning_distrib = BTreeMap::new();
        for round_i in 0..self.round_count {
            // Optionally pin the shoe's true count before dealing.
            if let Some(force_tc) = self.force_tc {
                self.shoe.force_true_count(force_tc);
            }
            // Snapshot the counts before the round for the verbose report.
            let rc = self.shoe.running_count();
            let tc = self.shoe.true_count();
            let (_, result) = self.round_factory.make(&mut *self.shoe).run();
            // Optionally bias the running count after the round.
            if let Some(rel_rc) = self.adjust_rc {
                self.shoe.adjust_running_count(rel_rc);
            }
            let num_result = result.player_results[0];
            winnings.push(num_result);
            hand_stats += result.hand_stats;
            // Bucket winnings in half-unit steps for the histogram.
            let hash_key = (num_result * 2.0).round() as i32;
            *winning_distrib.entry(hash_key).or_insert(0) += 1;
            if self.print_progress {
                Self::update_progress(round_i + 1, self.round_count);
            }
            if self.verbose {
                eprintln!("rc = {:+}, tc = {:+.1}", rc, tc);
                //eprintln!("{:?}", round);
                eprintln!("res = {:+.1}\n", num_result);
            }
        }
        SimulationResult {
            winnings,
            hand_stats,
            winning_distrib,
        }
    }

    /// Writes a coarse progress indicator to stderr: a dot per percent, the
    /// percentage number every 5%, and "100%" on completion.
    fn update_progress(done: u64, total: u64) {
        if total < 100 || done % (total / 100) == 0 {
            let percent = (done as f64 / total as f64 * 100.0).round() as u32;
            if percent == 100 {
                eprintln!("100%");
            } else if percent % 5 == 0 {
                eprint!("{}%", percent);
            } else {
                eprint!(".");
            }
        } else if done == total {
            eprintln!("100%");
        }
    }
}
| 28.146552 | 78 | 0.524043 |
1a6849137c5477ae205e98e12d9c1352ce89c383 | 48 | pub(crate) mod deep_merge;
pub(crate) mod json;
| 16 | 26 | 0.75 |
ed9c83a9e068ba56f6de0e2abfd907930bbf7787 | 991 | use sc_cli::RunCmd;
use structopt::StructOpt;
/// Top-level command line arguments: an optional subcommand plus the standard
/// Substrate node run options.
#[derive(Debug, StructOpt)]
pub struct Cli {
    #[structopt(subcommand)]
    pub subcommand: Option<Subcommand>,
    #[structopt(flatten)]
    pub run: RunCmd,
}
/// Available node subcommands; each variant wraps the corresponding sc-cli
/// (or benchmarking) command implementation.
#[derive(Debug, StructOpt)]
pub enum Subcommand {
    /// Key management cli utilities
    Key(sc_cli::KeySubcommand),
    /// Build a chain specification.
    BuildSpec(sc_cli::BuildSpecCmd),
    /// Validate blocks.
    CheckBlock(sc_cli::CheckBlockCmd),
    /// Export blocks.
    ExportBlocks(sc_cli::ExportBlocksCmd),
    /// Export the state of a given block into a chain spec.
    ExportState(sc_cli::ExportStateCmd),
    /// Import blocks.
    ImportBlocks(sc_cli::ImportBlocksCmd),
    /// Remove the whole chain.
    PurgeChain(sc_cli::PurgeChainCmd),
    /// Revert the chain to a previous state.
    Revert(sc_cli::RevertCmd),
    /// The custom benchmark subcommand benchmarking runtime pallets.
    #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")]
    Benchmark(frame_benchmarking_cli::BenchmarkCmd),
}
| 23.046512 | 71 | 0.73663 |
ede30a0bed756d2c29e7d93be495a9904ccc8e94 | 1,154 | use super::BackendTypes;
use crate::mir::operand::OperandRef;
use rustc::ty::Ty;
use rustc_target::abi::call::FnType;
use syntax_pos::Span;
/// Backend hooks for lowering calls to compiler intrinsics.
pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
    /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
    /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
    /// add them to librustc_codegen_llvm/context.rs
    fn codegen_intrinsic_call(
        &mut self,
        callee_ty: Ty<'tcx>,
        fn_ty: &FnType<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Self::Value>],
        llresult: Self::Value,
        span: Span,
    );
    /// Emits code for the `abort` intrinsic.
    fn abort(&mut self);
    /// Emits an optimizer assumption that `val` holds.
    fn assume(&mut self, val: Self::Value);
    /// Emits a branch-prediction hint that `cond` is expected to equal
    /// `expected`; returns the (possibly annotated) condition value.
    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
    /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
    /// Rust defined C-variadic functions.
    fn va_start(&mut self, val: Self::Value) -> Self::Value;
    /// Trait method used to inject `va_end` on the "spoofed" `VaListImpl` before
    /// Rust defined C-variadic functions return.
    fn va_end(&mut self, val: Self::Value) -> Self::Value;
}
| 38.466667 | 81 | 0.655979 |
67717f3e917d5d2b21a183612eb343591147a00e | 2,665 | //! Lazily initialized data.
//! Used in generated code.
use std::sync;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering;
/// Lazily initialized data.
///
/// The value is created at most once, on first access, and then lives for the
/// remainder of the program: the backing `Box` is intentionally leaked so a
/// `&'static` reference can be handed out.
pub struct LazyV2<T: Sync> {
    // Guards the one-time initialization of `ptr`.
    lock: sync::Once,
    // Null until initialized; afterwards points at the leaked boxed value.
    ptr: AtomicPtr<T>,
}

// SAFETY: `ptr` is written exactly once, inside `Once::call_once`, and
// `call_once` synchronizes-with every caller before the pointer is read, so
// sharing a `LazyV2<T>` between threads is sound as long as `T: Sync`.
unsafe impl<T: Sync> Sync for LazyV2<T> {}

impl<T: Sync> LazyV2<T> {
    /// Uninitialized `Lazy` object.
    pub const INIT: LazyV2<T> = LazyV2 {
        lock: sync::Once::new(),
        // `ptr::null_mut()` (const fn) instead of the non-idiomatic
        // `0 as *mut T` cast (clippy: `zero_ptr`).
        ptr: AtomicPtr::new(std::ptr::null_mut()),
    };

    /// Get lazy field value, initialize it with given function if not yet.
    ///
    /// `init` runs at most once even under concurrent calls; every caller
    /// observes the same value. Relaxed ordering suffices because
    /// `Once::call_once` already provides the required happens-before edge.
    pub fn get<F>(&'static self, init: F) -> &'static T
    where
        F: FnOnce() -> T,
    {
        self.lock.call_once(|| {
            // Deliberately leak the box: the value must live for 'static.
            self.ptr
                .store(Box::into_raw(Box::new(init())), Ordering::Relaxed);
        });
        // SAFETY: after `call_once` returns, `ptr` holds the pointer stored by
        // the initializer, which is non-null and valid for 'static.
        unsafe { &*self.ptr.load(Ordering::Relaxed) }
    }
}
#[cfg(test)]
mod test {
    use std::sync::atomic::AtomicIsize;
    use std::sync::atomic::Ordering;
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread;

    use super::LazyV2;

    // Stress test: many threads hammer `get` concurrently and the initializer
    // must still run exactly once per reset, with every thread seeing the
    // same value.
    #[test]
    fn many_threads_calling_get() {
        const N_THREADS: usize = 32;
        const N_ITERS_IN_THREAD: usize = 32;
        const N_ITERS: usize = 16;
        static mut LAZY: LazyV2<String> = LazyV2::INIT;
        static CALL_COUNT: AtomicIsize = AtomicIsize::new(0);
        let value = "Hello, world!".to_owned();
        for _ in 0..N_ITERS {
            // Reset mutable state.
            unsafe {
                LAZY = LazyV2::INIT;
            }
            CALL_COUNT.store(0, Ordering::SeqCst);
            // Create a bunch of threads, all calling .get() at the same time.
            let mut threads = vec![];
            let barrier = Arc::new(Barrier::new(N_THREADS));
            for _ in 0..N_THREADS {
                let cloned_value_thread = value.clone();
                let cloned_barrier = barrier.clone();
                threads.push(thread::spawn(move || {
                    // Ensure all threads start at once to maximise contention.
                    cloned_barrier.wait();
                    for _ in 0..N_ITERS_IN_THREAD {
                        assert_eq!(&cloned_value_thread, unsafe {
                            LAZY.get(|| {
                                CALL_COUNT.fetch_add(1, Ordering::SeqCst);
                                cloned_value_thread.clone()
                            })
                        });
                    }
                }));
            }
            for thread in threads {
                thread.join().unwrap();
            }
            // The initializer must have run exactly once despite the races.
            assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        }
    }
}
| 28.655914 | 79 | 0.508443 |
fe961a238c738e7068230f89de1e0754bfc7527c | 8,979 | use std::sync::atomic::{AtomicBool, Ordering::Relaxed};
use std::sync::Arc;
static OPENED: AtomicBool = AtomicBool::new(false);
/// Example enum backing the gallery's radio-button demo (`radio` field).
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
enum Enum {
    First,
    Second,
    Third,
}
/// Shows off one example of each major type of widget.
#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
pub struct WidgetGallery {
    // Shared with a worker thread (file-dialog demo), hence atomic + Arc.
    enabled: Arc<AtomicBool>,
    visible: bool,
    boolean: bool,
    radio: Enum,
    scalar: f32,
    string: String,
    color: egui::Color32,
    animate_progress_bar: bool,
}
impl Default for WidgetGallery {
    /// Starts enabled and visible, with placeholder demo values.
    fn default() -> Self {
        Self {
            enabled: Arc::new(AtomicBool::new(true)),
            visible: true,
            boolean: false,
            radio: Enum::First,
            scalar: 42.0,
            string: Default::default(),
            color: egui::Color32::LIGHT_BLUE.linear_multiply(0.5),
            animate_progress_bar: true,
        }
    }
}
impl super::Demo for WidgetGallery {
    fn name(&self) -> &'static str {
        "🗄 Widget Gallery"
    }
    /// Hosts the gallery inside a resizable demo window.
    fn show(&mut self, ctx: &egui::CtxRef, open: &mut bool) {
        egui::Window::new(self.name())
            .open(open)
            .resizable(true)
            .default_width(300.0)
            .show(ctx, |ui| {
                use super::View as _;
                self.ui(ui);
            });
    }
}
impl super::View for WidgetGallery {
    /// Renders the widget grid plus the Visible/Interactive toggles and the
    /// documentation links below it.
    fn ui(&mut self, ui: &mut egui::Ui) {
        // Grey out (or hide) the gallery according to the two toggles below.
        ui.add_enabled_ui(self.enabled.load(Relaxed), |ui| {
            ui.set_visible(self.visible);
            egui::Grid::new("my_grid")
                .num_columns(2)
                .spacing([40.0, 4.0])
                .striped(true)
                .show(ui, |ui| {
                    self.gallery_grid_contents(ui);
                });
        });
        ui.separator();
        ui.horizontal(|ui| {
            ui.checkbox(&mut self.visible, "Visible")
                .on_hover_text("Uncheck to hide all the widgets.");
            if self.visible {
                // BUG FIX: the previous code passed
                // `&mut self.enabled.load(Relaxed)` to the checkbox, i.e. a
                // mutable borrow of a *temporary* copy of the flag, so
                // toggling "Interactive" never wrote back to the shared
                // AtomicBool. Load into a local, let the checkbox edit it,
                // and store it back when it actually changed.
                let mut enabled = self.enabled.load(Relaxed);
                if ui
                    .checkbox(&mut enabled, "Interactive")
                    .on_hover_text("Uncheck to inspect how the widgets look when disabled.")
                    .changed()
                {
                    self.enabled.store(enabled, Relaxed);
                }
            }
        });
        ui.separator();
        ui.vertical_centered(|ui| {
            let tooltip_text = "The full egui documentation.\nYou can also click the different widgets names in the left column.";
            ui.hyperlink("https://docs.rs/egui/").on_hover_text(tooltip_text);
            ui.add(crate::__egui_github_link_file!(
                "Source code of the widget gallery"
            ));
        });
    }
}
impl WidgetGallery {
    /// Emits one labelled demo row per widget type into the enclosing grid.
    fn gallery_grid_contents(&mut self, ui: &mut egui::Ui) {
        // Split borrows so each widget can mutably borrow its own field.
        let Self {
            enabled,
            visible: _,
            boolean,
            radio,
            scalar,
            string,
            color,
            animate_progress_bar,
        } = self;
        ui.add(doc_link_label("Label", "label,heading"));
        ui.label("Welcome to the widget gallery!");
        ui.end_row();
        ui.add(doc_link_label("Hyperlink", "Hyperlink"));
        use egui::special_emojis::GITHUB;
        ui.hyperlink_to(
            format!("{} egui home page", GITHUB),
            "https://github.com/emilk/egui",
        );
        ui.end_row();
        ui.add(doc_link_label("TextEdit", "TextEdit,text_edit"));
        ui.add(egui::TextEdit::singleline(string).hint_text("Write something here"))ui;
        ui.end_row();
        ui.add(doc_link_label("Button", "button"));
        if ui.button("Click me!").clicked() {
            // Spawn a background thread that opens a native file dialog
            // via `rfd`, disabling the whole gallery while it is open.
            let atom_enabled = enabled.clone();
            *boolean = !*boolean;
            // Show a "wait" cursor while the dialog is being opened
            // (OPENED is still false at click time; the thread sets it).
            if !OPENED.load(Relaxed) {
                ui.ctx().output().cursor_icon = egui::CursorIcon::Wait;
            } else {
                ui.ctx().output().cursor_icon = egui::CursorIcon::Default;
            }
            std::thread::spawn(move || {
                OPENED.store(true, Relaxed);
                atom_enabled.store(false, Relaxed);
                use rfd::FileDialog;
                // NOTE(review): hard-coded absolute path from a developer
                // machine — consider a portable default directory.
                let files = FileDialog::new()
                    .set_directory(r"/Users/tenx/Documents/GitHub/egui/egui_demo_lib/src")
                    .pick_files();
                println!("{:#?}", files);
                OPENED.store(false, Relaxed);
                atom_enabled.store(true, Relaxed);
            });
        }
        ui.end_row();
        ui.add(doc_link_label("Checkbox", "checkbox"));
        ui.checkbox(boolean, "Checkbox");
        ui.end_row();
        ui.add(doc_link_label("RadioButton", "radio"));
        ui.horizontal(|ui| {
            ui.radio_value(radio, Enum::First, "First");
            ui.radio_value(radio, Enum::Second, "Second");
            ui.radio_value(radio, Enum::Third, "Third");
        });
        ui.end_row();
        ui.add(doc_link_label(
            "SelectableLabel",
            "selectable_value,SelectableLabel",
        ));
        ui.horizontal(|ui| {
            ui.selectable_value(radio, Enum::First, "First");
            ui.selectable_value(radio, Enum::Second, "Second");
            ui.selectable_value(radio, Enum::Third, "Third");
        });
        ui.end_row();
        ui.add(doc_link_label("ComboBox", "ComboBox"));
        egui::ComboBox::from_label("Take your pick")
            .selected_text(format!("{:?}", radio))
            .show_ui(ui, |ui| {
                ui.selectable_value(radio, Enum::First, "First");
                ui.selectable_value(radio, Enum::Second, "Second");
                ui.selectable_value(radio, Enum::Third, "Third");
            });
        ui.end_row();
        ui.add(doc_link_label("Slider", "Slider"));
        ui.add(egui::Slider::new(scalar, 0.0..=360.0).suffix("°"));
        ui.end_row();
        ui.add(doc_link_label("DragValue", "DragValue"));
        ui.add(egui::DragValue::new(scalar).speed(1.0));
        ui.end_row();
        ui.add(doc_link_label("ProgressBar", "ProgressBar"));
        let progress = *scalar / 360.0;
        // NOTE(review): `animate_progress_bar` is stored each frame but the
        // bar is created with `.animate(false)` — confirm this is intended.
        let progress_bar = egui::ProgressBar::new(progress)
            .show_percentage()
            .animate(false);
        *animate_progress_bar = ui
            .add(progress_bar)
            .on_hover_text("The progress bar can be animated!")
            .hovered();
        ui.end_row();
        ui.add(doc_link_label("Color picker", "color_edit"));
        ui.color_edit_button_srgba(color);
        ui.end_row();
        ui.add(doc_link_label("Image", "Image"));
        ui.image(egui::TextureId::Egui, [24.0, 16.0])
            .on_hover_text("The egui font texture was the convenient choice to show here.");
        ui.end_row();
        ui.add(doc_link_label("ImageButton", "ImageButton"));
        if ui
            .add(egui::ImageButton::new(egui::TextureId::Egui, [24.0, 16.0]))
            .on_hover_text("The egui font texture was the convenient choice to show here.")
            .clicked()
        {
            *boolean = !*boolean;
        }
        ui.end_row();
        ui.add(doc_link_label("Separator", "separator"));
        ui.separator();
        ui.end_row();
        ui.add(doc_link_label("CollapsingHeader", "collapsing"));
        ui.collapsing("Click to see what is hidden!", |ui| {
            ui.horizontal_wrapped(|ui| {
                ui.label(
                    "Not much, as it turns out - but here is a gold star for you for checking:",
                );
                ui.colored_label(egui::Color32::GOLD, "☆");
            });
        });
        ui.end_row();
        ui.add(doc_link_label("Plot", "plot"));
        ui.add(example_plot());
        ui.end_row();
        ui.hyperlink_to(
            "Custom widget:",
            super::toggle_switch::url_to_file_source_code(),
        );
        ui.add(super::toggle_switch::toggle(boolean)).on_hover_text(
            "It's easy to create your own widgets!\n\
            This toggle switch is just 15 lines of code.",
        );
        ui.end_row();
    }
}
/// Builds a small sine-curve plot used as one of the gallery rows.
fn example_plot() -> egui::plot::Plot {
    use egui::plot::{Line, Value, Values};
    use std::f64::consts::TAU;
    // Sample sin(x) over 129 evenly spaced points in [-TAU, TAU].
    const N: i32 = 128;
    let samples = (0..=N).map(|i| {
        let x = egui::remap(i as f64, 0.0..=N as f64, -TAU..=TAU);
        Value::new(x, x.sin())
    });
    let curve = Line::new(Values::from_values_iter(samples));
    egui::plot::Plot::new("example_plot")
        .line(curve)
        .height(32.0)
        .data_aspect(1.0)
}
/// Returns a widget rendering "`title`:" as a hyperlink into the egui docs
/// search for `search_term`, with an explanatory hover tooltip.
fn doc_link_label<'a>(title: &'a str, search_term: &'a str) -> impl egui::Widget + 'a {
    let label = format!("{}:", title);
    let url = format!("https://docs.rs/egui?search={}", search_term);
    move |ui: &mut egui::Ui| {
        let response = ui.hyperlink_to(label, url);
        response.on_hover_ui(|ui| {
            ui.horizontal_wrapped(|ui| {
                ui.label("Search egui docs for");
                ui.code(search_term);
            });
        })
    }
}
| 31.840426 | 130 | 0.530014 |
1807a5131006ccb8e625bcae9fffbd68be10053a | 1,887 | use crate::defs::{self, BitSize};
use core::fmt;
use std::ops::{Add, Sub};
// A size measured in machine words; one word is `defs::WORD_BYTES` bytes
// (see `bytes()` below).
#[derive(Copy, Clone)]
pub struct WordSize {
    pub words: usize,
}
#[allow(dead_code)]
impl WordSize {
pub const fn one() -> Self {
Self { words: 1 }
}
pub const fn new(words: usize) -> Self {
Self { words }
}
pub const fn add(self, n: usize) -> Self {
Self {
words: self.words + n,
}
}
pub const fn bytes(self) -> usize {
self.words * defs::WORD_BYTES
}
}
impl Add for WordSize {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
words: self.words + other.words,
}
}
}
impl Sub for WordSize {
type Output = Self;
fn sub(self, other: Self) -> Self {
Self {
words: self.words - other.words,
}
}
}
impl fmt::Display for WordSize {
    /// Formats as e.g. "3 words".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_fmt(format_args!("{} words", self.words))
    }
}
// A size measured in raw bytes; convertible to `WordSize` with explicit
// rounding direction.
#[derive(Debug, Copy, Clone)]
pub struct ByteSize {
    pub bytes: usize,
}
#[allow(dead_code)]
impl ByteSize {
    /// Creates a size of exactly `bytes` bytes.
    #[inline]
    pub const fn new(bytes: usize) -> ByteSize {
        Self { bytes }
    }
    /// Grows the size by `n` bytes in place.
    pub fn add(&mut self, n: usize) {
        self.bytes += n
    }
    // TODO: impl Add trait
    /// Grows the size by `other` in place.
    pub fn add_bytesize(&mut self, other: ByteSize) {
        // Idiom fix: was `self.bytes = self.bytes + other.bytes`
        // (clippy::assign_op_pattern).
        self.bytes += other.bytes
    }
    /// The raw byte count.
    #[inline]
    pub const fn bytes(self) -> usize {
        self.bytes
    }
    /// Converts to whole words, discarding any trailing partial word.
    #[inline]
    pub const fn get_words_rounded_down(self) -> WordSize {
        WordSize::new(self.bytes / defs::WORD_BYTES)
    }
    /// Converts to words, counting a trailing partial word as a full word.
    #[inline]
    pub const fn get_words_rounded_up(self) -> WordSize {
        WordSize::new((self.bytes + defs::WORD_BYTES - 1) / defs::WORD_BYTES)
    }
    /// Converts to a bit size.
    // NOTE(review): passes `defs::WORD_BYTES` as the unit argument of
    // `BitSize::with_unit_const` — confirm that is the intended unit.
    #[inline]
    pub const fn get_bits(self) -> BitSize {
        BitSize::with_unit_const(self.bytes, defs::WORD_BYTES)
    }
}
impl fmt::Display for ByteSize {
    /// Formats as e.g. "12 bytes".
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_fmt(format_args!("{} bytes", self.bytes))
    }
}
| 18.144231 | 73 | 0.599364 |
de3f8766c2cecd1f40e26ed5577544c40bfd8940 | 961 | // if1.rs
/// Returns the larger of `a` and `b`; for ties either value is correct.
pub fn bigger(a: i32, b: i32) -> i32 {
    // Complete this function to return the bigger number!
    // Do not use:
    // - return
    // - another function call
    // - additional variables
    // Scroll down for hints.
    // Idiom fix: Rust `if` conditions take no parentheses (was `if (a > b)`,
    // which triggers the `unused_parens` lint).
    if a > b {
        a
    } else {
        b
    }
}
// Don't mind this for now :)
// Unit tests for `bigger` (run via `cargo test`).
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn ten_is_bigger_than_eight() {
        assert_eq!(bigger(10, 8), 10);
    }
    #[test]
    fn fortytwo_is_bigger_than_thirtytwo() {
        assert_eq!(bigger(32, 42), 42);
    }
}
// It's possible to do this in one line if you would like!
// Some similar examples from other languages:
// - In C(++) this would be: `a > b ? a : b`
// - In Python this would be: `a if a > b else b`
// Remember in Rust that:
// - the `if` condition does not need to be surrounded by parentheses
// - `if`/`else` conditionals are expressions
// - Each condition is followed by a `{}` block.
| 23.439024 | 69 | 0.582726 |
ab294800797d2e5aa6b3601b67453103d19abed3 | 14,732 | //! Explicit VR Little Endian syntax transfer implementation
use crate::decode::basic::LittleEndianBasicDecoder;
use crate::decode::{BasicDecode, Decode, DecodeFrom};
use crate::encode::basic::LittleEndianBasicEncoder;
use crate::encode::{BasicEncode, Encode};
use crate::error::Result;
use byteordered::byteorder::{ByteOrder, LittleEndian};
use byteordered::Endianness;
use dicom_core::header::{DataElementHeader, Header, Length, SequenceItemHeader};
use dicom_core::{PrimitiveValue, Tag, VR};
use std::io::{Read, Write};
/// A data element decoder for the Explicit VR Little Endian transfer syntax.
#[derive(Debug, Default, Clone)]
pub struct ExplicitVRLittleEndianDecoder {
    // Decoder for fixed-width primitives (tags, lengths, numbers).
    basic: LittleEndianBasicDecoder,
}
impl Decode for ExplicitVRLittleEndianDecoder {
    // Reads one data element header: tag (4 bytes), explicit VR (2 bytes),
    // then a 16-bit or 32-bit length depending on the VR. Returns the header
    // together with the total number of bytes consumed from `source`.
    fn decode_header<S>(&self, mut source: &mut S) -> Result<(DataElementHeader, usize)>
    where
        S: ?Sized + Read,
    {
        // retrieve tag
        let Tag(group, element) = self.basic.decode_tag(&mut source)?;
        let mut buf = [0u8; 4];
        if group == 0xFFFE {
            // item delimiters do not have VR or reserved field
            source.read_exact(&mut buf)?;
            let len = LittleEndian::read_u32(&buf);
            return Ok((
                DataElementHeader::new((group, element), VR::UN, Length(len)),
                8, // tag + len
            ));
        }
        // retrieve explicit VR
        source.read_exact(&mut buf[0..2])?;
        // Unrecognized two-byte VR codes fall back to UN (unknown).
        let vr = VR::from_binary([buf[0], buf[1]]).unwrap_or(VR::UN);
        let bytes_read;
        // retrieve data length
        let len = match vr {
            VR::OB
            | VR::OD
            | VR::OF
            | VR::OL
            | VR::OW
            | VR::SQ
            | VR::UC
            | VR::UR
            | VR::UT
            | VR::UN => {
                // read 2 reserved bytes, then 4 bytes for data length
                source.read_exact(&mut buf[0..2])?;
                source.read_exact(&mut buf)?;
                bytes_read = 12;
                LittleEndian::read_u32(&buf)
            }
            _ => {
                // read 2 bytes for the data length
                source.read_exact(&mut buf[0..2])?;
                bytes_read = 8;
                u32::from(LittleEndian::read_u16(&buf[0..2]))
            }
        };
        Ok((
            DataElementHeader::new((group, element), vr, Length(len)),
            bytes_read,
        ))
    }
    // Reads a sequence item header: tag (4 bytes) + 32-bit length, no VR.
    fn decode_item_header<S>(&self, source: &mut S) -> Result<SequenceItemHeader>
    where
        S: ?Sized + Read,
    {
        let mut buf = [0u8; 8];
        source.read_exact(&mut buf)?;
        // retrieve tag
        let group = LittleEndian::read_u16(&buf[0..2]);
        let element = LittleEndian::read_u16(&buf[2..4]);
        let len = LittleEndian::read_u32(&buf[4..8]);
        let header = SequenceItemHeader::new((group, element), Length(len))?;
        Ok(header)
    }
    // Reads a bare (group, element) tag as two little-endian u16 values.
    fn decode_tag<S>(&self, source: &mut S) -> Result<Tag>
    where
        S: ?Sized + Read,
    {
        let mut buf = [0u8; 4];
        source.read_exact(&mut buf)?;
        Ok(Tag(
            LittleEndian::read_u16(&buf[0..2]),
            LittleEndian::read_u16(&buf[2..4]),
        ))
    }
}
// Adapter impl: all three methods forward to the `Decode` implementation above.
impl<S: ?Sized> DecodeFrom<S> for ExplicitVRLittleEndianDecoder
where
    S: Read,
{
    fn decode_header(&self, source: &mut S) -> Result<(DataElementHeader, usize)> {
        Decode::decode_header(self, source)
    }
    fn decode_item_header(&self, source: &mut S) -> Result<SequenceItemHeader> {
        Decode::decode_item_header(self, source)
    }
    fn decode_tag(&self, source: &mut S) -> Result<Tag> {
        Decode::decode_tag(self, source)
    }
}
/// A concrete encoder for the transfer syntax ExplicitVRLittleEndian
#[derive(Debug, Default, Clone)]
pub struct ExplicitVRLittleEndianEncoder {
    // Encoder for fixed-width primitives (numbers and values).
    basic: LittleEndianBasicEncoder,
}
// Endianness and all primitive number writes delegate to the inner
// `LittleEndianBasicEncoder`.
impl BasicEncode for ExplicitVRLittleEndianEncoder {
    fn endianness(&self) -> Endianness {
        Endianness::Little
    }
    fn encode_us<S>(&self, to: S, value: u16) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_us(to, value)
    }
    fn encode_ul<S>(&self, to: S, value: u32) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_ul(to, value)
    }
    fn encode_uv<S>(&self, to: S, value: u64) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_uv(to, value)
    }
    fn encode_ss<S>(&self, to: S, value: i16) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_ss(to, value)
    }
    fn encode_sl<S>(&self, to: S, value: i32) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_sl(to, value)
    }
    fn encode_sv<S>(&self, to: S, value: i64) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_sv(to, value)
    }
    fn encode_fl<S>(&self, to: S, value: f32) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_fl(to, value)
    }
    fn encode_fd<S>(&self, to: S, value: f64) -> Result<()>
    where
        S: Write,
    {
        self.basic.encode_fd(to, value)
    }
}
impl Encode for ExplicitVRLittleEndianEncoder {
    /// Writes a (group, element) tag as two little-endian `u16` values.
    fn encode_tag<W>(&self, mut to: W, tag: Tag) -> Result<()>
    where
        W: Write,
    {
        // BUG FIX: this buffer was declared as `[0u8, 4]` — a *two*-element
        // array containing the values [0, 4] — so `&mut buf[2..]` was an
        // empty slice and `LittleEndian::write_u16` panicked at run time.
        // It must be a four-byte array: `[0u8; 4]`.
        let mut buf = [0u8; 4];
        LittleEndian::write_u16(&mut buf[..], tag.group());
        LittleEndian::write_u16(&mut buf[2..], tag.element());
        to.write_all(&buf)?;
        Ok(())
    }
    /// Writes a data element header and returns the number of bytes written:
    /// 12 for VRs with a 32-bit length field (OB, OD, OF, OL, OW, SQ, UC,
    /// UR, UT, UN — which carry 2 reserved bytes), 8 for all other VRs
    /// (16-bit length).
    fn encode_element_header<W>(&self, mut to: W, de: DataElementHeader) -> Result<usize>
    where
        W: Write,
    {
        match de.vr() {
            VR::OB
            | VR::OD
            | VR::OF
            | VR::OL
            | VR::OW
            | VR::SQ
            | VR::UC
            | VR::UR
            | VR::UT
            | VR::UN => {
                let mut buf = [0u8; 12];
                LittleEndian::write_u16(&mut buf[0..], de.tag().group());
                LittleEndian::write_u16(&mut buf[2..], de.tag().element());
                let vr_bytes = de.vr().to_bytes();
                buf[4] = vr_bytes[0];
                buf[5] = vr_bytes[1];
                // buf[6..8] is kept zero'd
                LittleEndian::write_u32(&mut buf[8..], de.len().0);
                to.write_all(&buf)?;
                Ok(12)
            }
            _ => {
                let mut buf = [0u8; 8];
                LittleEndian::write_u16(&mut buf[0..], de.tag().group());
                LittleEndian::write_u16(&mut buf[2..], de.tag().element());
                let vr_bytes = de.vr().to_bytes();
                buf[4] = vr_bytes[0];
                buf[5] = vr_bytes[1];
                // NOTE(review): lengths above 0xFFFF are silently truncated
                // by this cast — confirm callers guarantee the bound.
                LittleEndian::write_u16(&mut buf[6..], de.len().0 as u16);
                to.write_all(&buf)?;
                Ok(8)
            }
        }
    }
    /// Writes an item header (FFFE,E000) followed by the 32-bit `len`.
    fn encode_item_header<W>(&self, mut to: W, len: u32) -> Result<()>
    where
        W: Write,
    {
        let mut buf = [0u8; 8];
        LittleEndian::write_u16(&mut buf, 0xFFFE);
        LittleEndian::write_u16(&mut buf[2..], 0xE000);
        LittleEndian::write_u32(&mut buf[4..], len);
        to.write_all(&buf)?;
        Ok(())
    }
    /// Writes an item delimiter (FFFE,E00D) with zero length (buf[4..8]
    /// stays zero'd).
    fn encode_item_delimiter<W>(&self, mut to: W) -> Result<()>
    where
        W: Write,
    {
        let mut buf = [0u8; 8];
        LittleEndian::write_u16(&mut buf, 0xFFFE);
        LittleEndian::write_u16(&mut buf[2..], 0xE00D);
        to.write_all(&buf)?;
        Ok(())
    }
    /// Writes a sequence delimiter (FFFE,E0DD) with zero length.
    fn encode_sequence_delimiter<W>(&self, mut to: W) -> Result<()>
    where
        W: Write,
    {
        let mut buf = [0u8; 8];
        LittleEndian::write_u16(&mut buf, 0xFFFE);
        LittleEndian::write_u16(&mut buf[2..], 0xE0DD);
        to.write_all(&buf)?;
        Ok(())
    }
    /// Delegates primitive value encoding to the basic LE encoder.
    fn encode_primitive<W>(&self, to: W, value: &PrimitiveValue) -> Result<usize>
    where
        W: Write,
    {
        self.basic.encode_primitive(to, value)
    }
}
#[cfg(test)]
mod tests {
    use super::ExplicitVRLittleEndianDecoder;
    use super::ExplicitVRLittleEndianEncoder;
    use crate::decode::Decode;
    use crate::encode::Encode;
    use dicom_core::header::{DataElementHeader, Header, Length};
    use dicom_core::{Tag, VR};
    use std::io::{Cursor, Read, Seek, SeekFrom, Write};
    // manually crafting some DICOM data elements
    // Tag: (0002,0002) Media Storage SOP Class UID
    // VR: UI
    // Length: 26
    // Value: "1.2.840.10008.5.1.4.1.1.1\0"
    // --
    // Tag: (0002,0010) Transfer Syntax UID
    // VR: UI
    // Length: 20
    // Value: "1.2.840.10008.1.2.1\0" == ExplicitVRLittleEndian
    // --
    const RAW: &'static [u8; 62] = &[
        0x02, 0x00, 0x02, 0x00, 0x55, 0x49, 0x1a, 0x00, 0x31, 0x2e, 0x32, 0x2e, 0x38, 0x34, 0x30,
        0x2e, 0x31, 0x30, 0x30, 0x30, 0x38, 0x2e, 0x35, 0x2e, 0x31, 0x2e, 0x34, 0x2e, 0x31, 0x2e,
        0x31, 0x2e, 0x31, 0x00, 0x02, 0x00, 0x10, 0x00, 0x55, 0x49, 0x14, 0x00, 0x31, 0x2e, 0x32,
        0x2e, 0x38, 0x34, 0x30, 0x2e, 0x31, 0x30, 0x30, 0x30, 0x38, 0x2e, 0x31, 0x2e, 0x32, 0x2e,
        0x31, 0x00,
    ];
    // Round-trips the two headers above through the decoder and checks that
    // value reading is left entirely to the caller (no auto-skip).
    #[test]
    fn decode_data_elements() {
        let dec = ExplicitVRLittleEndianDecoder::default();
        let mut cursor = Cursor::new(RAW.as_ref());
        {
            // read first element
            let (elem, bytes_read) = dec
                .decode_header(&mut cursor)
                .expect("should find an element");
            assert_eq!(elem.tag(), Tag(2, 2));
            assert_eq!(elem.vr(), VR::UI);
            assert_eq!(elem.len(), Length(26));
            assert_eq!(bytes_read, 8);
            // read only half of the value data
            let mut buffer: Vec<u8> = Vec::with_capacity(13);
            buffer.resize(13, 0);
            cursor
                .read_exact(buffer.as_mut_slice())
                .expect("should read it fine");
            assert_eq!(buffer.as_slice(), b"1.2.840.10008".as_ref());
        }
        // cursor should now be @ #21 (there is no automatic skipping)
        assert_eq!(cursor.seek(SeekFrom::Current(0)).unwrap(), 21);
        // cursor should now be @ #34 after skipping
        assert_eq!(cursor.seek(SeekFrom::Current(13)).unwrap(), 34);
        {
            // read second element
            let (elem, _bytes_read) = dec
                .decode_header(&mut cursor)
                .expect("should find an element");
            assert_eq!(elem.tag(), Tag(2, 16));
            assert_eq!(elem.vr(), VR::UI);
            assert_eq!(elem.len(), Length(20));
            // read all data
            let mut buffer: Vec<u8> = Vec::with_capacity(20);
            buffer.resize(20, 0);
            cursor
                .read_exact(buffer.as_mut_slice())
                .expect("should read it fine");
            assert_eq!(buffer.as_slice(), b"1.2.840.10008.1.2.1\0".as_ref());
        }
    }
    // Encodes the same two headers and compares the raw bytes against RAW.
    #[test]
    fn encode_data_elements() {
        let mut buf = [0u8; 62];
        {
            let enc = ExplicitVRLittleEndianEncoder::default();
            let mut writer = Cursor::new(&mut buf[..]);
            // encode first element
            let de = DataElementHeader::new(Tag(0x0002, 0x0002), VR::UI, Length(26));
            let len = enc
                .encode_element_header(&mut writer, de)
                .expect("should write it fine");
            assert_eq!(len, 8);
            writer
                .write_all(b"1.2.840.10008.5.1.4.1.1.1\0".as_ref())
                .expect("should write the value fine");
        }
        assert_eq!(&buf[0..8], &RAW[0..8]);
        {
            let enc = ExplicitVRLittleEndianEncoder::default();
            let mut writer = Cursor::new(&mut buf[34..]);
            // encode second element
            let de = DataElementHeader::new(Tag(0x0002, 0x0010), VR::UI, Length(20));
            let len = enc
                .encode_element_header(&mut writer, de)
                .expect("should write it fine");
            assert_eq!(len, 8);
            writer
                .write_all(b"1.2.840.10008.1.2.1\0".as_ref())
                .expect("should write the value fine");
        }
        assert_eq!(&buf[34..42], &RAW[34..42]);
        assert_eq!(&buf[..], &RAW[..]);
    }
    // manually crafting some DICOM sequence/item delimiters
    // Tag: (0008,103F) Series Description Code Sequence
    // VR: SQ
    // Reserved bytes: 0x0000
    // Length: 0xFFFF_FFFF
    // --
    // Tag: (FFFE,E000) Item
    // Length: 0xFFFF_FFFF (unspecified)
    // --
    // Tag: (FFFE,E00D) Item Delimitation Item
    // Length: 0
    // --
    // Tag: (FFFE,E0DD) Sequence Delimitation Item
    // Length: 0
    // --
    const RAW_SEQUENCE_ITEMS: &'static [u8] = &[
        0x08, 0x00, 0x3F, 0x10, b'S', b'Q', 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0x00,
        0xE0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0x0D, 0xE0, 0x00, 0x00, 0x00, 0x00, 0xFE, 0xFF,
        0xDD, 0xE0, 0x00, 0x00, 0x00, 0x00,
    ];
    // Decodes a sequence header plus item/sequence delimiters, checking the
    // cursor position after each header.
    #[test]
    fn decode_items() {
        let dec = ExplicitVRLittleEndianDecoder::default();
        let mut cursor = Cursor::new(RAW_SEQUENCE_ITEMS);
        {
            // read first element
            let (elem, bytes_read) = dec
                .decode_header(&mut cursor)
                .expect("should find an element header");
            assert_eq!(elem.tag(), Tag(8, 0x103F));
            assert_eq!(elem.vr(), VR::SQ);
            assert!(elem.len().is_undefined());
            assert_eq!(bytes_read, 12);
        }
        // cursor should now be @ #12
        assert_eq!(cursor.seek(SeekFrom::Current(0)).unwrap(), 12);
        {
            let elem = dec
                .decode_item_header(&mut cursor)
                .expect("should find an item header");
            assert!(elem.is_item());
            assert_eq!(elem.tag(), Tag(0xFFFE, 0xE000));
            assert!(elem.len().is_undefined());
        }
        // cursor should now be @ #20
        assert_eq!(cursor.seek(SeekFrom::Current(0)).unwrap(), 20);
        {
            let elem = dec
                .decode_item_header(&mut cursor)
                .expect("should find an item header");
            assert!(elem.is_item_delimiter());
            assert_eq!(elem.tag(), Tag(0xFFFE, 0xE00D));
            assert_eq!(elem.len(), Length(0));
        }
        // cursor should now be @ #28
        assert_eq!(cursor.seek(SeekFrom::Current(0)).unwrap(), 28);
        {
            let elem = dec
                .decode_item_header(&mut cursor)
                .expect("should find an item header");
            assert!(elem.is_sequence_delimiter());
            assert_eq!(elem.tag(), Tag(0xFFFE, 0xE0DD));
            assert_eq!(elem.len(), Length(0));
        }
    }
}
| 32.095861 | 97 | 0.525115 |
e82968d4c504539b394933ec90e47fe0b7b76844 | 20,752 | #[doc = "Reader of register EMR"]
// NOTE(review): this file appears to be machine-generated (svd2rust-style
// register API) — prefer regenerating from the SVD over hand-editing.
pub type R = crate::R<u32, super::EMR>;
#[doc = "Writer for register EMR"]
pub type W = crate::W<u32, super::EMR>;
#[doc = "Register EMR `reset()`'s with value 0"]
impl crate::ResetValue for super::EMR {
    type Type = u32;
    #[inline(always)]
    fn reset_value() -> Self::Type {
        0
    }
}
// EM0..EM3: single-bit External Match flags at bit positions 0..=3 of EMR
// (see the shift amounts in each `bit` method below).
#[doc = "Reader of field `EM0`"]
pub type EM0_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EM0`"]
pub struct EM0_W<'a> {
    w: &'a mut W,
}
impl<'a> EM0_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
        self.w
    }
}
#[doc = "Reader of field `EM1`"]
pub type EM1_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EM1`"]
pub struct EM1_W<'a> {
    w: &'a mut W,
}
impl<'a> EM1_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 1)) | (((value as u32) & 0x01) << 1);
        self.w
    }
}
#[doc = "Reader of field `EM2`"]
pub type EM2_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EM2`"]
pub struct EM2_W<'a> {
    w: &'a mut W,
}
impl<'a> EM2_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 2)) | (((value as u32) & 0x01) << 2);
        self.w
    }
}
#[doc = "Reader of field `EM3`"]
pub type EM3_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `EM3`"]
pub struct EM3_W<'a> {
    w: &'a mut W,
}
impl<'a> EM3_W<'a> {
    #[doc = r"Sets the field bit"]
    #[inline(always)]
    pub fn set_bit(self) -> &'a mut W {
        self.bit(true)
    }
    #[doc = r"Clears the field bit"]
    #[inline(always)]
    pub fn clear_bit(self) -> &'a mut W {
        self.bit(false)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bit(self, value: bool) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x01 << 3)) | (((value as u32) & 0x01) << 3);
        self.w
    }
}
// EMC0: 2-bit External Match Control field, written at bits 5:4
// (see the shift in `bits` below).
#[doc = "External Match Control 0. Determines the functionality of External Match 0.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EMC0_A {
    #[doc = "0: Do Nothing."]
    DO_NOTHING_ = 0,
    #[doc = "1: Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    CLEAR_THE_CORRESPOND = 1,
    #[doc = "2: Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    SET_THE_CORRESPONDIN = 2,
    #[doc = "3: Toggle the corresponding External Match bit/output."]
    TOGGLE_THE_CORRESPON = 3,
}
impl From<EMC0_A> for u8 {
    #[inline(always)]
    fn from(variant: EMC0_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `EMC0`"]
pub type EMC0_R = crate::R<u8, EMC0_A>;
impl EMC0_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EMC0_A {
        match self.bits {
            0 => EMC0_A::DO_NOTHING_,
            1 => EMC0_A::CLEAR_THE_CORRESPOND,
            2 => EMC0_A::SET_THE_CORRESPONDIN,
            3 => EMC0_A::TOGGLE_THE_CORRESPON,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `DO_NOTHING_`"]
    #[inline(always)]
    pub fn is_do_nothing_(&self) -> bool {
        *self == EMC0_A::DO_NOTHING_
    }
    #[doc = "Checks if the value of the field is `CLEAR_THE_CORRESPOND`"]
    #[inline(always)]
    pub fn is_clear_the_correspond(&self) -> bool {
        *self == EMC0_A::CLEAR_THE_CORRESPOND
    }
    #[doc = "Checks if the value of the field is `SET_THE_CORRESPONDIN`"]
    #[inline(always)]
    pub fn is_set_the_correspondin(&self) -> bool {
        *self == EMC0_A::SET_THE_CORRESPONDIN
    }
    #[doc = "Checks if the value of the field is `TOGGLE_THE_CORRESPON`"]
    #[inline(always)]
    pub fn is_toggle_the_correspon(&self) -> bool {
        *self == EMC0_A::TOGGLE_THE_CORRESPON
    }
}
#[doc = "Write proxy for field `EMC0`"]
pub struct EMC0_W<'a> {
    w: &'a mut W,
}
impl<'a> EMC0_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EMC0_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Do Nothing."]
    #[inline(always)]
    pub fn do_nothing_(self) -> &'a mut W {
        self.variant(EMC0_A::DO_NOTHING_)
    }
    #[doc = "Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    #[inline(always)]
    pub fn clear_the_correspond(self) -> &'a mut W {
        self.variant(EMC0_A::CLEAR_THE_CORRESPOND)
    }
    #[doc = "Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    #[inline(always)]
    pub fn set_the_correspondin(self) -> &'a mut W {
        self.variant(EMC0_A::SET_THE_CORRESPONDIN)
    }
    #[doc = "Toggle the corresponding External Match bit/output."]
    #[inline(always)]
    pub fn toggle_the_correspon(self) -> &'a mut W {
        self.variant(EMC0_A::TOGGLE_THE_CORRESPON)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 4)) | (((value as u32) & 0x03) << 4);
        self.w
    }
}
// EMC1: 2-bit External Match Control field, written at bits 7:6
// (see the shift in `bits` below).
#[doc = "External Match Control 1. Determines the functionality of External Match 1.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EMC1_A {
    #[doc = "0: Do Nothing."]
    DO_NOTHING_ = 0,
    #[doc = "1: Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    CLEAR_THE_CORRESPOND = 1,
    #[doc = "2: Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    SET_THE_CORRESPONDIN = 2,
    #[doc = "3: Toggle the corresponding External Match bit/output."]
    TOGGLE_THE_CORRESPON = 3,
}
impl From<EMC1_A> for u8 {
    #[inline(always)]
    fn from(variant: EMC1_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `EMC1`"]
pub type EMC1_R = crate::R<u8, EMC1_A>;
impl EMC1_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EMC1_A {
        match self.bits {
            0 => EMC1_A::DO_NOTHING_,
            1 => EMC1_A::CLEAR_THE_CORRESPOND,
            2 => EMC1_A::SET_THE_CORRESPONDIN,
            3 => EMC1_A::TOGGLE_THE_CORRESPON,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `DO_NOTHING_`"]
    #[inline(always)]
    pub fn is_do_nothing_(&self) -> bool {
        *self == EMC1_A::DO_NOTHING_
    }
    #[doc = "Checks if the value of the field is `CLEAR_THE_CORRESPOND`"]
    #[inline(always)]
    pub fn is_clear_the_correspond(&self) -> bool {
        *self == EMC1_A::CLEAR_THE_CORRESPOND
    }
    #[doc = "Checks if the value of the field is `SET_THE_CORRESPONDIN`"]
    #[inline(always)]
    pub fn is_set_the_correspondin(&self) -> bool {
        *self == EMC1_A::SET_THE_CORRESPONDIN
    }
    #[doc = "Checks if the value of the field is `TOGGLE_THE_CORRESPON`"]
    #[inline(always)]
    pub fn is_toggle_the_correspon(&self) -> bool {
        *self == EMC1_A::TOGGLE_THE_CORRESPON
    }
}
#[doc = "Write proxy for field `EMC1`"]
pub struct EMC1_W<'a> {
    w: &'a mut W,
}
impl<'a> EMC1_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EMC1_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Do Nothing."]
    #[inline(always)]
    pub fn do_nothing_(self) -> &'a mut W {
        self.variant(EMC1_A::DO_NOTHING_)
    }
    #[doc = "Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    #[inline(always)]
    pub fn clear_the_correspond(self) -> &'a mut W {
        self.variant(EMC1_A::CLEAR_THE_CORRESPOND)
    }
    #[doc = "Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    #[inline(always)]
    pub fn set_the_correspondin(self) -> &'a mut W {
        self.variant(EMC1_A::SET_THE_CORRESPONDIN)
    }
    #[doc = "Toggle the corresponding External Match bit/output."]
    #[inline(always)]
    pub fn toggle_the_correspon(self) -> &'a mut W {
        self.variant(EMC1_A::TOGGLE_THE_CORRESPON)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 6)) | (((value as u32) & 0x03) << 6);
        self.w
    }
}
// EMC2: 2-bit External Match Control field, written at bits 9:8
// (see the shift in `bits` below).
#[doc = "External Match Control 2. Determines the functionality of External Match 2.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EMC2_A {
    #[doc = "0: Do Nothing."]
    DO_NOTHING_ = 0,
    #[doc = "1: Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    CLEAR_THE_CORRESPOND = 1,
    #[doc = "2: Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    SET_THE_CORRESPONDIN = 2,
    #[doc = "3: Toggle the corresponding External Match bit/output."]
    TOGGLE_THE_CORRESPON = 3,
}
impl From<EMC2_A> for u8 {
    #[inline(always)]
    fn from(variant: EMC2_A) -> Self {
        variant as _
    }
}
#[doc = "Reader of field `EMC2`"]
pub type EMC2_R = crate::R<u8, EMC2_A>;
impl EMC2_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EMC2_A {
        match self.bits {
            0 => EMC2_A::DO_NOTHING_,
            1 => EMC2_A::CLEAR_THE_CORRESPOND,
            2 => EMC2_A::SET_THE_CORRESPONDIN,
            3 => EMC2_A::TOGGLE_THE_CORRESPON,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `DO_NOTHING_`"]
    #[inline(always)]
    pub fn is_do_nothing_(&self) -> bool {
        *self == EMC2_A::DO_NOTHING_
    }
    #[doc = "Checks if the value of the field is `CLEAR_THE_CORRESPOND`"]
    #[inline(always)]
    pub fn is_clear_the_correspond(&self) -> bool {
        *self == EMC2_A::CLEAR_THE_CORRESPOND
    }
    #[doc = "Checks if the value of the field is `SET_THE_CORRESPONDIN`"]
    #[inline(always)]
    pub fn is_set_the_correspondin(&self) -> bool {
        *self == EMC2_A::SET_THE_CORRESPONDIN
    }
    #[doc = "Checks if the value of the field is `TOGGLE_THE_CORRESPON`"]
    #[inline(always)]
    pub fn is_toggle_the_correspon(&self) -> bool {
        *self == EMC2_A::TOGGLE_THE_CORRESPON
    }
}
#[doc = "Write proxy for field `EMC2`"]
pub struct EMC2_W<'a> {
    w: &'a mut W,
}
impl<'a> EMC2_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EMC2_A) -> &'a mut W {
        {
            self.bits(variant.into())
        }
    }
    #[doc = "Do Nothing."]
    #[inline(always)]
    pub fn do_nothing_(self) -> &'a mut W {
        self.variant(EMC2_A::DO_NOTHING_)
    }
    #[doc = "Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    #[inline(always)]
    pub fn clear_the_correspond(self) -> &'a mut W {
        self.variant(EMC2_A::CLEAR_THE_CORRESPOND)
    }
    #[doc = "Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    #[inline(always)]
    pub fn set_the_correspondin(self) -> &'a mut W {
        self.variant(EMC2_A::SET_THE_CORRESPONDIN)
    }
    #[doc = "Toggle the corresponding External Match bit/output."]
    #[inline(always)]
    pub fn toggle_the_correspon(self) -> &'a mut W {
        self.variant(EMC2_A::TOGGLE_THE_CORRESPON)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        self.w.bits = (self.w.bits & !(0x03 << 8)) | (((value as u32) & 0x03) << 8);
        self.w
    }
}
#[doc = "External Match Control 3. Determines the functionality of External Match 3.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
#[repr(u8)]
pub enum EMC3_A {
#[doc = "0: Do Nothing."]
DO_NOTHING_ = 0,
#[doc = "1: Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
CLEAR_THE_CORRESPOND = 1,
#[doc = "2: Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
SET_THE_CORRESPONDIN = 2,
#[doc = "3: Toggle the corresponding External Match bit/output."]
TOGGLE_THE_CORRESPON = 3,
}
impl From<EMC3_A> for u8 {
#[inline(always)]
fn from(variant: EMC3_A) -> Self {
variant as _
}
}
#[doc = "Reader of field `EMC3`"]
pub type EMC3_R = crate::R<u8, EMC3_A>;
impl EMC3_R {
    #[doc = r"Get enumerated values variant"]
    #[inline(always)]
    pub fn variant(&self) -> EMC3_A {
        // `R::emc3()` masks the raw register to two bits (`& 0x03`), so 0..=3
        // are the only values that can reach this match.
        match self.bits {
            0 => EMC3_A::DO_NOTHING_,
            1 => EMC3_A::CLEAR_THE_CORRESPOND,
            2 => EMC3_A::SET_THE_CORRESPONDIN,
            3 => EMC3_A::TOGGLE_THE_CORRESPON,
            _ => unreachable!(),
        }
    }
    #[doc = "Checks if the value of the field is `DO_NOTHING_`"]
    #[inline(always)]
    pub fn is_do_nothing_(&self) -> bool {
        *self == EMC3_A::DO_NOTHING_
    }
    #[doc = "Checks if the value of the field is `CLEAR_THE_CORRESPOND`"]
    #[inline(always)]
    pub fn is_clear_the_correspond(&self) -> bool {
        *self == EMC3_A::CLEAR_THE_CORRESPOND
    }
    #[doc = "Checks if the value of the field is `SET_THE_CORRESPONDIN`"]
    #[inline(always)]
    pub fn is_set_the_correspondin(&self) -> bool {
        *self == EMC3_A::SET_THE_CORRESPONDIN
    }
    #[doc = "Checks if the value of the field is `TOGGLE_THE_CORRESPON`"]
    #[inline(always)]
    pub fn is_toggle_the_correspon(&self) -> bool {
        *self == EMC3_A::TOGGLE_THE_CORRESPON
    }
}
#[doc = "Write proxy for field `EMC3`"]
pub struct EMC3_W<'a> {
    // Mutable borrow of the register writer; `bits()` below updates `w.bits` in place.
    w: &'a mut W,
}
impl<'a> EMC3_W<'a> {
    #[doc = r"Writes `variant` to the field"]
    #[inline(always)]
    pub fn variant(self, variant: EMC3_A) -> &'a mut W {
        self.bits(variant.into())
    }
    #[doc = "Do Nothing."]
    #[inline(always)]
    pub fn do_nothing_(self) -> &'a mut W {
        self.variant(EMC3_A::DO_NOTHING_)
    }
    #[doc = "Clear the corresponding External Match bit/output to 0 (MATn.m pin is LOW if pinned out)."]
    #[inline(always)]
    pub fn clear_the_correspond(self) -> &'a mut W {
        self.variant(EMC3_A::CLEAR_THE_CORRESPOND)
    }
    #[doc = "Set the corresponding External Match bit/output to 1 (MATn.m pin is HIGH if pinned out)."]
    #[inline(always)]
    pub fn set_the_correspondin(self) -> &'a mut W {
        self.variant(EMC3_A::SET_THE_CORRESPONDIN)
    }
    #[doc = "Toggle the corresponding External Match bit/output."]
    #[inline(always)]
    pub fn toggle_the_correspon(self) -> &'a mut W {
        self.variant(EMC3_A::TOGGLE_THE_CORRESPON)
    }
    #[doc = r"Writes raw bits to the field"]
    #[inline(always)]
    pub fn bits(self, value: u8) -> &'a mut W {
        // EMC3 occupies register bits 11:10. Clear the two-bit field first,
        // then merge in the (masked) new value.
        const MASK: u32 = 0x03;
        const OFFSET: u32 = 10;
        let cleared = self.w.bits & !(MASK << OFFSET);
        self.w.bits = cleared | (((value as u32) & MASK) << OFFSET);
        self.w
    }
}
impl R {
    // Read accessors: each extracts its field at the bit position stated in
    // the `#[doc]` attribute (single bits for EMn, 2-bit groups for EMCn).
    #[doc = "Bit 0 - External Match 0. When a match occurs between the TC and MR0, this bit can either toggle, go low, go high, or do nothing, depending on bits 5:4 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em0(&self) -> EM0_R {
        EM0_R::new((self.bits & 0x01) != 0)
    }
    #[doc = "Bit 1 - External Match 1. When a match occurs between the TC and MR1, this bit can either toggle, go low, go high, or do nothing, depending on bits 7:6 of this register. This bit can be driven onto a MATn.1 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em1(&self) -> EM1_R {
        EM1_R::new(((self.bits >> 1) & 0x01) != 0)
    }
    #[doc = "Bit 2 - External Match 2. When a match occurs between the TC and MR2, this bit can either toggle, go low, go high, or do nothing, depending on bits 9:8 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em2(&self) -> EM2_R {
        EM2_R::new(((self.bits >> 2) & 0x01) != 0)
    }
    #[doc = "Bit 3 - External Match 3. When a match occurs between the TC and MR3, this bit can either toggle, go low, go high, or do nothing, depending on bits 11:10 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em3(&self) -> EM3_R {
        EM3_R::new(((self.bits >> 3) & 0x01) != 0)
    }
    #[doc = "Bits 4:5 - External Match Control 0. Determines the functionality of External Match 0."]
    #[inline(always)]
    pub fn emc0(&self) -> EMC0_R {
        EMC0_R::new(((self.bits >> 4) & 0x03) as u8)
    }
    #[doc = "Bits 6:7 - External Match Control 1. Determines the functionality of External Match 1."]
    #[inline(always)]
    pub fn emc1(&self) -> EMC1_R {
        EMC1_R::new(((self.bits >> 6) & 0x03) as u8)
    }
    #[doc = "Bits 8:9 - External Match Control 2. Determines the functionality of External Match 2."]
    #[inline(always)]
    pub fn emc2(&self) -> EMC2_R {
        EMC2_R::new(((self.bits >> 8) & 0x03) as u8)
    }
    #[doc = "Bits 10:11 - External Match Control 3. Determines the functionality of External Match 3."]
    #[inline(always)]
    pub fn emc3(&self) -> EMC3_R {
        // The `& 0x03` here is what makes `EMC3_R::variant`'s `unreachable!()` safe.
        EMC3_R::new(((self.bits >> 10) & 0x03) as u8)
    }
}
impl W {
    // Write accessors: each returns a proxy that mutably borrows this writer;
    // the proxy's `bits()`/variant methods perform the actual read-modify-write.
    #[doc = "Bit 0 - External Match 0. When a match occurs between the TC and MR0, this bit can either toggle, go low, go high, or do nothing, depending on bits 5:4 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em0(&mut self) -> EM0_W {
        EM0_W { w: self }
    }
    #[doc = "Bit 1 - External Match 1. When a match occurs between the TC and MR1, this bit can either toggle, go low, go high, or do nothing, depending on bits 7:6 of this register. This bit can be driven onto a MATn.1 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em1(&mut self) -> EM1_W {
        EM1_W { w: self }
    }
    #[doc = "Bit 2 - External Match 2. When a match occurs between the TC and MR2, this bit can either toggle, go low, go high, or do nothing, depending on bits 9:8 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em2(&mut self) -> EM2_W {
        EM2_W { w: self }
    }
    #[doc = "Bit 3 - External Match 3. When a match occurs between the TC and MR3, this bit can either toggle, go low, go high, or do nothing, depending on bits 11:10 of this register. This bit can be driven onto a MATn.0 pin, in a positive-logic manner (0 = low, 1 = high)."]
    #[inline(always)]
    pub fn em3(&mut self) -> EM3_W {
        EM3_W { w: self }
    }
    #[doc = "Bits 4:5 - External Match Control 0. Determines the functionality of External Match 0."]
    #[inline(always)]
    pub fn emc0(&mut self) -> EMC0_W {
        EMC0_W { w: self }
    }
    #[doc = "Bits 6:7 - External Match Control 1. Determines the functionality of External Match 1."]
    #[inline(always)]
    pub fn emc1(&mut self) -> EMC1_W {
        EMC1_W { w: self }
    }
    #[doc = "Bits 8:9 - External Match Control 2. Determines the functionality of External Match 2."]
    #[inline(always)]
    pub fn emc2(&mut self) -> EMC2_W {
        EMC2_W { w: self }
    }
    #[doc = "Bits 10:11 - External Match Control 3. Determines the functionality of External Match 3."]
    #[inline(always)]
    pub fn emc3(&mut self) -> EMC3_W {
        EMC3_W { w: self }
    }
}
| 36.729204 | 276 | 0.598207 |
7a701ddebf8f1d040d6162744c17804ae9451b36 | 12,181 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
use common::{Location, Named, WithLocation};
use graphql_syntax::{FloatValue, OperationKind};
use intern::string_key::StringKey;
use schema::{FieldID, Type, TypeReference};
use schema::{SDLSchema, Schema};
use std::fmt;
use std::hash::Hash;
use std::sync::Arc;
use crate::AssociatedData;
// Definitions
/// A top-level executable GraphQL definition: either an operation or a fragment.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ExecutableDefinition {
    Operation(OperationDefinition),
    Fragment(FragmentDefinition),
}
impl ExecutableDefinition {
    /// Returns `true` if the definition carries a directive named
    /// `directive_name`.
    pub fn has_directive(&self, directive_name: StringKey) -> bool {
        // Bind the directive list once instead of duplicating the whole
        // `.iter().any(..)` chain in each match arm (as the original did).
        let directives = match self {
            ExecutableDefinition::Operation(node) => &node.directives,
            ExecutableDefinition::Fragment(node) => &node.directives,
        };
        directives.iter().any(|d| d.name.item == directive_name)
    }
    /// Returns the definition's name together with its source location.
    pub fn name_with_location(&self) -> WithLocation<StringKey> {
        match self {
            ExecutableDefinition::Operation(node) => node.name,
            ExecutableDefinition::Fragment(node) => node.name,
        }
    }
}
/// A fully-typed mutation, query, or subscription definition
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct OperationDefinition {
    /// Whether this is a query, mutation, or subscription.
    pub kind: OperationKind,
    pub name: WithLocation<StringKey>,
    /// Schema type the operation selects on (presumably the root type for
    /// `kind` — confirm against the builder).
    pub type_: Type,
    pub variable_definitions: Vec<VariableDefinition>,
    pub directives: Vec<Directive>,
    pub selections: Vec<Selection>,
}
impl OperationDefinition {
    /// `true` when this operation is a query.
    pub fn is_query(&self) -> bool {
        matches!(self.kind, OperationKind::Query)
    }
    /// `true` when this operation is a mutation.
    pub fn is_mutation(&self) -> bool {
        matches!(self.kind, OperationKind::Mutation)
    }
    /// `true` when this operation is a subscription.
    pub fn is_subscription(&self) -> bool {
        matches!(self.kind, OperationKind::Subscription)
    }
}
/// A fully-typed fragment definition
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FragmentDefinition {
    pub name: WithLocation<StringKey>,
    /// Variables declared on the fragment itself.
    pub variable_definitions: Vec<VariableDefinition>,
    /// Operation-level variables this fragment references.
    pub used_global_variables: Vec<VariableDefinition>,
    /// The type this fragment applies to (`... on TypeCondition`).
    pub type_condition: Type,
    pub directives: Vec<Directive>,
    pub selections: Vec<Selection>,
}
/// A variable definition of an operation or fragment
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct VariableDefinition {
    pub name: WithLocation<StringKey>,
    pub type_: TypeReference,
    /// Optional default; `None` when the declaration has no `= value` part.
    pub default_value: Option<WithLocation<ConstantValue>>,
    pub directives: Vec<Directive>,
}
impl VariableDefinition {
    /// `true` when a default value is present and that value is not `null`.
    pub fn has_non_null_default_value(&self) -> bool {
        self.default_value
            .as_ref()
            .map_or(false, |value| value.item.is_non_null())
    }
}
impl Named for VariableDefinition {
    // Exposes the variable's name (without location) for name-based lookups.
    fn name(&self) -> StringKey {
        self.name.item
    }
}
// Selections
/// A selection within an operation or fragment
#[derive(Clone, Eq, PartialEq)]
pub enum Selection {
    FragmentSpread(Arc<FragmentSpread>),
    InlineFragment(Arc<InlineFragment>),
    LinkedField(Arc<LinkedField>),
    ScalarField(Arc<ScalarField>),
    /// Compiler-internal `@include`/`@skip` grouping; see [`Condition`].
    Condition(Arc<Condition>),
}
impl Selection {
    /// Get selection directives
    /// This method will panic if called on the Selection::Condition
    pub fn directives(&self) -> &[Directive] {
        match self {
            Selection::FragmentSpread(node) => &node.directives,
            Selection::InlineFragment(node) => &node.directives,
            Selection::ScalarField(node) => &node.directives,
            Selection::LinkedField(node) => &node.directives,
            Selection::Condition(_) => unreachable!("Unexpected `Condition` selection."),
        }
    }
    /// Update Selection directives
    /// This method will panic if called on the Selection::Condition
    pub fn set_directives(&mut self, directives: Vec<Directive>) {
        // `Arc::make_mut` clones the node first if the Arc is shared, so
        // other holders of the original Arc are unaffected (copy-on-write).
        match self {
            Selection::FragmentSpread(node) => {
                Arc::make_mut(node).directives = directives;
            }
            Selection::InlineFragment(node) => {
                Arc::make_mut(node).directives = directives;
            }
            Selection::ScalarField(node) => {
                Arc::make_mut(node).directives = directives;
            }
            Selection::LinkedField(node) => {
                Arc::make_mut(node).directives = directives;
            }
            Selection::Condition(_) => unreachable!("Unexpected `Condition` selection."),
        };
    }
    /// A quick method to get the location of the selection. This may
    /// be helpful for error reporting. Please note, this implementation
    /// prefers the location of the alias for scalar and linked field selections.
    /// It also returns `None` for conditional nodes and inline fragments.
    pub fn location(&self) -> Option<Location> {
        match self {
            Selection::Condition(_) => None,
            Selection::FragmentSpread(node) => Some(node.fragment.location),
            Selection::InlineFragment(_) => None,
            Selection::LinkedField(node) => Some(node.alias_or_name_location()),
            Selection::ScalarField(node) => Some(node.alias_or_name_location()),
        }
    }
    /// Similar to `==`, but only checking for `Arc::ptr_eq` without
    /// doing a deeper structural equality check
    pub fn ptr_eq(&self, other: &Selection) -> bool {
        // The explicit per-variant `false` arms (instead of a `_` catch-all)
        // keep this match exhaustive, so adding a variant forces an update here.
        match (self, other) {
            (Selection::LinkedField(a), Selection::LinkedField(b)) => Arc::ptr_eq(a, b),
            (Selection::ScalarField(a), Selection::ScalarField(b)) => Arc::ptr_eq(a, b),
            (Selection::InlineFragment(a), Selection::InlineFragment(b)) => Arc::ptr_eq(a, b),
            (Selection::FragmentSpread(a), Selection::FragmentSpread(b)) => Arc::ptr_eq(a, b),
            (Selection::Condition(a), Selection::Condition(b)) => Arc::ptr_eq(a, b),
            (Selection::LinkedField(_), _)
            | (Selection::ScalarField(_), _)
            | (Selection::InlineFragment(_), _)
            | (Selection::FragmentSpread(_), _)
            | (Selection::Condition(_), _) => false,
        }
    }
}
impl fmt::Debug for Selection {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the alternate (pretty-printed) Debug of the inner node,
        // hiding the `Arc` wrapper from the output.
        match self {
            Selection::FragmentSpread(node) => write!(f, "{:#?}", node),
            Selection::InlineFragment(node) => write!(f, "{:#?}", node),
            Selection::LinkedField(node) => write!(f, "{:#?}", node),
            Selection::ScalarField(node) => write!(f, "{:#?}", node),
            Selection::Condition(node) => write!(f, "{:#?}", node),
        }
    }
}
/// ... Name
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FragmentSpread {
    /// Name of the fragment being spread.
    pub fragment: WithLocation<StringKey>,
    pub arguments: Vec<Argument>,
    pub directives: Vec<Directive>,
}
/// ... SelectionSet
/// ... on Type SelectionSet
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InlineFragment {
    /// `None` for an untyped `... { }` fragment.
    pub type_condition: Option<Type>,
    pub directives: Vec<Directive>,
    pub selections: Vec<Selection>,
}
/// Common accessors shared by field selections (implemented below for
/// `LinkedField` and `ScalarField`).
pub trait Field {
    fn alias(&self) -> Option<WithLocation<StringKey>>;
    fn definition(&self) -> WithLocation<FieldID>;
    fn arguments(&self) -> &[Argument];
    fn directives(&self) -> &[Directive];
    /// The alias if present, otherwise the schema name of the field.
    fn alias_or_name(&self, schema: &SDLSchema) -> StringKey {
        if let Some(name) = self.alias() {
            name.item
        } else {
            schema.field(self.definition().item).name.item
        }
    }
    /// Location of the alias if present, otherwise of the field definition.
    fn alias_or_name_location(&self) -> Location {
        if let Some(name) = self.alias() {
            name.location
        } else {
            self.definition().location
        }
    }
}
/// Name Arguments? SelectionSet
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct LinkedField {
    pub alias: Option<WithLocation<StringKey>>,
    /// Reference to the field in the schema.
    pub definition: WithLocation<FieldID>,
    pub arguments: Vec<Argument>,
    pub directives: Vec<Directive>,
    pub selections: Vec<Selection>,
}
// Straightforward field accessors; the interesting logic lives in the
// `Field` trait's provided methods.
impl Field for LinkedField {
    fn alias(&self) -> Option<WithLocation<StringKey>> {
        self.alias
    }
    fn definition(&self) -> WithLocation<FieldID> {
        self.definition
    }
    fn arguments(&self) -> &[Argument] {
        &self.arguments
    }
    fn directives(&self) -> &[Directive] {
        &self.directives
    }
}
/// Name Arguments?
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ScalarField {
    pub alias: Option<WithLocation<StringKey>>,
    /// Reference to the field in the schema.
    pub definition: WithLocation<FieldID>,
    pub arguments: Vec<Argument>,
    pub directives: Vec<Directive>,
}
// Straightforward field accessors; mirrors the `LinkedField` impl above.
impl Field for ScalarField {
    fn alias(&self) -> Option<WithLocation<StringKey>> {
        self.alias
    }
    fn definition(&self) -> WithLocation<FieldID> {
        self.definition
    }
    fn arguments(&self) -> &[Argument] {
        &self.arguments
    }
    fn directives(&self) -> &[Directive] {
        &self.directives
    }
}
/// https://spec.graphql.org/June2018/#sec--skip
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Condition {
    pub selections: Vec<Selection>,
    pub value: ConditionValue,
    /// `true` models `@include`, `false` models `@skip`
    /// (see `directive_name` below).
    pub passing_value: bool,
}
impl Condition {
    /// Name of the GraphQL directive this condition models.
    pub fn directive_name(&self) -> &'static str {
        match self.passing_value {
            true => "include",
            false => "skip",
        }
    }
}
// Associated Types
/// @ Name Arguments?
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Directive {
    pub name: WithLocation<StringKey>,
    pub arguments: Vec<Argument>,
    /// Optional typed data that has no textual representation. This can be used
    /// to attach arbitrary data on compiler-internal directives, such as to
    /// pass instructions to code generation.
    pub data: Option<Box<dyn AssociatedData>>,
}
impl Named for Directive {
    // Exposes the directive's name (without location) for name-based lookups.
    fn name(&self) -> StringKey {
        self.name.item
    }
}
/// Name : Value
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Argument {
    pub name: WithLocation<StringKey>,
    pub value: WithLocation<Value>,
}
impl Named for Argument {
    // Exposes the argument's name (without location) for name-based lookups.
    fn name(&self) -> StringKey {
        self.name.item
    }
}
/// An argument value: a constant, a variable reference, or a
/// (possibly nested) list/object of values.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum Value {
    Constant(ConstantValue),
    Variable(Variable),
    List(Vec<Value>),
    Object(Vec<Argument>),
}
impl Value {
    /// If the value is a constant string literal, return the value, otherwise None.
    pub fn get_string_literal(&self) -> Option<StringKey> {
        match self {
            Value::Constant(ConstantValue::String(val)) => Some(*val),
            _ => None,
        }
    }
    /// Return the constant string literal of this value.
    /// Panics if the value is not a constant string literal.
    pub fn expect_string_literal(&self) -> StringKey {
        match self.get_string_literal() {
            Some(val) => val,
            None => panic!("expected a string literal, got {:?}", self),
        }
    }
}
/// A reference to a declared variable, together with its resolved type.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct Variable {
    pub name: WithLocation<StringKey>,
    pub type_: TypeReference,
}
/// Name : Value[Const]
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct ConstantArgument {
    pub name: WithLocation<StringKey>,
    pub value: WithLocation<ConstantValue>,
}
impl Named for ConstantArgument {
    // Exposes the argument's name (without location) for name-based lookups.
    fn name(&self) -> StringKey {
        self.name.item
    }
}
/// A fully-constant value (no variable references), mirroring the GraphQL
/// constant value grammar.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum ConstantValue {
    Int(i64),
    Float(FloatValue),
    String(StringKey),
    Boolean(bool),
    Null(),
    Enum(StringKey),
    List(Vec<ConstantValue>),
    Object(Vec<ConstantArgument>),
}
impl ConstantValue {
    /// `true` only for the `Null()` variant.
    pub fn is_null(&self) -> bool {
        match self {
            ConstantValue::Null() => true,
            _ => false,
        }
    }
    /// Complement of [`ConstantValue::is_null`].
    pub fn is_non_null(&self) -> bool {
        !self.is_null()
    }
    /// The string payload when this is a `String` literal, otherwise `None`.
    pub fn get_string_literal(&self) -> Option<StringKey> {
        if let ConstantValue::String(value) = self {
            Some(*value)
        } else {
            None
        }
    }
}
/// The value a `Condition` tests: either known at compile time or a variable.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum ConditionValue {
    Constant(bool),
    Variable(Variable),
}
| 29.493947 | 94 | 0.622445 |
ccbae18565edffca98d148a4791ae93bf75d842f | 9,519 | //! String interner for Boa.
//!
//! The idea behind using a string interner is that in most of the code, strings such as
//! identifiers and literals are often repeated. This causes extra burden when comparing them and
//! storing them. A string interner stores a unique `usize` symbol for each string, making sure
//! that there are no duplicates. This makes it much easier to compare, since it's just comparing
//! to `usize`, and also it's easier to store, since instead of a heap-allocated string, you only
//! need to store a `usize`. This reduces memory consumption and improves performance in the
//! compiler.
#![doc(
html_logo_url = "https://raw.githubusercontent.com/boa-dev/boa/main/assets/logo.svg",
html_favicon_url = "https://raw.githubusercontent.com/boa-dev/boa/main/assets/logo.svg"
)]
#![deny(
clippy::all,
unused_qualifications,
unused_import_braces,
unused_lifetimes,
unreachable_pub,
trivial_numeric_casts,
// rustdoc,
missing_debug_implementations,
missing_copy_implementations,
deprecated_in_future,
meta_variable_misuse,
non_ascii_idents,
rust_2018_compatibility,
rust_2018_idioms,
future_incompatible,
nonstandard_style,
)]
#![warn(clippy::perf, clippy::single_match_else, clippy::dbg_macro)]
#![allow(
clippy::missing_inline_in_public_items,
clippy::cognitive_complexity,
clippy::must_use_candidate,
clippy::missing_errors_doc,
clippy::as_conversions,
clippy::let_unit_value,
rustdoc::missing_doc_code_examples
)]
#[cfg(test)]
mod tests;
use std::{fmt::Display, num::NonZeroUsize};
use gc::{unsafe_empty_trace, Finalize, Trace};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use string_interner::{backend::BucketBackend, StringInterner, Symbol};
/// Backend of the string interner.
// Bucket backend: supports `get_or_intern_static` without copying the string.
type Backend = BucketBackend<Sym>;
/// The string interner for Boa.
///
/// Wraps a [`StringInterner`] and reserves the lowest symbol values for the
/// pre-interned strings in `STATIC_STRINGS` (the docstring previously called
/// this a type alias, which is no longer accurate).
#[derive(Debug)]
pub struct Interner {
    // Symbols produced by `inner` are offset by `Sym::PADDING` (see the
    // `Symbol` impl for `Sym`) so they never collide with static symbols.
    inner: StringInterner<Backend>,
}
impl Interner {
    /// Creates a new StringInterner with the given initial capacity.
    #[inline]
    pub fn with_capacity(cap: usize) -> Self {
        Self {
            inner: StringInterner::with_capacity(cap),
        }
    }
    /// Returns the number of strings interned by the interner.
    ///
    /// Pre-interned static strings are not counted; this reflects only the
    /// inner interner.
    #[inline]
    pub fn len(&self) -> usize {
        self.inner.len()
    }
    /// Returns `true` if the string interner has no interned strings.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
    /// Returns the symbol for the given string if any.
    ///
    /// Can be used to query if a string has already been interned without interning.
    pub fn get<T>(&self, string: T) -> Option<Sym>
    where
        T: AsRef<str>,
    {
        let string = string.as_ref();
        // Static strings always resolve, even if never interned dynamically.
        Self::get_static(string).or_else(|| self.inner.get(string))
    }
    /// Interns the given string.
    ///
    /// Returns a symbol for resolution into the original string.
    ///
    /// # Panics
    /// If the interner already interns the maximum number of strings possible by the chosen symbol type.
    pub fn get_or_intern<T>(&mut self, string: T) -> Sym
    where
        T: AsRef<str>,
    {
        let string = string.as_ref();
        Self::get_static(string).unwrap_or_else(|| self.inner.get_or_intern(string))
    }
    /// Interns the given `'static` string.
    ///
    /// Returns a symbol for resolution into the original string.
    ///
    /// # Note
    ///
    /// This is more efficient than [`StringInterner::get_or_intern`] since it might
    /// avoid some memory allocations if the backends supports this.
    ///
    /// # Panics
    ///
    /// If the interner already interns the maximum number of strings possible
    /// by the chosen symbol type.
    pub fn get_or_intern_static(&mut self, string: &'static str) -> Sym {
        Self::get_static(string).unwrap_or_else(|| self.inner.get_or_intern_static(string))
    }
    /// Shrink backend capacity to fit the interned strings exactly.
    #[inline]
    pub fn shrink_to_fit(&mut self) {
        self.inner.shrink_to_fit()
    }
    /// Returns the string for the given symbol if any.
    #[inline]
    pub fn resolve(&self, symbol: Sym) -> Option<&str> {
        let index = symbol.as_raw().get();
        // Raw values 1..=STATIC_STRINGS.len() are the reserved static symbols;
        // anything above is delegated to the inner interner.
        if index <= Self::STATIC_STRINGS.len() {
            Some(Self::STATIC_STRINGS[index - 1])
        } else {
            self.inner.resolve(symbol)
        }
    }
    /// Returns the string for the given symbol.
    ///
    /// # Panics
    ///
    /// If the interner cannot resolve the given symbol.
    #[inline]
    pub fn resolve_expect(&self, symbol: Sym) -> &str {
        self.resolve(symbol).expect("string disappeared")
    }
    /// Gets the symbol of the static string if one of them
    fn get_static(string: &str) -> Option<Sym> {
        // `position` is the idiomatic (and clearer) form of the previous
        // `into_iter().enumerate().find(..)` chain; `i + 1` cannot overflow
        // because `i < STATIC_STRINGS.len()`, so the former `wrapping_add`
        // was unnecessary.
        Self::STATIC_STRINGS
            .iter()
            .position(|&s| s == string)
            .map(|i| {
                let raw = NonZeroUsize::new(i + 1).expect("static array too big");
                Sym::from_raw(raw)
            })
    }
}
// Builds an interner pre-populated with every string yielded by the iterator.
impl<T> FromIterator<T> for Interner
where
    T: AsRef<str>,
{
    #[inline]
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = T>,
    {
        Self {
            inner: StringInterner::from_iter(iter),
        }
    }
}
// Interns every string yielded by the iterator into the existing interner.
impl<T> Extend<T> for Interner
where
    T: AsRef<str>,
{
    #[inline]
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
    {
        self.inner.extend(iter)
    }
}
// Iterates `(symbol, string)` pairs of the dynamically interned strings
// (static strings are not included — they live outside `inner`).
impl<'a> IntoIterator for &'a Interner {
    type Item = (Sym, &'a str);
    type IntoIter = <&'a Backend as IntoIterator>::IntoIter;
    #[inline]
    fn into_iter(self) -> Self::IntoIter {
        self.inner.into_iter()
    }
}
impl Default for Interner {
    // An empty interner with default capacity.
    fn default() -> Self {
        Self {
            inner: StringInterner::new(),
        }
    }
}
/// The string symbol type for Boa.
///
/// This symbol type is internally a `NonZeroUsize`, which makes it pointer-width in size and it's
/// optimized so that it can occupy 1 pointer width even in an `Option` type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Finalize)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(transparent))]
pub struct Sym {
    // Raw values 1..=STATIC_STRINGS.len() denote static strings; larger
    // values come from the interner backend (offset by `PADDING`).
    value: NonZeroUsize,
}
impl Sym {
    /// Padding for the symbol internal value.
    const PADDING: usize = Interner::STATIC_STRINGS.len() + 1;
    // SAFETY (applies to every constant below): the literal passed to
    // `NonZeroUsize::new_unchecked` is a non-zero usize, and each value
    // matches the 1-based position of its string in `Interner::STATIC_STRINGS`.
    /// Symbol for the empty string (`""`).
    pub const EMPTY_STRING: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(1)) };
    /// Symbol for the `"arguments"` string.
    pub const ARGUMENTS: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(2)) };
    /// Symbol for the `"await"` string.
    pub const AWAIT: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(3)) };
    /// Symbol for the `"yield"` string.
    pub const YIELD: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(4)) };
    /// Symbol for the `"eval"` string.
    pub const EVAL: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(5)) };
    /// Symbol for the `"default"` string.
    pub const DEFAULT: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(6)) };
    /// Symbol for the `"null"` string.
    pub const NULL: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(7)) };
    /// Symbol for the `"RegExp"` string.
    pub const REGEXP: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(8)) };
    /// Symbol for the `"get"` string.
    pub const GET: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(9)) };
    /// Symbol for the `"set"` string.
    pub const SET: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(10)) };
    /// Symbol for the `"<main>"` string.
    pub const MAIN: Self = unsafe { Self::from_raw(NonZeroUsize::new_unchecked(11)) };
    /// Creates a `Sym` from a raw `NonZeroUsize`.
    const fn from_raw(value: NonZeroUsize) -> Self {
        Self { value }
    }
    /// Retrieves the raw `NonZeroUsize` for this symbol.`
    const fn as_raw(self) -> NonZeroUsize {
        self.value
    }
}
impl Symbol for Sym {
    #[inline]
    fn try_from_usize(index: usize) -> Option<Self> {
        // Offsets backend indices past the reserved static-string symbols.
        // `checked_add` guards against overflow; `NonZeroUsize::new` only
        // fails if the addition wrapped to zero (it cannot after checked_add).
        index
            .checked_add(Self::PADDING)
            .and_then(NonZeroUsize::new)
            .map(|value| Self { value })
    }
    #[inline]
    fn to_usize(self) -> usize {
        // Inverse of `try_from_usize`; only valid for non-static symbols,
        // whose raw value is >= PADDING.
        self.value.get() - Self::PADDING
    }
}
// Safe because `Sym` implements `Copy`.
// SAFETY: `Sym` holds no GC-managed data, so an empty trace is sound.
unsafe impl Trace for Sym {
    unsafe_empty_trace!();
}
/// Converts a given element to a string using an interner.
pub trait ToInternedString {
    /// Converts a given element to a string using an interner.
    fn to_interned_string(&self, interner: &Interner) -> String;
}
// Blanket impl: any `Display` type can be stringified without the interner.
impl<T> ToInternedString for T
where
    T: Display,
{
    fn to_interned_string(&self, _interner: &Interner) -> String {
        // `to_string` is the idiomatic equivalent of `format!("{}", self)`
        // for a `Display` type (clippy: `useless_format`-style cleanup).
        self.to_string()
    }
}
impl Interner {
    /// List of commonly used static strings.
    ///
    /// Make sure that any string added as a `Sym` constant is also added here.
    // Order matters: position i corresponds to the `Sym` constant with raw
    // value i + 1 (see `Sym::EMPTY_STRING` through `Sym::MAIN`).
    const STATIC_STRINGS: [&'static str; 11] = [
        "",
        "arguments",
        "await",
        "yield",
        "eval",
        "default",
        "null",
        "RegExp",
        "get",
        "set",
        "<main>",
    ];
}
| 29.199387 | 105 | 0.627272 |
56853c92eccc4d03c9d4801f9f37ed0d58bd1cc5 | 204 | mod lib;
use lib::Bst;
fn main() {
    // Build a small binary search tree, insert three key/value pairs, then
    // look up a key (4) that was never inserted.
    let mut tree = Bst::new();
    tree.insert(5, String::from("Hello5"));
    tree.insert(3, String::from("Hello3"));
    tree.insert(6, String::from("Hello6"));
    tree.bst_get(4);
}
| 18.545455 | 39 | 0.563725 |
e24b411a3b54e00ab77c63bd337f2fede2806734 | 4,661 | use crate::core::ics02_client::error as client_error;
use crate::core::ics24_host::error::ValidationError;
use crate::core::ics24_host::identifier::{ClientId, ConnectionId};
use crate::proofs::ProofError;
use crate::Height;
use flex_error::define_error;
define_error! {
    #[derive(Debug, PartialEq, Eq)]
    Error {
        Ics02Client
            [ client_error::Error ]
            | _ | { "ics02 client error" },

        InvalidState
            { state: i32 }
            | e | { format_args!("connection state is unknown: {}", e.state) },

        ConnectionExistsAlready
            { connection_id: ConnectionId }
            | e | {
                format_args!("connection exists (was initialized) already: {0}",
                    e.connection_id)
            },

        // NOTE(review): the message mentions initialization rather than a
        // mismatch — confirm the wording matches how this variant is raised.
        ConnectionMismatch
            { connection_id: ConnectionId }
            | e | {
                format_args!("connection end for identifier {0} was never initialized",
                    e.connection_id)
            },

        // `currrent_height` (sic) is kept as-is: renaming the field would
        // break every constructor call site.
        InvalidConsensusHeight
            {
                target_height: Height,
                currrent_height: Height
            }
            | e | {
                format_args!("consensus height claimed by the client on the other party is too advanced: {0} (host chain current height: {1})",
                    e.target_height, e.currrent_height)
            },

        StaleConsensusHeight
            {
                target_height: Height,
                oldest_height: Height
            }
            | e | {
                format_args!("consensus height claimed by the client on the other party has been pruned: {0} (host chain oldest height: {1})",
                    e.target_height, e.oldest_height)
            },

        InvalidIdentifier
            [ ValidationError ]
            | _ | { "identifier error" },

        EmptyProtoConnectionEnd
            | _ | { "ConnectionEnd domain object could not be constructed out of empty proto object" },

        EmptyVersions
            | _ | { "empty supported versions" },

        EmptyFeatures
            | _ | { "empty supported features" },

        NoCommonVersion
            | _ | { "no common version" },

        InvalidAddress
            | _ | { "invalid address" },

        MissingProofHeight
            | _ | { "missing proof height" },

        MissingConsensusHeight
            | _ | { "missing consensus height" },

        InvalidProof
            [ ProofError ]
            | _ | { "invalid connection proof" },

        VerifyConnectionState
            [ client_error::Error ]
            // Fixed typo: "connnection" -> "connection".
            | _ | { "error verifying connection state" },

        InvalidSigner
            | _ | { "invalid signer" },

        ConnectionNotFound
            { connection_id: ConnectionId }
            | e | {
                format_args!("no connection was found for the previous connection id provided {0}",
                    e.connection_id)
            },

        // Fixed copy-paste: previously reported "invalid signer".
        InvalidCounterparty
            | _ | { "invalid counterparty" },

        ConnectionIdMismatch
            {
                connection_id: ConnectionId,
                counterparty_connection_id: ConnectionId,
            }
            | e | {
                format_args!("counterparty chosen connection id {0} is different than the connection id {1}",
                    e.connection_id, e.counterparty_connection_id)
            },

        MissingCounterparty
            | _ | { "missing counterparty" },

        MissingCounterpartyPrefix
            | _ | { "missing counterparty prefix" },

        NullClientProof
            | _ | { "client proof must be present" },

        // Fixed copy-paste: the old message described a missing client state,
        // which does not match the variant's meaning.
        FrozenClient
            { client_id: ClientId }
            | e | {
                format_args!("the client {0} is frozen",
                    e.client_id)
            },

        ConnectionVerificationFailure
            | _ | { "the connection proof verification failed" },

        ConsensusStateVerificationFailure
            { height: Height }
            [ client_error::Error ]
            | e | {
                format_args!("the consensus proof verification failed (height: {0})",
                    e.height)
            },

        // TODO: use more specific error source
        ClientStateVerificationFailure
            {
                client_id: ClientId,
            }
            [ client_error::Error ]
            | e | {
                format_args!("the client state proof verification failed for client id {0}",
                    e.client_id)
            },

        ImplementationSpecific
            | _ | { "implementation specific error" },
    }
}
| 30.664474 | 143 | 0.518344 |
e667c01193f12890b8de44d98a139aa1e7ef1363 | 1,687 | // Copyright 2020 Ant Group. All rights reserved.
// Copyright (C) 2020 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
use std::collections::HashMap;
use std::io::Result;
use std::sync::Arc;
use nydus_utils::digest;
use storage::device::BlobInfo;
use crate::metadata::{Inode, RafsInode, RafsSuperBlock, RafsSuperInodes};
use crate::{RafsIoReader, RafsResult};
// In-memory stand-in for a RAFS superblock: inodes are served from this map
// and every other trait method is left unimplemented.
pub struct MockSuperBlock {
    pub inodes: HashMap<Inode, Arc<dyn RafsInode + Send + Sync>>,
}
// Fixed chunk size exposed by this mock; consumers define its interpretation.
pub const CHUNK_SIZE: u32 = 200;
impl Default for MockSuperBlock {
fn default() -> Self {
Self {
inodes: HashMap::new(),
}
}
}
impl MockSuperBlock {
    /// Creates an empty mock superblock with no inodes registered.
    pub fn new() -> Self {
        Self {
            inodes: HashMap::default(),
        }
    }
}
impl RafsSuperInodes for MockSuperBlock {
    fn get_max_ino(&self) -> Inode {
        unimplemented!()
    }
    // Only inode lookup is implemented: returns ENOENT for unknown inodes;
    // `_digest_validate` is ignored by the mock.
    fn get_inode(&self, ino: Inode, _digest_validate: bool) -> Result<Arc<dyn RafsInode>> {
        self.inodes
            .get(&ino)
            .map_or(Err(enoent!()), |i| Ok(i.clone()))
    }
    fn validate_digest(
        &self,
        _inode: Arc<dyn RafsInode>,
        _recursive: bool,
        _digester: digest::Algorithm,
    ) -> Result<bool> {
        unimplemented!()
    }
}
// Every method except `destroy` (a deliberate no-op) panics: this mock is
// meant for tests that never exercise the superblock lifecycle.
impl RafsSuperBlock for MockSuperBlock {
    fn load(&mut self, _r: &mut RafsIoReader) -> Result<()> {
        unimplemented!()
    }
    fn update(&self, _r: &mut RafsIoReader) -> RafsResult<()> {
        unimplemented!()
    }
    fn destroy(&mut self) {}
    fn get_blob_infos(&self) -> Vec<Arc<BlobInfo>> {
        unimplemented!()
    }
    fn root_ino(&self) -> u64 {
        unimplemented!()
    }
}
| 23.109589 | 91 | 0.594547 |
216d2800d8a0ac3e76d9fe2bcef0a286c4a304e2 | 1,360 | use byteorder::LittleEndian;
use crate::decode::Decode;
use crate::encode::Encode;
use crate::mysql::io::BufMutExt;
use crate::mysql::protocol::TypeId;
use crate::mysql::types::MySqlTypeInfo;
use crate::mysql::{MySql, MySqlData, MySqlValue};
use crate::types::Type;
impl Type<MySql> for [u8] {
    // Byte slices map to MySQL TEXT with the binary flag set.
    fn type_info() -> MySqlTypeInfo {
        MySqlTypeInfo {
            id: TypeId::TEXT,
            is_binary: true,
            is_unsigned: false,
            char_set: 63, // binary
        }
    }
}
impl Type<MySql> for Vec<u8> {
    // Owned byte vectors share the slice type's info.
    fn type_info() -> MySqlTypeInfo {
        <[u8] as Type<MySql>>::type_info()
    }
}
impl Encode<MySql> for [u8] {
    // Writes the bytes using MySQL's length-encoded form (length prefix,
    // then the raw bytes).
    fn encode(&self, buf: &mut Vec<u8>) {
        buf.put_bytes_lenenc::<LittleEndian>(self);
    }
}
impl Encode<MySql> for Vec<u8> {
    // Delegates to the slice encoding above.
    fn encode(&self, buf: &mut Vec<u8>) {
        <[u8] as Encode<MySql>>::encode(self, buf);
    }
}
impl<'de> Decode<'de, MySql> for Vec<u8> {
    /// Copies the raw bytes out of either wire format into an owned buffer.
    fn decode(value: MySqlValue<'de>) -> crate::Result<Self> {
        let data = value.try_get()?;
        let bytes = match data {
            MySqlData::Binary(buf) => buf,
            MySqlData::Text(buf) => buf,
        };
        Ok(bytes.to_vec())
    }
}
impl<'de> Decode<'de, MySql> for &'de [u8] {
    /// Borrows the raw bytes directly from either wire format.
    fn decode(value: MySqlValue<'de>) -> crate::Result<Self> {
        match value.try_get()? {
            MySqlData::Binary(buf) => Ok(buf),
            MySqlData::Text(buf) => Ok(buf),
        }
    }
}
| 24.727273 | 78 | 0.575 |
bb4a793045795df17a051729abe1f125859e6b4a | 13,102 | use crate::cli::connect::ConnectError;
use tedge_config::FilePath;
use url::Url;
#[derive(Debug, PartialEq)]
pub struct BridgeConfig {
pub cloud_name: String,
pub config_file: String,
pub connection: String,
pub address: String,
pub remote_username: Option<String>,
pub bridge_root_cert_path: FilePath,
pub remote_clientid: String,
pub local_clientid: String,
pub bridge_certfile: FilePath,
pub bridge_keyfile: FilePath,
pub use_mapper: bool,
pub use_agent: bool,
pub try_private: bool,
pub start_type: String,
pub clean_session: bool,
pub notifications: bool,
pub notifications_local_only: bool,
pub notification_topic: String,
pub bridge_attempt_unsubscribe: bool,
pub topics: Vec<String>,
}
impl BridgeConfig {
pub fn serialize<W: std::io::Write>(&self, writer: &mut W) -> std::io::Result<()> {
writeln!(writer, "### Bridge")?;
writeln!(writer, "connection {}", self.connection)?;
match &self.remote_username {
Some(name) => {
writeln!(writer, "remote_username {}", name)?;
}
None => {}
}
writeln!(writer, "address {}", self.address)?;
// XXX: This has to go away
if std::fs::metadata(&self.bridge_root_cert_path)?.is_dir() {
writeln!(writer, "bridge_capath {}", self.bridge_root_cert_path)?;
} else {
writeln!(writer, "bridge_cafile {}", self.bridge_root_cert_path)?;
}
writeln!(writer, "remote_clientid {}", self.remote_clientid)?;
writeln!(writer, "local_clientid {}", self.local_clientid)?;
writeln!(writer, "bridge_certfile {}", self.bridge_certfile)?;
writeln!(writer, "bridge_keyfile {}", self.bridge_keyfile)?;
writeln!(writer, "try_private {}", self.try_private)?;
writeln!(writer, "start_type {}", self.start_type)?;
writeln!(writer, "cleansession {}", self.clean_session)?;
writeln!(writer, "notifications {}", self.notifications)?;
writeln!(
writer,
"notifications_local_only {}",
self.notifications_local_only
)?;
writeln!(writer, "notification_topic {}", self.notification_topic)?;
writeln!(
writer,
"bridge_attempt_unsubscribe {}",
self.bridge_attempt_unsubscribe
)?;
writeln!(writer, "\n### Topics",)?;
for topic in &self.topics {
writeln!(writer, "topic {}", topic)?;
}
Ok(())
}
pub fn validate(&self) -> Result<(), ConnectError> {
// XXX: This is actually wrong. Our address looks like this: `domain:port`
// `Url::parse` will treat `domain` as `schema` ...
Url::parse(&self.address)?;
if !self.bridge_root_cert_path.as_ref().exists() {
return Err(ConnectError::Certificate);
}
if !self.bridge_certfile.as_ref().exists() {
return Err(ConnectError::Certificate);
}
if !self.bridge_keyfile.as_ref().exists() {
return Err(ConnectError::Certificate);
}
Ok(())
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_serialize_with_cafile_correctly() -> anyhow::Result<()> {
let file = tempfile::NamedTempFile::new()?;
let bridge_root_cert_path: FilePath = file.path().into();
let bridge = BridgeConfig {
cloud_name: "test".into(),
config_file: "test-bridge.conf".into(),
connection: "edge_to_test".into(),
address: "test.test.io:8883".into(),
remote_username: None,
bridge_root_cert_path: bridge_root_cert_path.clone(),
remote_clientid: "alpha".into(),
local_clientid: "test".into(),
bridge_certfile: "./test-certificate.pem".into(),
bridge_keyfile: "./test-private-key.pem".into(),
use_mapper: false,
use_agent: false,
topics: vec![],
try_private: false,
start_type: "automatic".into(),
clean_session: true,
notifications: false,
notifications_local_only: false,
notification_topic: "test_topic".into(),
bridge_attempt_unsubscribe: false,
};
let mut serialized_config = Vec::<u8>::new();
bridge.serialize(&mut serialized_config)?;
let bridge_cafile = format!("bridge_cafile {}", bridge_root_cert_path);
let mut expected = r#"### Bridge
connection edge_to_test
address test.test.io:8883
"#
.to_owned();
expected.push_str(&bridge_cafile);
expected.push_str(
r#"
remote_clientid alpha
local_clientid test
bridge_certfile ./test-certificate.pem
bridge_keyfile ./test-private-key.pem
try_private false
start_type automatic
cleansession true
notifications false
notifications_local_only false
notification_topic test_topic
bridge_attempt_unsubscribe false
### Topics
"#,
);
assert_eq!(serialized_config, expected.as_bytes());
Ok(())
}
#[test]
fn test_serialize_with_capath_correctly() -> anyhow::Result<()> {
let dir = tempfile::TempDir::new()?;
let bridge_root_cert_path: FilePath = dir.path().into();
let bridge = BridgeConfig {
cloud_name: "test".into(),
config_file: "test-bridge.conf".into(),
connection: "edge_to_test".into(),
address: "test.test.io:8883".into(),
remote_username: None,
bridge_root_cert_path: bridge_root_cert_path.clone(),
remote_clientid: "alpha".into(),
local_clientid: "test".into(),
bridge_certfile: "./test-certificate.pem".into(),
bridge_keyfile: "./test-private-key.pem".into(),
use_mapper: false,
use_agent: false,
topics: vec![],
try_private: false,
start_type: "automatic".into(),
clean_session: true,
notifications: false,
notifications_local_only: false,
notification_topic: "test_topic".into(),
bridge_attempt_unsubscribe: false,
};
let mut serialized_config = Vec::<u8>::new();
bridge.serialize(&mut serialized_config)?;
let bridge_capath = format!("bridge_capath {}", bridge_root_cert_path);
let mut expected = r#"### Bridge
connection edge_to_test
address test.test.io:8883
"#
.to_owned();
expected.push_str(&bridge_capath);
expected.push_str(
r#"
remote_clientid alpha
local_clientid test
bridge_certfile ./test-certificate.pem
bridge_keyfile ./test-private-key.pem
try_private false
start_type automatic
cleansession true
notifications false
notifications_local_only false
notification_topic test_topic
bridge_attempt_unsubscribe false
### Topics
"#,
);
assert_eq!(serialized_config, expected.as_bytes());
Ok(())
}
#[test]
fn test_serialize() -> anyhow::Result<()> {
let file = tempfile::NamedTempFile::new()?;
let bridge_root_cert_path: FilePath = file.path().into();
let config = BridgeConfig {
cloud_name: "az".into(),
config_file: "az-bridge.conf".into(),
connection: "edge_to_az".into(),
address: "test.test.io:8883".into(),
remote_username: Some("test.test.io/alpha/?api-version=2018-06-30".into()),
bridge_root_cert_path: bridge_root_cert_path.clone(),
remote_clientid: "alpha".into(),
local_clientid: "Azure".into(),
bridge_certfile: "./test-certificate.pem".into(),
bridge_keyfile: "./test-private-key.pem".into(),
use_mapper: false,
use_agent: false,
topics: vec![
r#"messages/events/ out 1 az/ devices/alpha/"#.into(),
r##"messages/devicebound/# out 1 az/ devices/alpha/"##.into(),
],
try_private: false,
start_type: "automatic".into(),
clean_session: true,
notifications: false,
notifications_local_only: false,
notification_topic: "test_topic".into(),
bridge_attempt_unsubscribe: false,
};
let mut buffer = Vec::new();
config.serialize(&mut buffer)?;
let contents = String::from_utf8(buffer)?;
let config_set: std::collections::HashSet<&str> = contents
.lines()
.filter(|str| !str.is_empty() && !str.starts_with('#'))
.collect();
let mut expected = std::collections::HashSet::new();
expected.insert("connection edge_to_az");
expected.insert("remote_username test.test.io/alpha/?api-version=2018-06-30");
expected.insert("address test.test.io:8883");
let bridge_capath = format!("bridge_cafile {}", bridge_root_cert_path);
expected.insert(&bridge_capath);
expected.insert("remote_clientid alpha");
expected.insert("local_clientid Azure");
expected.insert("bridge_certfile ./test-certificate.pem");
expected.insert("bridge_keyfile ./test-private-key.pem");
expected.insert("start_type automatic");
expected.insert("try_private false");
expected.insert("cleansession true");
expected.insert("notifications false");
expected.insert("notifications_local_only false");
expected.insert("notification_topic test_topic");
expected.insert("bridge_attempt_unsubscribe false");
expected.insert("topic messages/events/ out 1 az/ devices/alpha/");
expected.insert("topic messages/devicebound/# out 1 az/ devices/alpha/");
assert_eq!(config_set, expected);
Ok(())
}
#[test]
fn test_validate_ok() -> anyhow::Result<()> {
let ca_file = tempfile::NamedTempFile::new()?;
let bridge_ca_path: FilePath = ca_file.path().into();
let cert_file = tempfile::NamedTempFile::new()?;
let bridge_certfile: FilePath = cert_file.path().into();
let key_file = tempfile::NamedTempFile::new()?;
let bridge_keyfile: FilePath = key_file.path().into();
let correct_url = "http://test.com";
let config = BridgeConfig {
address: correct_url.into(),
bridge_root_cert_path: bridge_ca_path,
bridge_certfile,
bridge_keyfile,
..default_bridge_config()
};
assert!(config.validate().is_ok());
Ok(())
}
// XXX: This test is flawed as it is not clear what it tests.
// It can fail due to either `incorrect_url` OR `non_existent_path`.
#[test]
fn test_validate_wrong_url() {
let incorrect_url = "noturl";
let non_existent_path = "/path/that/does/not/exist";
let config = BridgeConfig {
address: incorrect_url.into(),
bridge_certfile: non_existent_path.into(),
bridge_keyfile: non_existent_path.into(),
..default_bridge_config()
};
assert!(config.validate().is_err());
}
#[test]
fn test_validate_wrong_cert_path() {
let correct_url = "http://test.com";
let non_existent_path = "/path/that/does/not/exist";
let config = BridgeConfig {
address: correct_url.into(),
bridge_certfile: non_existent_path.into(),
bridge_keyfile: non_existent_path.into(),
..default_bridge_config()
};
assert!(config.validate().is_err());
}
#[test]
fn test_validate_wrong_key_path() -> anyhow::Result<()> {
let cert_file = tempfile::NamedTempFile::new()?;
let bridge_certfile: FilePath = cert_file.path().into();
let correct_url = "http://test.com";
let non_existent_path = "/path/that/does/not/exist";
let config = BridgeConfig {
address: correct_url.into(),
bridge_certfile,
bridge_keyfile: non_existent_path.into(),
..default_bridge_config()
};
assert!(config.validate().is_err());
Ok(())
}
fn default_bridge_config() -> BridgeConfig {
BridgeConfig {
cloud_name: "az/c8y".into(),
config_file: "cfg".to_string(),
connection: "edge_to_az/c8y".into(),
address: "".into(),
remote_username: None,
bridge_root_cert_path: "".into(),
bridge_certfile: "".into(),
bridge_keyfile: "".into(),
remote_clientid: "".into(),
local_clientid: "".into(),
use_mapper: true,
use_agent: true,
try_private: false,
start_type: "automatic".into(),
clean_session: true,
notifications: false,
notifications_local_only: false,
notification_topic: "test_topic".into(),
bridge_attempt_unsubscribe: false,
topics: vec![],
}
}
}
| 33.423469 | 87 | 0.594947 |
ddabff534513cb6fb1e41eddf039b083a3cf04cc | 809 | use std::time::Instant;
use compute_shader::collatz;
use rayon::prelude::*;
fn main() {
let top = 2u32.pow(20);
let src_range = 1..top;
let start = Instant::now();
let result = src_range
.clone()
.into_par_iter()
.map(collatz)
.collect::<Vec<_>>();
let took = start.elapsed();
let mut max = 0;
for (src, out) in src_range.zip(result.iter().copied()) {
match out {
Some(out) if out > max => {
max = out;
// Should produce <https://oeis.org/A006877>
println!("{}: {}", src, out);
}
Some(_) => (),
None => {
println!("{}: overflowed", src);
break;
}
}
}
println!("Took: {:?}", took);
}
| 24.515152 | 61 | 0.443758 |
0938504e45a70275ee210b03166072c824d08e59 | 448 | use serde::{Deserialize, Deserializer};
pub(crate) fn keywords_map<'de, D>(deserializer: D) -> Result<String, D::Error>
where
D: Deserializer<'de>,
{
String::deserialize(deserializer).map(|x| {
match x.as_str() {
"0x8020000000000000" => "Audit Success",
"0x8010000000000000" => "Audit Failure",
"0x80000000000000" => "Classic",
_ => &x
}
.into()
})
}
| 26.352941 | 79 | 0.537946 |
712ba24fe7ca10385d1c0e546a6e0f83fa2dfa30 | 721 | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub(crate) mod abstract_types;
pub(crate) mod ctypes;
pub(crate) mod fun;
pub(crate) mod gc;
pub(crate) mod pod; // hey, that rhymes
| 36.05 | 75 | 0.742025 |
aceb351e6a527cc14020864aa60527bfcca97cec | 8,172 | use crate::{
client::{
Client,
ClientOptions,
client::ConnectionMode,
KeepAlive,
},
Error, Result,
util::{
TokioRuntime,
}
};
use url::Url;
#[cfg(any(feature = "tls", feature = "websocket"))]
use ::rustls;
#[cfg(feature = "tls")]
use std::sync::Arc;
use tokio::time::Duration;
/// A fluent builder interface to configure a Client.
///
/// Note that you must call `.set_host()` to configure a host to
/// connect to before `.build()`
#[derive(Default)]
pub struct ClientBuilder {
url: Option<Url>,
username: Option<String>,
password: Option<Vec<u8>>,
keep_alive: Option<KeepAlive>,
runtime: TokioRuntime,
client_id: Option<String>,
packet_buffer_len: Option<usize>,
max_packet_len: Option<usize>,
operation_timeout: Option<Duration>,
connection_mode: ConnectionMode,
automatic_connect: Option<bool>,
connect_retry_delay: Option<Duration>,
}
impl ClientBuilder {
/// Build a new `Client` with this configuration.
pub fn build(&mut self) -> Result<Client> {
Client::new(ClientOptions {
url: self
.url
.clone()
.ok_or(Error::from("You must set a url for the client"))?,
username: self.username.clone(),
password: self.password.clone(),
keep_alive: self.keep_alive.unwrap_or(KeepAlive::from_secs(30)),
runtime: self.runtime.clone(),
client_id: self.client_id.clone(),
packet_buffer_len: self.packet_buffer_len.unwrap_or(100),
max_packet_len: self.max_packet_len.unwrap_or(64 * 1024),
operation_timeout: self.operation_timeout.unwrap_or(Duration::from_secs(20)),
connection_mode: self.connection_mode.clone(),
automatic_connect: self.automatic_connect.unwrap_or(true),
connect_retry_delay: self.connect_retry_delay.unwrap_or(Duration::from_secs(30)),
})
}
/// Set the destination url for this mqtt connection to the given string (returning an error if
/// the provided string is not a valid URL).
///
/// See [Self::set_url] for more details
pub fn set_url_string(&mut self, url: &str) -> Result<&mut Self> {
use std::convert::TryFrom;
let url = Url::try_from(url).map_err(|e| Error::StdError(Box::new(e)))?;
self.set_url(url)
}
/// Set the destination url for this mqtt connection.
///
/// Supported schema are:
/// - mqtt: An mqtt session over tcp (default TCP port 1883)
/// - mqtts: An mqtt session over tls (default TCP port 8883)
/// - ws: An mqtt session over a websocket (default TCP port 80, requires cargo feature "websocket")
/// - wss: An mqtt session over a secure websocket (default TCP port 443, requires cargo feature "websocket")
///
/// If the selected scheme is mqtts or wss, then it will preserve the previously provided tls
/// configuration, if one has been given, or make a new default one otherwise.
pub fn set_url(&mut self, url: Url) -> Result<&mut Self> {
#[cfg(any(feature = "tls", feature = "websocket"))]
let rustls_config = match &self.connection_mode {
#[cfg(feature = "tls")]
ConnectionMode::Tls(config) => config.clone(),
#[cfg(feature = "websocket")]
ConnectionMode::WebsocketSecure(config) => config.clone(),
_ => Arc::new(rustls::ClientConfig::new()),
};
self.connection_mode = match url.scheme() {
"mqtt" => ConnectionMode::Tcp,
#[cfg(feature = "tls")]
"mqtts" => ConnectionMode::Tls(rustls_config),
#[cfg(feature = "websocket")]
"ws" => ConnectionMode::Websocket,
#[cfg(feature = "websocket")]
"wss" => ConnectionMode::WebsocketSecure(rustls_config),
scheme => return Err(Error::String(format!("Unsupported scheme: {}", scheme))),
};
self.url = Some(url);
Ok(self)
}
/// Set username to authenticate with.
///
/// The default value is no username.
pub fn set_username(&mut self, username: Option<String>) -> &mut Self {
self.username = username;
self
}
/// Set password to authenticate with.
///
/// The default is no password.
pub fn set_password(&mut self, password: Option<Vec<u8>>) -> &mut Self {
self.password = password;
self
}
/// Set keep alive time.
///
/// This controls how often ping requests are sent when the connection is idle.
/// See [MQTT 3.1.1 specification section 3.1.2.10](http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/csprd02/mqtt-v3.1.1-csprd02.html#_Keep_Alive)
///
/// The default value is 30 seconds.
pub fn set_keep_alive(&mut self, keep_alive: KeepAlive) -> &mut Self {
self.keep_alive = Some(keep_alive);
self
}
/// Set the tokio runtime to spawn background tasks onto.
///
/// The default is to use the default tokio runtime, i.e. `tokio::spawn()`.
pub fn set_tokio_runtime(&mut self, rt: TokioRuntime) -> &mut Self {
self.runtime = rt;
self
}
/// Set the ClientId to connect with.
pub fn set_client_id(&mut self, client_id: Option<String>) -> &mut Self {
self.client_id = client_id;
self
}
/// Set the inbound and outbound packet buffer length.
///
/// The default is 100.
pub fn set_packet_buffer_len(&mut self, packet_buffer_len: usize) -> &mut Self {
self.packet_buffer_len = Some(packet_buffer_len);
self
}
/// Set the maximum packet length.
///
/// The default is 64 * 1024 bytes.
pub fn set_max_packet_len(&mut self, max_packet_len: usize) -> &mut Self {
self.max_packet_len = Some(max_packet_len);
self
}
/// Set the timeout for operations.
///
/// The default is 20 seconds.
pub fn set_operation_timeout(&mut self, operation_timeout: Duration) -> &mut Self {
self.operation_timeout = Some(operation_timeout);
self
}
/// Set the TLS ClientConfig for the client-server connection.
///
/// Enables TLS. By default TLS is disabled.
#[cfg(feature = "tls")]
pub fn set_tls_client_config(&mut self, tls_client_config: rustls::ClientConfig) -> &mut Self {
match self.connection_mode {
ref mut mode @ ConnectionMode::Tcp => {
let _ = self.url.as_mut().map(|url| url.set_scheme("mqtts"));
*mode = ConnectionMode::Tls(Arc::new(tls_client_config))
}
ConnectionMode::Tls(ref mut config) => *config = Arc::new(tls_client_config),
#[cfg(feature = "websocket")]
ref mut mode @ ConnectionMode::Websocket => {
*mode = ConnectionMode::WebsocketSecure(Arc::new(tls_client_config))
}
#[cfg(feature = "websocket")]
ConnectionMode::WebsocketSecure(ref mut config) => {
let _ = self.url.as_mut().map(|url| url.set_scheme("https"));
*config = Arc::new(tls_client_config)
}
}
self
}
#[cfg(feature = "websocket")]
/// Set the connection to use a websocket
pub fn set_websocket(&mut self) -> &mut Self {
self.connection_mode = ConnectionMode::Websocket;
self
}
/// Sets the connection mode to the given value
///
/// The default is to use Tcp
pub fn set_connection_mode(&mut self, mode: ConnectionMode) -> &mut Self {
self.connection_mode = mode;
self
}
/// Set whether to automatically connect and reconnect.
///
/// The default is true.
pub fn set_automatic_connect(&mut self, automatic_connect: bool) -> &mut Self {
self.automatic_connect = Some(automatic_connect);
self
}
/// Set the delay between connect retries.
///
/// The default is 30s.
pub fn set_connect_retry_delay(&mut self, connect_retry_delay: Duration) -> &mut Self {
self.connect_retry_delay = Some(connect_retry_delay);
self
}
}
| 36 | 145 | 0.607685 |
ddccaa20d9986d9d72d2a3953f8b8232b327ea8f | 15,984 | // This file was split off of a file that was part of https://github.com/alexheretic/glyph-brush
use gl::types::*;
use glyph_brush::rusttype::Scale;
use glyph_brush::*;
use macros::{d, invariants_checked};
use std::{ffi::CString, mem, ptr, str};
pub const EDIT_Z: f32 = 0.5;
pub const HIGHLIGHT_Z: f32 = 0.4375;
pub const CURSOR_Z: f32 = 0.375;
pub const STATUS_BACKGROUND_Z: f32 = 0.25;
pub const STATUS_Z: f32 = 0.125;
pub struct State {
vertex_count: usize,
vertex_max: usize,
program: u32,
fs: u32,
vs: u32,
vbo: u32,
vao: u32,
glyph_texture: u32,
}
pub type Res<T> = Result<T, Box<std::error::Error>>;
/// ```text
/// [
/// left_top * 3,
/// right_bottom * 2,
/// tex_left_top * 2,
/// tex_right_bottom * 2,
/// color * 4,
/// override_alpha
/// ]
/// ```
pub type Vertex = [GLfloat; 14];
fn transform_status_line(vertex: &mut Vertex) {
let max_x = &mut vertex[3];
*max_x = std::f32::MAX;
vertex[13] = 1.0;
}
fn extract_tex_coords(vertex: &Vertex) -> TexCoords {
let mut output: TexCoords = d!();
// To compenate for y flipping in to_vertex
output.min.x = vertex[5];
output.max.y = vertex[6];
output.max.x = vertex[7];
output.min.y = vertex[8];
output
}
#[inline]
#[perf_viz::record]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
screen_dimensions: (screen_w, screen_h),
color,
z,
}: glyph_brush::GlyphVertex,
) -> Vertex {
let gl_bounds = rusttype::Rect {
min: rusttype::point(
2.0 * (bounds.min.x / screen_w - 0.5),
2.0 * (0.5 - bounds.min.y / screen_h),
),
max: rusttype::point(
2.0 * (bounds.max.x / screen_w - 0.5),
2.0 * (0.5 - bounds.max.y / screen_h),
),
};
let mut gl_rect = rusttype::Rect {
min: rusttype::point(
2.0 * (pixel_coords.min.x as f32 / screen_w - 0.5),
2.0 * (0.5 - pixel_coords.min.y as f32 / screen_h),
),
max: rusttype::point(
2.0 * (pixel_coords.max.x as f32 / screen_w - 0.5),
2.0 * (0.5 - pixel_coords.max.y as f32 / screen_h),
),
};
// handle overlapping bounds, modify uv_rect to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
// note: y access is flipped gl compared with screen,
// texture is not flipped (ie is a headache)
if gl_rect.max.y < gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y > gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
[
gl_rect.min.x,
gl_rect.max.y,
z,
gl_rect.max.x,
gl_rect.min.y,
// this isn't `mix.x, min.y, max.x, max.y` in order to flip the y axis
tex_coords.min.x,
tex_coords.max.y,
tex_coords.max.x,
tex_coords.min.y,
//
color[0],
color[1],
color[2],
color[3],
0.0,
]
}
macro_rules! gl_assert_ok {
() => {{
if invariants_checked!() {
let err = gl::GetError();
assert_eq!(err, gl::NO_ERROR, "{}", gl_err_to_str(err));
}
}};
}
pub fn init<F>(glyph_brush: &GlyphBrush<Vertex>, load_fn: F) -> Res<State>
where
F: FnMut(&'static str) -> *const GLvoid,
{
// Load the OpenGL function pointers
gl::load_with(load_fn);
// Create GLSL shaders
let vs = compile_shader(include_str!("shader/vert.glsl"), gl::VERTEX_SHADER)?;
let fs = compile_shader(include_str!("shader/frag.glsl"), gl::FRAGMENT_SHADER)?;
let program = link_program(vs, fs)?;
let mut vao = 0;
let mut vbo = 0;
let mut glyph_texture = 0;
unsafe {
// Create Vertex Array Object
gl::GenVertexArrays(1, &mut vao);
gl::BindVertexArray(vao);
// Create a Vertex Buffer Object
gl::GenBuffers(1, &mut vbo);
gl::BindBuffer(gl::ARRAY_BUFFER, vbo);
// Enamble Depth testing so we can occlude things while sending them down in any order
gl::Enable(gl::DEPTH_TEST);
{
// Create a texture for the glyphs
// The texture holds 1 byte per pixel as alpha data
gl::PixelStorei(gl::UNPACK_ALIGNMENT, 1);
gl::GenTextures(1, &mut glyph_texture);
gl::BindTexture(gl::TEXTURE_2D, glyph_texture);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as _);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as _);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as _);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as _);
let (width, height) = glyph_brush.texture_dimensions();
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RED as _,
width as _,
height as _,
0,
gl::RED,
gl::UNSIGNED_BYTE,
ptr::null(),
);
gl_assert_ok!();
}
// Use shader program
gl::UseProgram(program);
gl::BindFragDataLocation(program, 0, CString::new("out_color")?.as_ptr());
// Specify the layout of the vertex data
let mut offset = 0;
for (v_field, float_count) in &[
("left_top", 3),
("right_bottom", 2),
("tex_left_top", 2),
("tex_right_bottom", 2),
("color", 4),
("override_alpha", 1),
] {
let attr = gl::GetAttribLocation(program, CString::new(*v_field)?.as_ptr());
if attr < 0 {
return Err(format!("{} GetAttribLocation -> {}", v_field, attr).into());
}
gl::VertexAttribPointer(
attr as _,
*float_count,
gl::FLOAT,
gl::FALSE as _,
mem::size_of::<Vertex>() as _,
offset as _,
);
gl::EnableVertexAttribArray(attr as _);
gl::VertexAttribDivisor(attr as _, 1);
offset += float_count * 4;
}
// Enabled alpha blending
gl::Enable(gl::BLEND);
gl::BlendFunc(gl::SRC_ALPHA, gl::ONE_MINUS_SRC_ALPHA);
// Use srgb for consistency with other examples
gl::Enable(gl::FRAMEBUFFER_SRGB);
gl::ClearColor(0.02, 0.02, 0.02, 1.0);
}
let vertex_count = 0;
let vertex_max = vertex_count;
Ok(State {
vertex_count,
vertex_max,
program,
fs,
vs,
vbo,
vao,
glyph_texture,
})
}
pub fn set_dimensions(width: i32, height: i32) {
unsafe {
gl::Viewport(0, 0, width, height);
}
}
#[derive(Clone)]
pub struct RenderExtras {
pub status_line_position: Option<(f32, f32)>,
pub status_scale: Scale,
pub highlight_ranges: Vec<HighlightRange>,
}
#[perf_viz::record]
pub fn render(
State {
ref mut vertex_count,
ref mut vertex_max,
..
}: &mut State,
glyph_brush: &mut GlyphBrush<Vertex>,
width: u32,
height: u32,
RenderExtras {
status_line_position,
status_scale,
highlight_ranges,
}: RenderExtras,
) -> Res<()> {
let query_ids = [0; 1];
if cfg!(feature = "time-render") {
// Adding and then retreving this query for how long the gl rendering took,
// "implicitly flushes the GL pipeline" according to this docs page:
// https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glBeginQuery.xhtml
// Without something flushing the queue, as of this writing, the frames do not
// appear to render as quickly. That is, after user input the updated frame does
// shows up after a noticably longer delay. Oddly enough, `glFinish` produces the
// same speed up, and but it takes longer than this query does, (around a ms or so)
// at least on my current machine + driver setup. Here's a question abotut this:
// https://gamedev.stackexchange.com/q/172737
// For the time being, I'm making this feature enabled by default since it is
// currently faster, but thi may well not be true any more on a future machine/driver
// so it seems worth it to keep it a feature.
unsafe {
gl::GenQueries(1, query_ids.as_ptr() as _);
gl::BeginQuery(gl::TIME_ELAPSED, query_ids[0])
}
}
let dimensions = (width, height);
let mut brush_action;
loop {
perf_viz::start_record!("process_queued");
brush_action = glyph_brush.process_queued(
dimensions,
|rect, tex_data| unsafe {
perf_viz::start_record!("|rect, tex_data|");
// Update part of gpu texture with new glyph alpha values
gl::TexSubImage2D(
gl::TEXTURE_2D,
0,
rect.min.x as _,
rect.min.y as _,
rect.width() as _,
rect.height() as _,
gl::RED,
gl::UNSIGNED_BYTE,
tex_data.as_ptr() as _,
);
gl_assert_ok!();
perf_viz::end_record!("|rect, tex_data|");
},
to_vertex,
status_line_position.map(|status_line_position| AdditionalRects {
transform_status_line,
extract_tex_coords,
status_line_position,
status_scale,
highlight_ranges: highlight_ranges.clone(),
}),
);
perf_viz::end_record!("process_queued");
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested, .. }) => unsafe {
perf_viz::record_guard!("BrushError::TextureTooSmall");
let (new_width, new_height) = suggested;
eprint!("\r \r");
eprintln!("Resizing glyph texture -> {}x{}", new_width, new_height);
// Recreate texture as a larger size to fit more
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RED as _,
new_width as _,
new_height as _,
0,
gl::RED,
gl::UNSIGNED_BYTE,
ptr::null(),
);
gl_assert_ok!();
glyph_brush.resize_texture(new_width, new_height);
},
}
}
match brush_action? {
BrushAction::Draw(vertices) => {
perf_viz::record_guard!("BrushAction::Draw");
// Draw new vertices
*vertex_count = vertices.len();
unsafe {
if vertex_max < vertex_count {
gl::BufferData(
gl::ARRAY_BUFFER,
(*vertex_count * mem::size_of::<Vertex>()) as GLsizeiptr,
vertices.as_ptr() as _,
gl::DYNAMIC_DRAW,
);
} else {
gl::BufferSubData(
gl::ARRAY_BUFFER,
0,
(*vertex_count * mem::size_of::<Vertex>()) as GLsizeiptr,
vertices.as_ptr() as _,
);
}
}
*vertex_max = *vertex_max.max(vertex_count);
}
BrushAction::ReDraw => {}
}
unsafe {
perf_viz::record_guard!("DrawArraysInstanced");
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
gl::DrawArraysInstanced(gl::TRIANGLE_STRIP, 0, 4, *vertex_count as _);
}
//See comment in above "time-render" check.
if cfg!(feature = "time-render") {
let mut time_elapsed = 0;
unsafe {
gl::EndQuery(gl::TIME_ELAPSED);
gl::GetQueryObjectiv(query_ids[0], gl::QUERY_RESULT, &mut time_elapsed);
gl::DeleteQueries(1, query_ids.as_ptr() as _);
}
} else {
unsafe {
gl::Finish();
}
}
Ok(())
}
pub fn cleanup(
State {
program,
fs,
vs,
vbo,
vao,
glyph_texture,
..
}: State,
) -> Res<()> {
unsafe {
gl::DeleteProgram(program);
gl::DeleteShader(fs);
gl::DeleteShader(vs);
gl::DeleteBuffers(1, &vbo);
gl::DeleteVertexArrays(1, &vao);
gl::DeleteTextures(1, &glyph_texture);
}
Ok(())
}
fn gl_err_to_str(err: u32) -> &'static str {
match err {
gl::INVALID_ENUM => "INVALID_ENUM",
gl::INVALID_VALUE => "INVALID_VALUE",
gl::INVALID_OPERATION => "INVALID_OPERATION",
gl::INVALID_FRAMEBUFFER_OPERATION => "INVALID_FRAMEBUFFER_OPERATION",
gl::OUT_OF_MEMORY => "OUT_OF_MEMORY",
gl::STACK_UNDERFLOW => "STACK_UNDERFLOW",
gl::STACK_OVERFLOW => "STACK_OVERFLOW",
_ => "Unknown error",
}
}
fn compile_shader(src: &str, ty: GLenum) -> Res<GLuint> {
let shader;
unsafe {
shader = gl::CreateShader(ty);
// Attempt to compile the shader
let c_str = CString::new(src.as_bytes())?;
gl::ShaderSource(shader, 1, &c_str.as_ptr(), ptr::null());
gl::CompileShader(shader);
// Get the compile status
let mut status = GLint::from(gl::FALSE);
gl::GetShaderiv(shader, gl::COMPILE_STATUS, &mut status);
// Fail on error
if status != GLint::from(gl::TRUE) {
let mut len = 0;
gl::GetShaderiv(shader, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize);
buf.set_len((len as usize) - 1); // subtract 1 to skip the trailing null character
gl::GetShaderInfoLog(
shader,
len,
ptr::null_mut(),
buf.as_mut_ptr() as *mut GLchar,
);
return Err(str::from_utf8(&buf)?.into());
}
}
Ok(shader)
}
fn link_program(vs: GLuint, fs: GLuint) -> Res<GLuint> {
unsafe {
let program = gl::CreateProgram();
gl::AttachShader(program, vs);
gl::AttachShader(program, fs);
gl::LinkProgram(program);
// Get the link status
let mut status = GLint::from(gl::FALSE);
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
// Fail on error
if status != GLint::from(gl::TRUE) {
let mut len: GLint = 0;
gl::GetProgramiv(program, gl::INFO_LOG_LENGTH, &mut len);
let mut buf = Vec::with_capacity(len as usize);
buf.set_len((len as usize) - 1); // subtract 1 to skip the trailing null character
gl::GetProgramInfoLog(
program,
len,
ptr::null_mut(),
buf.as_mut_ptr() as *mut GLchar,
);
return Err(str::from_utf8(&buf)?.into());
}
Ok(program)
}
}
| 32.032064 | 98 | 0.532908 |
1ef17d079f0d02b2695e148055ed17a5735ec7b4 | 1,054 | use super::super::super::EnumTrait;
use std::str::FromStr;
#[derive(Clone, Debug)]
pub enum ClipboardFormatValues {
Bitmap,
Picture,
PictureOld,
PicturePrint,
PictureScreen,
}
impl Default for ClipboardFormatValues {
fn default() -> Self { Self::PictureOld }
}
impl EnumTrait for ClipboardFormatValues {
fn get_value_string(&self)->&str {
match &self {
Self::Bitmap => {"Bitmap"},
Self::Picture => {"Pict"},
Self::PictureOld => {"PictOld"},
Self::PicturePrint => {"PictPrint"},
Self::PictureScreen => {"PictScreen"},
}
}
}
impl FromStr for ClipboardFormatValues {
    type Err = ();
    /// Parses the serialized string form back into a variant; any
    /// unrecognized input yields `Err(())`.
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        Ok(match input {
            "Bitmap" => Self::Bitmap,
            "Pict" => Self::Picture,
            "PictOld" => Self::PictureOld,
            "PictPrint" => Self::PicturePrint,
            "PictScreen" => Self::PictureScreen,
            _ => return Err(()),
        })
    }
}
| 27.736842 | 57 | 0.547438 |
/// Surface backend types. The variant names and discriminants appear to
/// mirror cairo's `cairo_surface_type_t` C enum — TODO confirm against the
/// bound library; `#[repr(i32)]` keeps the values ABI-compatible.
///
/// Standard derives are added so the type can be copied, compared, printed,
/// and used as a map key like any other plain C-style enum.
#[repr(i32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SurfaceType {
    Image = 0,
    PDF = 1,
    PS = 2,
    XLib = 3,
    XCB = 4,
    Glitz = 5,
    Quartz = 6,
    Win32 = 7,
    BeOS = 8,
    DirectFB = 9,
    SVG = 10,
    OS2 = 11,
    Win32Printing = 12,
    QuartzImage = 13,
    Script = 14,
    Qt = 15,
    Recording = 16,
    VG = 17,
    GL = 18,
    DRM = 19,
    Tee = 20,
    XML = 21,
    Skia = 22,
    Subsurface = 23,
    CoGL = 24
}
| 12.931034 | 22 | 0.501333 |
7297ddbefd78b7864a989910efd78123422d5b46 | 866 | use random_nnue_eval::eval_value::{EvalValueNnue, EvalValueNnueHalfKPE9};
use std::f64::NAN;
/// Loads an NNUE HalfKPE9 evaluation from the file named by the first CLI
/// argument, then writes ten processed copies to `eval_halfkpe9/000/nn.bin`
/// through `eval_halfkpe9/009/nn.bin`.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let odir = "eval_halfkpe9";
    println!("{}", &odir);
    // A pre-existing directory is not fatal; report the error kind and go on.
    if let Err(why) = std::fs::create_dir(&odir) {
        println!("! {:?}", why.kind());
    }
    let args: Vec<String> = std::env::args().collect();
    let ifilename = &args[1];
    let eval = EvalValueNnueHalfKPE9::load(&ifilename).unwrap();
    for id in 0..10 {
        let sdir = format!("{}/{:03}", &odir, id);
        println!("{}", &sdir);
        if let Err(why) = std::fs::create_dir(&sdir) {
            println!("! {:?}", why.kind());
        }
        eval.clear_weight(NAN, 7.0, 20.0, 50.0)
            .analyze()
            .save(&format!("{}/{:03}/nn.bin", odir, id))?;
    }
    Ok(())
}
| 27.0625 | 73 | 0.495381 |
0309520095af24a5b4171b5ea810c7dcbdf29f44 | 5,400 | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core_foundation::base::{CFRelease, CFRetain, CFTypeID, TCFType};
use core_foundation::array::{CFArray, CFArrayRef};
use core_foundation::data::{CFData, CFDataRef};
use core_foundation::number::CFNumber;
use core_foundation::string::{CFString, CFStringRef};
use core_foundation::dictionary::{CFDictionary, CFDictionaryRef};
use data_provider::CGDataProvider;
use geometry::CGRect;
use foreign_types::ForeignType;
use libc::{self, c_int, size_t};
/// Glyph identifier used by the CGFont glyph APIs below (a 16-bit index).
pub type CGGlyph = libc::c_ushort;
// Generates the owned `CGFont` / borrowed `CGFontRef` pair around the raw
// CoreGraphics handle, with CF retain/release for clone and drop.
foreign_type! {
    #[doc(hidden)]
    type CType = ::sys::CGFont;
    fn drop = |p| CFRelease(p as *mut _);
    fn clone = |p| CFRetain(p as *const _) as *mut _;
    pub struct CGFont;
    pub struct CGFontRef;
}
// NOTE(review): these assert the underlying CGFont handle is safe to send and
// share across threads — confirm against CoreGraphics threading guarantees.
unsafe impl Send for CGFont {}
unsafe impl Sync for CGFont {}
impl CGFont {
    /// Returns the Core Foundation type id for the CGFont opaque type.
    pub fn type_id() -> CFTypeID {
        unsafe {
            CGFontGetTypeID()
        }
    }
    /// Creates a font from raw font data; `Err(())` when CoreGraphics
    /// returns a null font reference.
    pub fn from_data_provider(provider: CGDataProvider) -> Result<CGFont, ()> {
        unsafe {
            let font_ref = CGFontCreateWithDataProvider(provider.as_ptr());
            if !font_ref.is_null() {
                Ok(CGFont::from_ptr(font_ref))
            } else {
                Err(())
            }
        }
    }
    /// Looks a font up by name; `Err(())` when no font is found (null ref).
    pub fn from_name(name: &CFString) -> Result<CGFont, ()> {
        unsafe {
            let font_ref = CGFontCreateWithFontName(name.as_concrete_TypeRef());
            if !font_ref.is_null() {
                Ok(CGFont::from_ptr(font_ref))
            } else {
                Err(())
            }
        }
    }
    /// Creates a copy of this font with the given variation values applied;
    /// `Err(())` when CoreGraphics returns null.
    pub fn create_copy_from_variations(&self, vars: &CFDictionary<CFString, CFNumber>) -> Result<CGFont, ()> {
        unsafe {
            let font_ref = CGFontCreateCopyWithVariations(self.as_ptr(),
                                                          vars.as_concrete_TypeRef());
            if !font_ref.is_null() {
                Ok(CGFont::from_ptr(font_ref))
            } else {
                Err(())
            }
        }
    }
    /// Returns the font's PostScript name.
    pub fn postscript_name(&self) -> CFString {
        unsafe {
            let string_ref = CGFontCopyPostScriptName(self.as_ptr());
            TCFType::wrap_under_create_rule(string_ref)
        }
    }
    /// Fills `bboxes` with the bounding box of each glyph in `glyphs`.
    ///
    /// Panics if `bboxes` is shorter than `glyphs`; returns the FFI call's
    /// success flag.
    pub fn get_glyph_b_boxes(&self, glyphs: &[CGGlyph], bboxes: &mut [CGRect]) -> bool {
        unsafe {
            assert!(bboxes.len() >= glyphs.len());
            CGFontGetGlyphBBoxes(self.as_ptr(),
                                 glyphs.as_ptr(),
                                 glyphs.len(),
                                 bboxes.as_mut_ptr())
        }
    }
    /// Fills `advances` with the advance width of each glyph in `glyphs`.
    ///
    /// Panics if `advances` is shorter than `glyphs`; returns the FFI call's
    /// success flag.
    pub fn get_glyph_advances(&self, glyphs: &[CGGlyph], advances: &mut [c_int]) -> bool {
        unsafe {
            assert!(advances.len() >= glyphs.len());
            CGFontGetGlyphAdvances(self.as_ptr(),
                                   glyphs.as_ptr(),
                                   glyphs.len(),
                                   advances.as_mut_ptr())
        }
    }
    /// Returns the font's units-per-em value.
    pub fn get_units_per_em(&self) -> c_int {
        unsafe {
            CGFontGetUnitsPerEm(self.as_ptr())
        }
    }
    /// Returns the tags of all font tables present in this font.
    pub fn copy_table_tags(&self) -> CFArray<u32> {
        unsafe {
            TCFType::wrap_under_create_rule(CGFontCopyTableTags(self.as_ptr()))
        }
    }
    /// Returns the raw data of the font table identified by `tag`, or
    /// `None` when the table is absent (null CFData).
    pub fn copy_table_for_tag(&self, tag: u32) -> Option<CFData> {
        let data_ref = unsafe { CGFontCopyTableForTag(self.as_ptr(), tag) };
        if !data_ref.is_null() {
            Some(unsafe { TCFType::wrap_under_create_rule(data_ref) })
        } else {
            None
        }
    }
}
// Raw CoreGraphics FFI declarations; the safe wrappers live in `impl CGFont`
// above.
#[link(name = "CoreGraphics", kind = "framework")]
extern {
    // TODO: basically nothing has bindings (even commented-out) besides what we use.
    fn CGFontCreateWithDataProvider(provider: ::sys::CGDataProviderRef) -> ::sys::CGFontRef;
    fn CGFontCreateWithFontName(name: CFStringRef) -> ::sys::CGFontRef;
    fn CGFontCreateCopyWithVariations(font: ::sys::CGFontRef, vars: CFDictionaryRef) -> ::sys::CGFontRef;
    fn CGFontGetTypeID() -> CFTypeID;
    fn CGFontCopyPostScriptName(font: ::sys::CGFontRef) -> CFStringRef;
    // These do the same thing as CFRetain/CFRelease, except
    // gracefully handle a NULL argument. We don't use them.
    //fn CGFontRetain(font: ::sys::CGFontRef);
    //fn CGFontRelease(font: ::sys::CGFontRef);
    fn CGFontGetGlyphBBoxes(font: ::sys::CGFontRef,
                            glyphs: *const CGGlyph,
                            count: size_t,
                            bboxes: *mut CGRect)
                            -> bool;
    fn CGFontGetGlyphAdvances(font: ::sys::CGFontRef,
                              glyphs: *const CGGlyph,
                              count: size_t,
                              advances: *mut c_int)
                              -> bool;
    fn CGFontGetUnitsPerEm(font: ::sys::CGFontRef) -> c_int;
    fn CGFontCopyTableTags(font: ::sys::CGFontRef) -> CFArrayRef;
    fn CGFontCopyTableForTag(font: ::sys::CGFontRef, tag: u32) -> CFDataRef;
}
| 34.394904 | 110 | 0.568148 |
fea0f7a38699007c8f09cea186851e97aab4b8c8 | 10,378 | use crate::checks::ENABLED_CHECK;
use chrono::DateTime;
use heim::{
memory::{
Memory,
Swap,
},
units::{
frequency::{
gigahertz,
hertz,
},
information::{
byte,
gigabyte,
},
time::nanosecond,
Frequency,
},
};
use log::warn;
use serenity::{
framework::standard::{
macros::command,
Args,
CommandResult,
},
model::prelude::*,
prelude::*,
utils::Colour,
};
use std::time::{
Duration,
Instant,
UNIX_EPOCH,
};
use systemstat::{
platform::common::Platform,
System,
};
use uom::{
fmt::DisplayStyle,
si::f32::{
Frequency as FrequencyF32,
Information as InformationF32,
},
};
/// Converts a count of nanoseconds since the Unix epoch into a local
/// `DateTime`.
fn epoch_nanos_to_local_datetime(nanos: u64) -> DateTime<chrono::Local> {
    let moment = UNIX_EPOCH + Duration::from_nanos(nanos);
    DateTime::from(moment)
}
/// Formats an uptime duration as "D days H hours M minutes S seconds".
fn fmt_uptime(uptime: Duration) -> String {
    const MINUTE: u64 = 60;
    const HOUR: u64 = 60 * MINUTE;
    const DAY: u64 = 24 * HOUR;
    let mut remaining = uptime.as_secs();
    let days = remaining / DAY;
    remaining %= DAY;
    let hours = remaining / HOUR;
    remaining %= HOUR;
    let minutes = remaining / MINUTE;
    let secs = remaining % MINUTE;
    format!(
        "{} days {} hours {} minutes {} seconds",
        days, hours, minutes, secs
    )
}
/// Renders memory usage as "used / total" in gigabytes with two decimals.
fn fmt_memory(memory: &Memory) -> String {
    let fmt_args = InformationF32::format_args(gigabyte, DisplayStyle::Abbreviation);
    let total = InformationF32::new::<byte>(memory.total().get::<byte>() as f32);
    let available = InformationF32::new::<byte>(memory.available().get::<byte>() as f32);
    format!(
        "{:.2} / {:.2}",
        fmt_args.with(total - available),
        fmt_args.with(total),
    )
}
/// Renders swap usage as "used / total" in gigabytes with two decimals.
fn fmt_swap(swap: &Swap) -> String {
    let fmt_args = InformationF32::format_args(gigabyte, DisplayStyle::Abbreviation);
    let used_swap = InformationF32::new::<byte>(swap.used().get::<byte>() as f32);
    let total_swap = InformationF32::new::<byte>(swap.total().get::<byte>() as f32);
    format!("{:.2} / {:.2}", fmt_args.with(used_swap), fmt_args.with(total_swap))
}
/// Renders a CPU frequency in gigahertz with two decimals (e.g. "3.40 GHz").
fn fmt_cpu_frequency(freq: &Frequency) -> String {
    let display = FrequencyF32::format_args(gigahertz, DisplayStyle::Abbreviation);
    let as_f32 = FrequencyF32::new::<hertz>(freq.get::<hertz>() as f32);
    format!("{:.2}", display.with(as_f32))
}
/// Samples CPU usage over a one-second window and returns it as a percentage.
async fn get_cpu_usage() -> Result<f32, heim::Error> {
    let first_sample = heim::cpu::usage().await?;
    tokio::time::sleep(Duration::from_secs(1)).await;
    let second_sample = heim::cpu::usage().await?;
    Ok((second_sample - first_sample).get::<heim::units::ratio::percent>())
}
#[command]
#[description("Get System Stats")]
#[bucket("system")]
#[checks(Enabled)]
async fn system(ctx: &Context, msg: &Message, _args: Args) -> CommandResult {
let start = Instant::now();
let profile = ctx.http.get_current_user().await?;
// Start Legacy data gathering
let sys = System::new();
let cpu_temp = match sys.cpu_temp() {
Ok(cpu_temp) => Some(cpu_temp),
Err(e) => {
warn!("Failed to get cpu temp: {}", e);
None
}
};
// End Legacy data gathering
let platform = match heim::host::platform().await {
Ok(platform) => Some(platform),
Err(e) => {
warn!("Failed to get platform info: {}", e);
None
}
};
let boot_time = match heim::host::boot_time().await {
Ok(boot_time) => Some(epoch_nanos_to_local_datetime(
boot_time.get::<nanosecond>() as u64
)),
Err(e) => {
warn!("Failed to get boot time: {}", e);
None
}
};
let uptime = match heim::host::uptime().await {
Ok(uptime) => Some(Duration::from_nanos(uptime.get::<nanosecond>() as u64)),
Err(e) => {
warn!("Failed to get uptime: {}", e);
None
}
};
let cpu_frequency = match heim::cpu::frequency().await {
Ok(cpu_frequency) => Some(cpu_frequency),
Err(e) => {
warn!("Failed to get cpu frequency: {}", e);
None
}
};
let cpu_logical_count = match heim::cpu::logical_count().await {
Ok(cpu_logical_count) => Some(cpu_logical_count),
Err(e) => {
warn!("Failed to get logical cpu count: {}", e);
None
}
};
let cpu_physical_count = match heim::cpu::physical_count().await {
Ok(cpu_physical_count) => cpu_physical_count, // This returns an option, so we return it here to flatten it.
Err(e) => {
warn!("Failed to get physical cpu count: {}", e);
None
}
};
let memory = match heim::memory::memory().await {
Ok(memory) => Some(memory),
Err(e) => {
warn!("Failed to get memory usage: {}", e);
None
}
};
let swap = match heim::memory::swap().await {
Ok(swap) => Some(swap),
Err(e) => {
warn!("Failed to get swap usage: {}", e);
None
}
};
let virtualization = heim::virt::detect().await;
let cpu_usage = match get_cpu_usage().await {
Ok(usage) => Some(usage),
Err(e) => {
warn!("Failed to get cpu usage: {}", e);
None
}
};
let data_retrieval_time = Instant::now() - start;
// Start WIP
// Reports Cpu time since boot.
// let cpu_time = heim::cpu::time().await.unwrap();
// Reports some cpu stats
// let cpu_stats = heim::cpu::stats().await.unwrap();
// Reports temps from all sensors
// let temperatures = heim::sensors::temperatures().collect::<Vec<_>>().await;
// End WIP
msg.channel_id
.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title("System Status");
e.color(Colour::from_rgb(255, 0, 0));
if let Some(icon) = profile.avatar_url() {
e.thumbnail(icon);
}
if let Some(platform) = platform {
e.field("Hostname", platform.hostname(), true);
e.field(
"OS",
format!(
"{} {} (version {})",
platform.system(),
platform.release(),
platform.version()
),
true,
);
e.field("Arch", platform.architecture().as_str(), true);
}
if let Some(boot_time) = boot_time {
e.field("Boot Time", boot_time.to_rfc2822(), true);
}
if let Some(uptime) = uptime {
e.field("Uptime", fmt_uptime(uptime), true);
}
// Currently reports incorrectly on Windows
if let Some(cpu_frequency) = cpu_frequency {
e.field(
"Cpu Freq",
fmt_cpu_frequency(&cpu_frequency.current()),
true,
);
if let Some(min_cpu_frequency) = cpu_frequency.min() {
e.field("Min Cpu Freq", fmt_cpu_frequency(&min_cpu_frequency), true);
}
if let Some(max_cpu_frequency) = cpu_frequency.max() {
e.field("Max Cpu Freq", fmt_cpu_frequency(&max_cpu_frequency), true);
}
}
match (cpu_logical_count, cpu_physical_count) {
(Some(logical_count), Some(physical_count)) => {
e.field(
"Cpu Core Count",
format!("{} logical, {} physical", logical_count, physical_count),
true,
);
}
(Some(logical_count), None) => {
e.field("Cpu Core Count", format!("{} logical", logical_count), true);
}
(None, Some(physical_count)) => {
e.field(
"Cpu Core Count",
format!("{} physical", physical_count),
true,
);
}
(None, None) => {}
}
if let Some(memory) = memory {
e.field("Memory Usage", fmt_memory(&memory), true);
}
if let Some(swap) = swap {
e.field("Swap Usage", fmt_swap(&swap), true);
}
let virtualization_field = match virtualization.as_ref() {
Some(virtualization) => virtualization.as_str(),
None => "None",
};
e.field("Virtualization", virtualization_field, true);
if let (Some(cpu_usage), Some(cpu_logical_count)) = (cpu_usage, cpu_logical_count) {
e.field(
"Cpu Usage",
format!("{:.2}%", cpu_usage / (cpu_logical_count as f32)),
true,
);
}
/////////////////////////////////////////////////////////////////////////////////////
// Legacy (These functions from systemstat have no direct replacement in heim yet) //
/////////////////////////////////////////////////////////////////////////////////////
// This does not work on Windows
// TODO: This can probably be replaced with temprature readings from heim.
// It doesn't support Windows, but this never worked there anyways as Windows has no simple way to get temps
if let Some(cpu_temp) = cpu_temp {
e.field("Cpu Temp", format!("{} °C", cpu_temp), true);
}
e.footer(|f| {
f.text(format!(
"Retrieved system data in {:.2} second(s)",
data_retrieval_time.as_secs_f32()
));
f
});
e
})
})
.await?;
Ok(())
}
| 30.523529 | 124 | 0.47302 |
5de6b96186ddebc2e6566f11b8d20db9840b9ded | 12,221 | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::switchboard::base::*;
use anyhow::{format_err, Error};
use futures::channel::mpsc::UnboundedSender;
use futures::lock::Mutex;
use std::collections::HashMap;
use std::sync::Arc;
use fuchsia_async as fasync;
use futures::stream::StreamExt;
// Maps an action id to the responder waiting on that request's result.
type ResponderMap = HashMap<u64, SettingRequestResponder>;
// Active listen sessions, grouped by the setting type they watch.
type ListenerMap = HashMap<SettingType, Vec<ListenSessionInfo>>;
/// Minimal data necessary to uniquely identify and interact with a listen
/// session.
#[derive(Clone)]
struct ListenSessionInfo {
    // Unique id assigned when the session is registered.
    session_id: u64,
    /// Setting type listening to
    setting_type: SettingType,
    // Invoked whenever the watched setting type changes.
    callback: ListenCallback,
}
// We cannot derive PartialEq as UnboundedSender does not implement it.
impl PartialEq for ListenSessionInfo {
    /// Two sessions are equal when both their id and setting type match;
    /// the callback is intentionally ignored.
    fn eq(&self, other: &Self) -> bool {
        let ids_match = self.session_id == other.session_id;
        let types_match = self.setting_type == other.setting_type;
        ids_match && types_match
    }
}
/// Wrapper around ListenSessioninfo that provides cancellation ability as a
/// ListenSession.
struct ListenSessionImpl {
    // Identity of the listen registration this session controls.
    info: ListenSessionInfo,
    /// Sender to invoke cancellation on. Sends the listener associated with
    /// this session.
    cancellation_sender: UnboundedSender<ListenSessionInfo>,
    // True once `close` has run; prevents duplicate cancellation sends.
    closed: bool,
}
impl ListenSessionImpl {
    /// Creates an open session; `cancellation_sender` is used by `close` to
    /// notify the switchboard that this session has ended.
    fn new(
        info: ListenSessionInfo,
        cancellation_sender: UnboundedSender<ListenSessionInfo>,
    ) -> Self {
        // Field-init shorthand instead of the redundant `info: info` form.
        Self { info, cancellation_sender, closed: false }
    }
}
impl ListenSession for ListenSessionImpl {
    /// Cancels this listen session by notifying the switchboard; any call
    /// after the first is a no-op.
    fn close(&mut self) {
        if !self.closed {
            self.cancellation_sender.unbounded_send(self.info.clone()).ok();
            self.closed = true;
        }
    }
}
impl Drop for ListenSessionImpl {
    fn drop(&mut self) {
        // Ensure the switchboard is notified even if the caller never
        // explicitly closed the session.
        self.close();
    }
}
/// Routes setting requests and listen registrations out through
/// `action_sender`, and delivers responses/change events back to the
/// registered responders and listeners.
pub struct SwitchboardImpl {
    /// Next available session id.
    next_session_id: u64,
    /// Next available action id.
    next_action_id: u64,
    /// Acquired during construction and used internally to send input.
    action_sender: UnboundedSender<SettingAction>,
    /// Acquired during construction - passed during listen to allow callback
    /// for canceling listen.
    listen_cancellation_sender: UnboundedSender<ListenSessionInfo>,
    /// mapping of request output ids to responders.
    request_responders: ResponderMap,
    /// mapping of listeners for changes
    listeners: ListenerMap,
}
impl SwitchboardImpl {
    /// Creates a new SwitchboardImpl, which will return the instance along with
    /// a sender to provide events in response to the actions sent.
    pub fn create(
        action_sender: UnboundedSender<SettingAction>,
    ) -> (Arc<Mutex<SwitchboardImpl>>, UnboundedSender<SettingEvent>) {
        let (event_tx, mut event_rx) = futures::channel::mpsc::unbounded::<SettingEvent>();
        let (cancel_listen_tx, mut cancel_listen_rx) =
            futures::channel::mpsc::unbounded::<ListenSessionInfo>();
        let switchboard = Arc::new(Mutex::new(Self {
            next_session_id: 0,
            next_action_id: 0,
            action_sender,
            listen_cancellation_sender: cancel_listen_tx,
            request_responders: HashMap::new(),
            listeners: HashMap::new(),
        }));
        // Forward incoming setting events to `process_event`.
        {
            let switchboard_clone = switchboard.clone();
            fasync::spawn(async move {
                while let Some(event) = event_rx.next().await {
                    switchboard_clone.lock().await.process_event(event);
                }
            });
        }
        // Handle session cancellations issued via `ListenSessionImpl::close`.
        {
            let switchboard_clone = switchboard.clone();
            fasync::spawn(async move {
                while let Some(info) = cancel_listen_rx.next().await {
                    switchboard_clone.lock().await.stop_listening(info);
                }
            });
        }
        (switchboard, event_tx)
    }
    /// Returns a fresh action id, unique within this switchboard instance.
    pub fn get_next_action_id(&mut self) -> u64 {
        let return_id = self.next_action_id;
        self.next_action_id += 1;
        return_id
    }
    /// Dispatches an incoming event to the matching handler.
    fn process_event(&mut self, input: SettingEvent) {
        match input {
            SettingEvent::Changed(setting_type) => {
                self.notify_listeners(setting_type);
            }
            SettingEvent::Response(action_id, response) => {
                self.handle_response(action_id, response);
            }
        }
    }
    /// Removes the given session and reports the updated listener count for
    /// its setting type.
    fn stop_listening(&mut self, session_info: ListenSessionInfo) {
        let action_id = self.get_next_action_id();
        if let Some(session_infos) = self.listeners.get_mut(&session_info.setting_type) {
            // `position` + `remove` replaces the unstable `Vec::remove_item`.
            if let Some(index) = session_infos.iter().position(|elem| *elem == session_info) {
                session_infos.remove(index);
                // Send updated listening size.
                self.action_sender
                    .unbounded_send(SettingAction {
                        id: action_id,
                        setting_type: session_info.setting_type,
                        data: SettingActionData::Listen(session_infos.len() as u64),
                    })
                    .ok();
            }
        }
    }
    /// Invokes every registered callback for the changed setting type.
    fn notify_listeners(&self, setting_type: SettingType) {
        if let Some(session_infos) = self.listeners.get(&setting_type) {
            for info in session_infos {
                (info.callback)(setting_type);
            }
        }
    }
    /// Routes a response to the responder registered under `origin_id`, if
    /// one is still waiting; the responder is consumed either way.
    fn handle_response(&mut self, origin_id: u64, response: SettingResponseResult) {
        if let Some(responder) = self.request_responders.remove(&origin_id) {
            responder.send(response).ok();
        }
    }
}
impl Switchboard for SwitchboardImpl {
    /// Forwards `request` to the setting handler, registering `callback` to
    /// receive the eventual `SettingEvent::Response`.
    fn request(
        &mut self,
        setting_type: SettingType,
        request: SettingRequest,
        callback: SettingRequestResponder,
    ) -> Result<(), Error> {
        // Associate the responder with a fresh action id so the response can
        // be routed back to this caller.
        let action_id = self.get_next_action_id();
        self.request_responders.insert(action_id, callback);
        self.action_sender.unbounded_send(SettingAction {
            id: action_id,
            setting_type,
            data: SettingActionData::Request(request),
        })?;
        Ok(())
    }
    /// Registers `listener` for changes to `setting_type` and reports the
    /// updated listener count; the returned session cancels the registration
    /// when closed (or dropped).
    fn listen(
        &mut self,
        setting_type: SettingType,
        listener: ListenCallback,
    ) -> Result<Box<dyn ListenSession + Send + Sync>, Error> {
        let action_id = self.get_next_action_id();
        // The entry API replaces the separate contains_key/insert dance and
        // makes the lookup infallible, eliminating the old unreachable
        // "invalid error" branch.
        let listeners = self.listeners.entry(setting_type).or_insert_with(Vec::new);
        let info = ListenSessionInfo {
            session_id: self.next_session_id,
            setting_type,
            callback: listener,
        };
        self.next_session_id += 1;
        listeners.push(info.clone());
        self.action_sender.unbounded_send(SettingAction {
            id: action_id,
            setting_type,
            data: SettingActionData::Listen(listeners.len() as u64),
        })?;
        Ok(Box::new(ListenSessionImpl::new(
            info,
            self.listen_cancellation_sender.clone(),
        )))
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Verifies the request round-trip: a request is forwarded as a
    /// `SettingAction`, and a `SettingEvent::Response` with the matching
    /// action id reaches the caller's responder.
    #[fuchsia_async::run_until_stalled(test)]
    async fn test_request() {
        let (action_tx, mut action_rx) = futures::channel::mpsc::unbounded::<SettingAction>();
        let (switchboard, event_tx) = SwitchboardImpl::create(action_tx);
        let (response_tx, response_rx) =
            futures::channel::oneshot::channel::<SettingResponseResult>();
        // Send request
        assert!(switchboard
            .lock()
            .await
            .request(SettingType::Unknown, SettingRequest::Get, response_tx)
            .is_ok());
        // Ensure request is received.
        let action = action_rx.next().await.unwrap();
        assert_eq!(SettingType::Unknown, action.setting_type);
        if let SettingActionData::Request(request) = action.data {
            assert_eq!(request, SettingRequest::Get);
        } else {
            panic!("unexpected output type");
        }
        // Send response
        assert!(event_tx.unbounded_send(SettingEvent::Response(action.id, Ok(None))).is_ok());
        // Ensure response is received.
        let response = response_rx.await.unwrap();
        assert!(response.is_ok());
    }
    /// Verifies that registering a listener emits a Listen(1) count and
    /// closing the session emits Listen(0).
    #[fuchsia_async::run_until_stalled(test)]
    async fn test_listen() {
        let (action_tx, mut action_rx) = futures::channel::mpsc::unbounded::<SettingAction>();
        let (switchboard, _event_tx) = SwitchboardImpl::create(action_tx);
        let setting_type = SettingType::Unknown;
        // Register first listener and verify count.
        let (notify_tx1, _notify_rx1) = futures::channel::mpsc::unbounded::<SettingType>();
        let listen_result = switchboard.lock().await.listen(
            setting_type,
            Arc::new(move |setting| {
                notify_tx1.unbounded_send(setting).ok();
            }),
        );
        assert!(listen_result.is_ok());
        {
            let action = action_rx.next().await.unwrap();
            assert_eq!(action.setting_type, setting_type);
            assert_eq!(action.data, SettingActionData::Listen(1));
        }
        // Unregister and verify count.
        if let Ok(mut listen_session) = listen_result {
            listen_session.close();
        } else {
            panic!("should have a session");
        }
        {
            let action = action_rx.next().await.unwrap();
            assert_eq!(action.setting_type, setting_type);
            assert_eq!(action.data, SettingActionData::Listen(0));
        }
    }
    /// Verifies that a Changed event fans out to every registered listener.
    #[fuchsia_async::run_until_stalled(test)]
    async fn test_notify() {
        let (action_tx, mut action_rx) = futures::channel::mpsc::unbounded::<SettingAction>();
        let (switchboard, event_tx) = SwitchboardImpl::create(action_tx);
        let setting_type = SettingType::Unknown;
        // Register first listener and verify count.
        let (notify_tx1, mut notify_rx1) = futures::channel::mpsc::unbounded::<SettingType>();
        assert!(switchboard
            .lock()
            .await
            .listen(
                setting_type,
                Arc::new(move |setting_type| {
                    notify_tx1.unbounded_send(setting_type).ok();
                })
            )
            .is_ok());
        {
            let action = action_rx.next().await.unwrap();
            assert_eq!(action.setting_type, setting_type);
            assert_eq!(action.data, SettingActionData::Listen(1));
        }
        // Register second listener and verify count
        let (notify_tx2, mut notify_rx2) = futures::channel::mpsc::unbounded::<SettingType>();
        assert!(switchboard
            .lock()
            .await
            .listen(
                setting_type,
                Arc::new(move |setting_type| {
                    notify_tx2.unbounded_send(setting_type).ok();
                })
            )
            .is_ok());
        {
            let action = action_rx.next().await.unwrap();
            assert_eq!(action.setting_type, setting_type);
            assert_eq!(action.data, SettingActionData::Listen(2));
        }
        // Send notification
        assert!(event_tx.unbounded_send(SettingEvent::Changed(setting_type)).is_ok());
        // Ensure both listeners receive notifications.
        {
            let notification = notify_rx1.next().await.unwrap();
            assert_eq!(notification, setting_type);
        }
        {
            let notification = notify_rx2.next().await.unwrap();
            assert_eq!(notification, setting_type);
        }
    }
}
| 32.245383 | 94 | 0.596023 |
0e9f46246fd26fbb2f8af6c228cdfaebd2b6783c | 2,133 | use crate::cert::NebulaCertificate;
use anyhow::Result;
use std::sync::{Arc, RwLock};
/// Snapshot of a certificate together with its serialized forms and key
/// material.
#[derive(Debug, PartialEq, Clone)]
pub struct CertStateInner<'a> {
    // The parsed certificate this state was built from.
    pub certificate: &'a NebulaCertificate,
    // Marshalled certificate, including its public key.
    pub raw_certificate: Vec<u8>,
    // Marshalled certificate with the public key stripped out.
    pub raw_certificate_no_key: Vec<u8>,
    pub public_key: Vec<u8>,
    pub private_key: Vec<u8>,
}
/// Shared, lock-protected handle around a `CertStateInner`.
pub struct CertState<'a> {
    pub inner: Arc<RwLock<CertStateInner<'a>>>,
}
impl<'a> CertState<'a> {
    /// Builds a `CertState` from a certificate and its private key, capturing
    /// both the full marshalled form and a marshalled form without the
    /// public key.
    pub fn new(
        certificate: &'a mut NebulaCertificate,
        private_key: Vec<u8>,
    ) -> Result<Self> {
        let raw_certificate = certificate.marshal()?;
        // Temporarily take the public key so the certificate can be
        // marshalled without it, then put it straight back.
        let public_key = std::mem::take(&mut certificate.details.public_key);
        let raw_certificate_no_key = certificate.marshal()?;
        // Plain assignment replaces the old `let _ = std::mem::replace(...)`,
        // whose result was discarded anyway.
        certificate.details.public_key = public_key;
        let inner = Arc::new(RwLock::new(CertStateInner {
            raw_certificate,
            certificate,
            private_key,
            public_key: certificate.details.public_key.clone().to_vec(),
            raw_certificate_no_key,
        }));
        Ok(Self { inner })
    }
}
#[cfg(test)]
mod test {
    use crate::{cert, test_utils};
    use super::*;
    /// Round-trips a test certificate through `CertState::new` and checks
    /// that the key-less marshalled form really has no public key.
    #[test]
    fn test_cert_state() {
        let (
            mut cert,
            _cert_x25519_public_key,
            _cert_x25519_secret_key,
            cert_ed25519_keypair,
            _ca_cert,
            _ca_keypair,
        ) = test_utils::create_test_ca_cert_client_cert().unwrap();
        let cert_state = CertState::new(
            &mut cert,
            cert_ed25519_keypair.secret.to_bytes().to_vec(),
        ).unwrap();
        let cert_2 = cert::unmarshal_nebula_certificate(
            &cert_state.inner.read().unwrap().raw_certificate_no_key[..]
        ).unwrap();
        // ensure the certificate doesn't contain a key
        // (`is_empty` instead of `len() == 0`, per clippy::len_zero)
        assert!(cert_2.details.public_key.is_empty());
    }
}
ede255e8942e31a77641a882340ca5c41724485f | 1,664 | use std::collections::VecDeque;
/// An undo/redo-style history with a current item, a bounded past, and a
/// future that is discarded whenever a new item is appended.
#[derive(Clone)]
pub struct HistoryList<T> {
    // Maximum number of past entries retained.
    maxlen: usize,
    // Older items, oldest at the front.
    past: VecDeque<T>,
    pub current: T,
    // Items stepped back over, soonest at the front.
    future: VecDeque<T>,
}
impl<T> HistoryList<T> {
    /// Creates a history seeded with `init`, retaining at most `maxlen`
    /// past entries.
    pub fn new(init: T, maxlen: usize) -> HistoryList<T> {
        HistoryList {
            maxlen,
            past: VecDeque::with_capacity(maxlen),
            current: init,
            future: VecDeque::with_capacity(maxlen),
        }
    }
    /// Makes `item` current; the old current joins the past and any redo
    /// history is discarded.
    pub fn append(&mut self, item: T) {
        let displaced = std::mem::replace(&mut self.current, item);
        self.past.push_back(displaced);
        self.future.clear();
        // Evict the oldest entries once the cap is exceeded.
        while self.past.len() > self.maxlen {
            self.past.pop_front();
        }
    }
    /// Returns the current item.
    pub fn current(&self) -> &T {
        &self.current
    }
    /// Steps back up to `count` entries (stopping early if the past runs
    /// out) and returns the new current item.
    pub fn prev(&mut self, count: usize) -> &T {
        for _ in 0..count {
            if let Some(item) = self.past.pop_back() {
                let displaced = std::mem::replace(&mut self.current, item);
                self.future.push_front(displaced);
            } else {
                break;
            }
        }
        &self.current
    }
    /// Steps forward up to `count` entries (stopping early if the future
    /// runs out) and returns the new current item.
    pub fn next(&mut self, count: usize) -> &T {
        for _ in 0..count {
            if let Some(item) = self.future.pop_front() {
                let displaced = std::mem::replace(&mut self.current, item);
                self.past.push_back(displaced);
            } else {
                break;
            }
        }
        &self.current
    }
}
impl<T> Default for HistoryList<T>
where
    T: Default,
{
    /// A default history list: starts at `T::default()` and keeps up to
    /// 100 past entries.
    fn default() -> HistoryList<T> {
        HistoryList::new(T::default(), 100)
    }
}
| 23.771429 | 73 | 0.492788 |