hexsha
stringlengths
40
40
size
int64
2
1.05M
content
stringlengths
2
1.05M
avg_line_length
float64
1.33
100
max_line_length
int64
1
1k
alphanum_fraction
float64
0.25
1
d9c875e60bc0929aa7918f17f910ad349ccad131
27,256
// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license. use crate::error::AnyError; use crate::runtime::JsRuntimeState; use crate::JsRuntime; use crate::Op; use crate::OpId; use crate::OpTable; use crate::ZeroCopyBuf; use futures::future::FutureExt; use rusty_v8 as v8; use std::cell::Cell; use std::convert::TryFrom; use std::io::{stdout, Write}; use std::option::Option; use url::Url; use v8::MapFnTo; lazy_static! { pub static ref EXTERNAL_REFERENCES: v8::ExternalReferences = v8::ExternalReferences::new(&[ v8::ExternalReference { function: print.map_fn_to() }, v8::ExternalReference { function: recv.map_fn_to() }, v8::ExternalReference { function: send.map_fn_to() }, v8::ExternalReference { function: set_macrotask_callback.map_fn_to() }, v8::ExternalReference { function: eval_context.map_fn_to() }, v8::ExternalReference { getter: shared_getter.map_fn_to() }, v8::ExternalReference { function: queue_microtask.map_fn_to() }, v8::ExternalReference { function: encode.map_fn_to() }, v8::ExternalReference { function: decode.map_fn_to() }, v8::ExternalReference { function: get_promise_details.map_fn_to() }, v8::ExternalReference { function: get_proxy_details.map_fn_to() }, ]); } pub fn script_origin<'a>( s: &mut v8::HandleScope<'a>, resource_name: v8::Local<'a, v8::String>, ) -> v8::ScriptOrigin<'a> { let resource_line_offset = v8::Integer::new(s, 0); let resource_column_offset = v8::Integer::new(s, 0); let resource_is_shared_cross_origin = v8::Boolean::new(s, false); let script_id = v8::Integer::new(s, 123); let source_map_url = v8::String::new(s, "").unwrap(); let resource_is_opaque = v8::Boolean::new(s, true); let is_wasm = v8::Boolean::new(s, false); let is_module = v8::Boolean::new(s, false); v8::ScriptOrigin::new( resource_name.into(), resource_line_offset, resource_column_offset, resource_is_shared_cross_origin, script_id, source_map_url.into(), resource_is_opaque, is_wasm, is_module, ) } pub fn module_origin<'a>( s: &mut v8::HandleScope<'a>, 
resource_name: v8::Local<'a, v8::String>, ) -> v8::ScriptOrigin<'a> { let resource_line_offset = v8::Integer::new(s, 0); let resource_column_offset = v8::Integer::new(s, 0); let resource_is_shared_cross_origin = v8::Boolean::new(s, false); let script_id = v8::Integer::new(s, 123); let source_map_url = v8::String::new(s, "").unwrap(); let resource_is_opaque = v8::Boolean::new(s, true); let is_wasm = v8::Boolean::new(s, false); let is_module = v8::Boolean::new(s, true); v8::ScriptOrigin::new( resource_name.into(), resource_line_offset, resource_column_offset, resource_is_shared_cross_origin, script_id, source_map_url.into(), resource_is_opaque, is_wasm, is_module, ) } pub fn initialize_context<'s>( scope: &mut v8::HandleScope<'s, ()>, ) -> v8::Local<'s, v8::Context> { let scope = &mut v8::EscapableHandleScope::new(scope); let context = v8::Context::new(scope); let global = context.global(scope); let scope = &mut v8::ContextScope::new(scope, context); let deno_key = v8::String::new(scope, "Deno").unwrap(); let deno_val = v8::Object::new(scope); global.set(scope, deno_key.into(), deno_val.into()); let core_key = v8::String::new(scope, "core").unwrap(); let core_val = v8::Object::new(scope); deno_val.set(scope, core_key.into(), core_val.into()); let print_key = v8::String::new(scope, "print").unwrap(); let print_tmpl = v8::FunctionTemplate::new(scope, print); let print_val = print_tmpl.get_function(scope).unwrap(); core_val.set(scope, print_key.into(), print_val.into()); let recv_key = v8::String::new(scope, "recv").unwrap(); let recv_tmpl = v8::FunctionTemplate::new(scope, recv); let recv_val = recv_tmpl.get_function(scope).unwrap(); core_val.set(scope, recv_key.into(), recv_val.into()); let send_key = v8::String::new(scope, "send").unwrap(); let send_tmpl = v8::FunctionTemplate::new(scope, send); let send_val = send_tmpl.get_function(scope).unwrap(); core_val.set(scope, send_key.into(), send_val.into()); let set_macrotask_callback_key = v8::String::new(scope, 
"setMacrotaskCallback").unwrap(); let set_macrotask_callback_tmpl = v8::FunctionTemplate::new(scope, set_macrotask_callback); let set_macrotask_callback_val = set_macrotask_callback_tmpl.get_function(scope).unwrap(); core_val.set( scope, set_macrotask_callback_key.into(), set_macrotask_callback_val.into(), ); let eval_context_key = v8::String::new(scope, "evalContext").unwrap(); let eval_context_tmpl = v8::FunctionTemplate::new(scope, eval_context); let eval_context_val = eval_context_tmpl.get_function(scope).unwrap(); core_val.set(scope, eval_context_key.into(), eval_context_val.into()); let encode_key = v8::String::new(scope, "encode").unwrap(); let encode_tmpl = v8::FunctionTemplate::new(scope, encode); let encode_val = encode_tmpl.get_function(scope).unwrap(); core_val.set(scope, encode_key.into(), encode_val.into()); let decode_key = v8::String::new(scope, "decode").unwrap(); let decode_tmpl = v8::FunctionTemplate::new(scope, decode); let decode_val = decode_tmpl.get_function(scope).unwrap(); core_val.set(scope, decode_key.into(), decode_val.into()); let get_promise_details_key = v8::String::new(scope, "getPromiseDetails").unwrap(); let get_promise_details_tmpl = v8::FunctionTemplate::new(scope, get_promise_details); let get_promise_details_val = get_promise_details_tmpl.get_function(scope).unwrap(); core_val.set( scope, get_promise_details_key.into(), get_promise_details_val.into(), ); let get_proxy_details_key = v8::String::new(scope, "getProxyDetails").unwrap(); let get_proxy_details_tmpl = v8::FunctionTemplate::new(scope, get_proxy_details); let get_proxy_details_val = get_proxy_details_tmpl.get_function(scope).unwrap(); core_val.set( scope, get_proxy_details_key.into(), get_proxy_details_val.into(), ); let shared_key = v8::String::new(scope, "shared").unwrap(); core_val.set_accessor(scope, shared_key.into(), shared_getter); // Direct bindings on `window`. 
let queue_microtask_key = v8::String::new(scope, "queueMicrotask").unwrap(); let queue_microtask_tmpl = v8::FunctionTemplate::new(scope, queue_microtask); let queue_microtask_val = queue_microtask_tmpl.get_function(scope).unwrap(); global.set( scope, queue_microtask_key.into(), queue_microtask_val.into(), ); scope.escape(context) } pub fn boxed_slice_to_uint8array<'sc>( scope: &mut v8::HandleScope<'sc>, buf: Box<[u8]>, ) -> v8::Local<'sc, v8::Uint8Array> { assert!(!buf.is_empty()); let buf_len = buf.len(); let backing_store = v8::ArrayBuffer::new_backing_store_from_boxed_slice(buf); let backing_store_shared = backing_store.make_shared(); let ab = v8::ArrayBuffer::with_backing_store(scope, &backing_store_shared); v8::Uint8Array::new(scope, ab, 0, buf_len) .expect("Failed to create UintArray8") } pub extern "C" fn host_import_module_dynamically_callback( context: v8::Local<v8::Context>, referrer: v8::Local<v8::ScriptOrModule>, specifier: v8::Local<v8::String>, ) -> *mut v8::Promise { let scope = &mut unsafe { v8::CallbackScope::new(context) }; // NOTE(bartlomieju): will crash for non-UTF-8 specifier let specifier_str = specifier .to_string(scope) .unwrap() .to_rust_string_lossy(scope); let referrer_name = referrer.get_resource_name(); let referrer_name_str = referrer_name .to_string(scope) .unwrap() .to_rust_string_lossy(scope); // TODO(ry) I'm not sure what HostDefinedOptions is for or if we're ever going // to use it. For now we check that it is not used. This check may need to be // changed in the future. 
let host_defined_options = referrer.get_host_defined_options(); assert_eq!(host_defined_options.length(), 0); let resolver = v8::PromiseResolver::new(scope).unwrap(); let promise = resolver.get_promise(scope); let resolver_handle = v8::Global::new(scope, resolver); { let state_rc = JsRuntime::state(scope); let mut state = state_rc.borrow_mut(); state.dyn_import_cb(resolver_handle, &specifier_str, &referrer_name_str); } &*promise as *const _ as *mut _ } pub extern "C" fn host_initialize_import_meta_object_callback( context: v8::Local<v8::Context>, module: v8::Local<v8::Module>, meta: v8::Local<v8::Object>, ) { let scope = &mut unsafe { v8::CallbackScope::new(context) }; let state_rc = JsRuntime::state(scope); let state = state_rc.borrow(); let module_global = v8::Global::new(scope, module); let info = state .modules .get_info(&module_global) .expect("Module not found"); let url_key = v8::String::new(scope, "url").unwrap(); let url_val = v8::String::new(scope, &info.name).unwrap(); meta.create_data_property(scope, url_key.into(), url_val.into()); let main_key = v8::String::new(scope, "main").unwrap(); let main_val = v8::Boolean::new(scope, info.main); meta.create_data_property(scope, main_key.into(), main_val.into()); } pub extern "C" fn promise_reject_callback(message: v8::PromiseRejectMessage) { let scope = &mut unsafe { v8::CallbackScope::new(&message) }; let state_rc = JsRuntime::state(scope); let mut state = state_rc.borrow_mut(); let promise = message.get_promise(); let promise_global = v8::Global::new(scope, promise); match message.get_event() { v8::PromiseRejectEvent::PromiseRejectWithNoHandler => { let error = message.get_value().unwrap(); let error_global = v8::Global::new(scope, error); state .pending_promise_exceptions .insert(promise_global, error_global); } v8::PromiseRejectEvent::PromiseHandlerAddedAfterReject => { state.pending_promise_exceptions.remove(&promise_global); } v8::PromiseRejectEvent::PromiseRejectAfterResolved => {} 
v8::PromiseRejectEvent::PromiseResolveAfterResolved => { // Should not warn. See #1272 } }; } pub(crate) unsafe fn get_backing_store_slice( backing_store: &v8::SharedRef<v8::BackingStore>, byte_offset: usize, byte_length: usize, ) -> &[u8] { let cells: *const [Cell<u8>] = &backing_store[byte_offset..byte_offset + byte_length]; let bytes = cells as *const [u8]; &*bytes } #[allow(clippy::mut_from_ref)] pub(crate) unsafe fn get_backing_store_slice_mut( backing_store: &v8::SharedRef<v8::BackingStore>, byte_offset: usize, byte_length: usize, ) -> &mut [u8] { let cells: *const [Cell<u8>] = &backing_store[byte_offset..byte_offset + byte_length]; let bytes = cells as *const _ as *mut [u8]; &mut *bytes } fn print( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, _rv: v8::ReturnValue, ) { let arg_len = args.length(); assert!((0..=2).contains(&arg_len)); let obj = args.get(0); let is_err_arg = args.get(1); let mut is_err = false; if arg_len == 2 { let int_val = is_err_arg .integer_value(scope) .expect("Unable to convert to integer"); is_err = int_val != 0; }; let tc_scope = &mut v8::TryCatch::new(scope); let str_ = match obj.to_string(tc_scope) { Some(s) => s, None => v8::String::new(tc_scope, "").unwrap(), }; if is_err { eprint!("{}", str_.to_rust_string_lossy(tc_scope)); stdout().flush().unwrap(); } else { print!("{}", str_.to_rust_string_lossy(tc_scope)); stdout().flush().unwrap(); } } fn recv( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, _rv: v8::ReturnValue, ) { let state_rc = JsRuntime::state(scope); let mut state = state_rc.borrow_mut(); let cb = match v8::Local::<v8::Function>::try_from(args.get(0)) { Ok(cb) => cb, Err(err) => return throw_type_error(scope, err.to_string()), }; let slot = match &mut state.js_recv_cb { slot @ None => slot, _ => return throw_type_error(scope, "Deno.core.recv() already called"), }; slot.replace(v8::Global::new(scope, cb)); } fn send<'s>( scope: &mut v8::HandleScope<'s>, args: 
v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { let state_rc = JsRuntime::state(scope); let state = state_rc.borrow_mut(); let op_id = match v8::Local::<v8::Integer>::try_from(args.get(0)) .map_err(AnyError::from) .and_then(|l| OpId::try_from(l.value()).map_err(AnyError::from)) { Ok(op_id) => op_id, Err(err) => { let msg = format!("invalid op id: {}", err); let msg = v8::String::new(scope, &msg).unwrap(); let exc = v8::Exception::type_error(scope, msg); scope.throw_exception(exc); return; } }; let buf_iter = (1..args.length()).map(|idx| { v8::Local::<v8::ArrayBufferView>::try_from(args.get(idx)) .map(|view| ZeroCopyBuf::new(scope, view)) .map_err(|err| { let msg = format!("Invalid argument at position {}: {}", idx, err); let msg = v8::String::new(scope, &msg).unwrap(); v8::Exception::type_error(scope, msg) }) }); let bufs = match buf_iter.collect::<Result<_, _>>() { Ok(bufs) => bufs, Err(exc) => { scope.throw_exception(exc); return; } }; let op = OpTable::route_op(op_id, state.op_state.clone(), bufs); assert_eq!(state.shared.size(), 0); match op { Op::Sync(buf) if !buf.is_empty() => { rv.set(boxed_slice_to_uint8array(scope, buf).into()); } Op::Sync(_) => {} Op::Async(fut) => { let fut2 = fut.map(move |buf| (op_id, buf)); state.pending_ops.push(fut2.boxed_local()); state.have_unpolled_ops.set(true); } Op::AsyncUnref(fut) => { let fut2 = fut.map(move |buf| (op_id, buf)); state.pending_unref_ops.push(fut2.boxed_local()); state.have_unpolled_ops.set(true); } Op::NotFound => { let msg = format!("Unknown op id: {}", op_id); let msg = v8::String::new(scope, &msg).unwrap(); let exc = v8::Exception::type_error(scope, msg); scope.throw_exception(exc); } } } fn set_macrotask_callback( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, _rv: v8::ReturnValue, ) { let state_rc = JsRuntime::state(scope); let mut state = state_rc.borrow_mut(); let cb = match v8::Local::<v8::Function>::try_from(args.get(0)) { Ok(cb) => cb, Err(err) => return 
throw_type_error(scope, err.to_string()), }; let slot = match &mut state.js_macrotask_cb { slot @ None => slot, _ => { return throw_type_error( scope, "Deno.core.setMacrotaskCallback() already called", ); } }; slot.replace(v8::Global::new(scope, cb)); } fn eval_context( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { let source = match v8::Local::<v8::String>::try_from(args.get(0)) { Ok(s) => s, Err(_) => { let msg = v8::String::new(scope, "Invalid argument").unwrap(); let exception = v8::Exception::type_error(scope, msg); scope.throw_exception(exception); return; } }; let url = v8::Local::<v8::String>::try_from(args.get(1)) .map(|n| Url::from_file_path(n.to_rust_string_lossy(scope)).unwrap()); let output = v8::Array::new(scope, 2); /* output[0] = result output[1] = ErrorInfo | null ErrorInfo = { thrown: Error | any, isNativeError: boolean, isCompileError: boolean, } */ let tc_scope = &mut v8::TryCatch::new(scope); let name = v8::String::new(tc_scope, url.as_ref().map_or("<unknown>", Url::as_str)) .unwrap(); let origin = script_origin(tc_scope, name); let maybe_script = v8::Script::compile(tc_scope, source, Some(&origin)); if maybe_script.is_none() { assert!(tc_scope.has_caught()); let exception = tc_scope.exception().unwrap(); let js_zero = v8::Integer::new(tc_scope, 0); let js_null = v8::null(tc_scope); output.set(tc_scope, js_zero.into(), js_null.into()); let errinfo_obj = v8::Object::new(tc_scope); let is_compile_error_key = v8::String::new(tc_scope, "isCompileError").unwrap(); let is_compile_error_val = v8::Boolean::new(tc_scope, true); errinfo_obj.set( tc_scope, is_compile_error_key.into(), is_compile_error_val.into(), ); let is_native_error_key = v8::String::new(tc_scope, "isNativeError").unwrap(); let is_native_error_val = v8::Boolean::new(tc_scope, exception.is_native_error()); errinfo_obj.set( tc_scope, is_native_error_key.into(), is_native_error_val.into(), ); let thrown_key = v8::String::new(tc_scope, 
"thrown").unwrap(); errinfo_obj.set(tc_scope, thrown_key.into(), exception); let js_one = v8::Integer::new(tc_scope, 1); output.set(tc_scope, js_one.into(), errinfo_obj.into()); rv.set(output.into()); return; } let result = maybe_script.unwrap().run(tc_scope); if result.is_none() { assert!(tc_scope.has_caught()); let exception = tc_scope.exception().unwrap(); let js_zero = v8::Integer::new(tc_scope, 0); let js_null = v8::null(tc_scope); output.set(tc_scope, js_zero.into(), js_null.into()); let errinfo_obj = v8::Object::new(tc_scope); let is_compile_error_key = v8::String::new(tc_scope, "isCompileError").unwrap(); let is_compile_error_val = v8::Boolean::new(tc_scope, false); errinfo_obj.set( tc_scope, is_compile_error_key.into(), is_compile_error_val.into(), ); let is_native_error_key = v8::String::new(tc_scope, "isNativeError").unwrap(); let is_native_error_val = v8::Boolean::new(tc_scope, exception.is_native_error()); errinfo_obj.set( tc_scope, is_native_error_key.into(), is_native_error_val.into(), ); let thrown_key = v8::String::new(tc_scope, "thrown").unwrap(); errinfo_obj.set(tc_scope, thrown_key.into(), exception); let js_one = v8::Integer::new(tc_scope, 1); output.set(tc_scope, js_one.into(), errinfo_obj.into()); rv.set(output.into()); return; } let js_zero = v8::Integer::new(tc_scope, 0); let js_one = v8::Integer::new(tc_scope, 1); let js_null = v8::null(tc_scope); output.set(tc_scope, js_zero.into(), result.unwrap()); output.set(tc_scope, js_one.into(), js_null.into()); rv.set(output.into()); } fn encode( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { let text = match v8::Local::<v8::String>::try_from(args.get(0)) { Ok(s) => s, Err(_) => { let msg = v8::String::new(scope, "Invalid argument").unwrap(); let exception = v8::Exception::type_error(scope, msg); scope.throw_exception(exception); return; } }; let text_str = text.to_rust_string_lossy(scope); let text_bytes = 
text_str.as_bytes().to_vec().into_boxed_slice(); let buf = if text_bytes.is_empty() { let ab = v8::ArrayBuffer::new(scope, 0); v8::Uint8Array::new(scope, ab, 0, 0).expect("Failed to create UintArray8") } else { let buf_len = text_bytes.len(); let backing_store = v8::ArrayBuffer::new_backing_store_from_boxed_slice(text_bytes); let backing_store_shared = backing_store.make_shared(); let ab = v8::ArrayBuffer::with_backing_store(scope, &backing_store_shared); v8::Uint8Array::new(scope, ab, 0, buf_len) .expect("Failed to create UintArray8") }; rv.set(buf.into()) } fn decode( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { let view = match v8::Local::<v8::ArrayBufferView>::try_from(args.get(0)) { Ok(view) => view, Err(_) => { let msg = v8::String::new(scope, "Invalid argument").unwrap(); let exception = v8::Exception::type_error(scope, msg); scope.throw_exception(exception); return; } }; let backing_store = view.buffer(scope).unwrap().get_backing_store(); let buf = unsafe { get_backing_store_slice( &backing_store, view.byte_offset(), view.byte_length(), ) }; // If `String::new_from_utf8()` returns `None`, this means that the // length of the decoded string would be longer than what V8 can // handle. In this case we return `RangeError`. 
// // For more details see: // - https://encoding.spec.whatwg.org/#dom-textdecoder-decode // - https://github.com/denoland/deno/issues/6649 // - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278 match v8::String::new_from_utf8(scope, &buf, v8::NewStringType::Normal) { Some(text) => rv.set(text.into()), None => { let msg = v8::String::new(scope, "string too long").unwrap(); let exception = v8::Exception::range_error(scope, msg); scope.throw_exception(exception); } }; } fn queue_microtask( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, _rv: v8::ReturnValue, ) { match v8::Local::<v8::Function>::try_from(args.get(0)) { Ok(f) => scope.enqueue_microtask(f), Err(_) => { let msg = v8::String::new(scope, "Invalid argument").unwrap(); let exception = v8::Exception::type_error(scope, msg); scope.throw_exception(exception); } }; } fn shared_getter( scope: &mut v8::HandleScope, _name: v8::Local<v8::Name>, _args: v8::PropertyCallbackArguments, mut rv: v8::ReturnValue, ) { let state_rc = JsRuntime::state(scope); let mut state = state_rc.borrow_mut(); let JsRuntimeState { shared_ab, shared, .. } = &mut *state; // Lazily initialize the persistent external ArrayBuffer. let shared_ab = match shared_ab { Some(ref ab) => v8::Local::new(scope, ab), slot @ None => { let ab = v8::SharedArrayBuffer::with_backing_store( scope, shared.get_backing_store(), ); slot.replace(v8::Global::new(scope, ab)); ab } }; rv.set(shared_ab.into()) } // Called by V8 during `Isolate::mod_instantiate`. 
pub fn module_resolve_callback<'s>( context: v8::Local<'s, v8::Context>, specifier: v8::Local<'s, v8::String>, referrer: v8::Local<'s, v8::Module>, ) -> Option<v8::Local<'s, v8::Module>> { let scope = &mut unsafe { v8::CallbackScope::new(context) }; let state_rc = JsRuntime::state(scope); let state = state_rc.borrow(); let referrer_global = v8::Global::new(scope, referrer); let referrer_info = state .modules .get_info(&referrer_global) .expect("ModuleInfo not found"); let referrer_name = referrer_info.name.to_string(); let specifier_str = specifier.to_rust_string_lossy(scope); let resolved_specifier = state .loader .resolve( state.op_state.clone(), &specifier_str, &referrer_name, false, ) .expect("Module should have been already resolved"); if let Some(id) = state.modules.get_id(resolved_specifier.as_str()) { if let Some(handle) = state.modules.get_handle(id) { return Some(v8::Local::new(scope, handle)); } } let msg = format!( r#"Cannot resolve module "{}" from "{}""#, specifier_str, referrer_name ); throw_type_error(scope, msg); None } // Returns promise details or throw TypeError, if argument passed isn't a Promise. // Promise details is a js_two elements array. 
// promise_details = [State, Result] // State = enum { Pending = 0, Fulfilled = 1, Rejected = 2} // Result = PromiseResult<T> | PromiseError fn get_promise_details( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { let promise = match v8::Local::<v8::Promise>::try_from(args.get(0)) { Ok(val) => val, Err(_) => { let msg = v8::String::new(scope, "Invalid argument").unwrap(); let exception = v8::Exception::type_error(scope, msg); scope.throw_exception(exception); return; } }; let promise_details = v8::Array::new(scope, 2); match promise.state() { v8::PromiseState::Pending => { let js_zero = v8::Integer::new(scope, 0); promise_details.set(scope, js_zero.into(), js_zero.into()); rv.set(promise_details.into()); } v8::PromiseState::Fulfilled => { let js_zero = v8::Integer::new(scope, 0); let js_one = v8::Integer::new(scope, 1); let promise_result = promise.result(scope); promise_details.set(scope, js_zero.into(), js_one.into()); promise_details.set(scope, js_one.into(), promise_result); rv.set(promise_details.into()); } v8::PromiseState::Rejected => { let js_zero = v8::Integer::new(scope, 0); let js_one = v8::Integer::new(scope, 1); let js_two = v8::Integer::new(scope, 2); let promise_result = promise.result(scope); promise_details.set(scope, js_zero.into(), js_two.into()); promise_details.set(scope, js_one.into(), promise_result); rv.set(promise_details.into()); } } } // Based on https://github.com/nodejs/node/blob/1e470510ff74391d7d4ec382909ea8960d2d2fbc/src/node_util.cc // Copyright Joyent, Inc. and other Node contributors. 
// // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to permit // persons to whom the Software is furnished to do so, subject to the // following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN // NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, // DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR // OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE // USE OR OTHER DEALINGS IN THE SOFTWARE. fn get_proxy_details( scope: &mut v8::HandleScope, args: v8::FunctionCallbackArguments, mut rv: v8::ReturnValue, ) { // Return undefined if it's not a proxy. let proxy = match v8::Local::<v8::Proxy>::try_from(args.get(0)) { Ok(val) => val, Err(_) => { return; } }; let proxy_details = v8::Array::new(scope, 2); let js_zero = v8::Integer::new(scope, 0); let js_one = v8::Integer::new(scope, 1); let target = proxy.get_target(scope); let handler = proxy.get_handler(scope); proxy_details.set(scope, js_zero.into(), target); proxy_details.set(scope, js_one.into(), handler); rv.set(proxy_details.into()); } fn throw_type_error(scope: &mut v8::HandleScope, message: impl AsRef<str>) { let message = v8::String::new(scope, message.as_ref()).unwrap(); let exception = v8::Exception::type_error(scope, message); scope.throw_exception(exception); }
31.7669
105
0.666716
6422ff97d554c049e327a18e6f72305e77db5709
9,305
use proc_macro2::{Ident, Span, TokenStream}; use shared::{map_type_params, split_for_impl}; use syn::{self, Data, DeriveInput, Fields, GenericParam, Generics}; use attr::{Container, CrateName}; pub fn derive(input: TokenStream) -> TokenStream { let derive_input = syn::parse2(input).expect("Input is checked by rustc"); let container = Container::from_ast(&derive_input); let DeriveInput { ident, data, generics, .. } = derive_input; let tokens = match data { Data::Struct(_) | Data::Enum(_) => gen_impl(&container, ident, generics, &data), Data::Union(_) => panic!("Unions are not supported"), }; tokens.into() } fn gen_impl(container: &Container, ident: Ident, generics: Generics, data: &Data) -> TokenStream { let trait_bounds = &map_type_params(&generics, |ty| { quote! { #ty: _gluon_api::VmType, #ty::Type: Sized } }); let (impl_generics, ty_generics, where_clause) = split_for_impl(&generics, &[]); let gluon = match container.crate_name { CrateName::Some(ref ident) => quote! { use #ident::base as _gluon_base; use #ident::api as _gluon_api; use #ident::thread as _gluon_thread; }, CrateName::GluonVm => quote! { use base as _gluon_base; use api as _gluon_api; use thread as _gluon_thread; }, CrateName::None => quote! { use gluon::base as _gluon_base; use gluon::vm::api as _gluon_api; use gluon::vm::thread as _gluon_thread; }, }; let make_type_impl = match container.vm_type { Some(ref gluon_type) => { let type_application = gen_type_application(&generics); quote! { let ty = match vm.find_type_info(#gluon_type) { Ok(info) => info.into_type(), Err(_) => panic!("Could not find type '{}'. Is the module defining the type loaded?", #gluon_type), }; #type_application } } None => match *data { Data::Struct(ref struct_) => match struct_.fields { Fields::Named(ref fields) => { let fields = fields.named.iter().map(|field| { let ident = field.ident.as_ref().unwrap().to_string(); let typ = &field.ty; quote! 
{ _gluon_base::types::Field { name: _gluon_base::symbol::Symbol::from(#ident), typ: <#typ as _gluon_api::VmType>::make_type(vm), } } }); quote! { _gluon_base::types::Type::record( vec![], vec![#(#fields),*], ) } } Fields::Unnamed(ref fields) => { if fields.unnamed.len() == 1 { let typ = &fields.unnamed[0].ty; quote! { <#typ as _gluon_api::VmType>::make_type(vm) } } else { let fields = fields.unnamed.iter().map(|field| &field.ty); quote! { _gluon_base::types::Type::tuple(vec![#( <#fields as _gluon_api::VmType>::make_type(vm) ),*]) } } } Fields::Unit => quote!(_gluon_base::types::Type::unit()), }, Data::Enum(ref enum_) => { let variants = enum_.variants.iter().map(|variant| { let ident = variant.ident.to_string(); match variant.fields { Fields::Named(ref fields) => { let fields = fields.named.iter().map(|field| { let ident = field.ident.as_ref().unwrap().to_string(); let typ = &field.ty; quote! { _gluon_base::types::Field { name: _gluon_base::symbol::Symbol::from(#ident), typ: <#typ as _gluon_api::VmType>::make_type(vm), } } }); quote! {{ let ctor_name = _gluon_base::symbol::Symbol::from(#ident); let typ = _gluon_base::types::Type::record( vec![], vec![#(#fields),*], ); _gluon_base::types::Field::ctor( ctor_name, vec![typ], ) }} } Fields::Unnamed(ref fields) => { let args = fields.unnamed.iter().map(|field| { let typ = &field.ty; quote! { <#typ as _gluon_api::VmType>::make_type(vm) } }); quote! {{ let ctor_name = _gluon_base::symbol::Symbol::from(#ident); _gluon_base::types::Field::ctor( ctor_name, vec![#(#args),*], ) }} } Fields::Unit => quote! {{ let ctor_name = _gluon_base::symbol::Symbol::from(#ident); _gluon_base::types::Field::ctor( ctor_name, vec![], ) }}, } //---------------------------------------------------- //---------------------------------------------------- }); quote! 
{ _gluon_base::types::Type::variant( vec![#(#variants),*] ) } } _ => panic!( "Only structs and enums can derive `VmType` without using the `vm_type` attribute" ), }, }; let associated_type_generics = generics.params.iter().map(|param| match param { GenericParam::Type(ty) => quote!( #ty :: Type ), GenericParam::Lifetime(_) => quote!( 'static ), GenericParam::Const(c) => quote!( #c ), }); let dummy_const = Ident::new(&format!("_IMPL_VM_TYPE_FOR_{}", ident), Span::call_site()); let make_type_impl = if container.newtype { let type_application = gen_type_application(&generics); let generic_params = map_type_params(&generics, |param| { let lower_param = param.to_string().to_ascii_lowercase(); quote! { vm.global_env().get_generic(#lower_param) } }); quote! { let ty = if let Ok(ty) = vm.find_type_info(stringify!(#ident)) { ty.into_type() } else { let ty = _gluon_base::types::Alias::new( _gluon_base::symbol::Symbol::from(stringify!(#ident)), vec![#(#generic_params),*], #make_type_impl, ); vm.cache_alias(ty) }; #type_application } } else { make_type_impl }; quote! { #[allow(non_upper_case_globals)] const #dummy_const: () = { #gluon #[automatically_derived] #[allow(unused_attributes, unused_variables)] impl #impl_generics _gluon_api::VmType for #ident #ty_generics #where_clause #(#trait_bounds,)* { type Type = #ident< #(#associated_type_generics),* >; fn make_type(vm: &_gluon_thread::Thread) -> _gluon_base::types::ArcType { #make_type_impl } } }; } } fn gen_type_application(generics: &Generics) -> TokenStream { let applications = map_type_params(generics, |param| { quote! { vec.push(<#param as _gluon_api::VmType>::make_type(vm)); } }); // if there are generic parameters, we use their types and apply them // to the type of the derive target to construct the concrete type if applications.is_empty() { quote! { ty } } else { quote! { let mut vec = _gluon_base::types::AppVec::new(); #(#applications)* _gluon_base::types::Type::app(ty, vec) } } }
38.292181
119
0.41784
dd5d0109613753fb5244ab38bffc32aba8ce98bf
14,703
// This file is part of Deskchains. // Copyright (C) 2020-2021 Deskchains Foundation. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <https://www.gnu.org/licenses/>. #![allow(clippy::upper_case_acronyms)] use ethereum_types::{H160, U256}; use frame_support::log; use jsonrpc_core::{Error, ErrorCode, Result, Value}; use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi; use rustc_hex::ToHex; use sc_rpc_api::DenyUnsafe; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::{Bytes, Decode}; use sp_rpc::number::NumberOrHex; use sp_runtime::{ codec::Codec, generic::BlockId, traits::{self, Block as BlockT, MaybeDisplay, MaybeFromStr}, SaturatedConversion, }; use std::convert::{TryFrom, TryInto}; use std::{marker::PhantomData, sync::Arc}; use call_request::{CallRequest, EstimateResourcesResponse}; pub use module_evm::{ExitError, ExitReason}; pub use module_evm_rpc_runtime_api::EVMRuntimeRPCApi; pub use crate::evm_api::{EVMApi as EVMApiT, EVMApiServer}; mod call_request; mod evm_api; fn internal_err<T: ToString>(message: T) -> Error { Error { code: ErrorCode::InternalError, message: message.to_string(), data: None, } } #[allow(dead_code)] fn error_on_execution_failure(reason: &ExitReason, data: &[u8]) -> Result<()> { match reason { ExitReason::Succeed(_) => Ok(()), ExitReason::Error(e) => { if *e == 
ExitError::OutOfGas { // `ServerError(0)` will be useful in estimate gas return Err(Error { code: ErrorCode::ServerError(0), message: "out of gas".to_string(), data: None, }); } Err(Error { code: ErrorCode::InternalError, message: format!("execution error: {:?}", e), data: Some(Value::String("0x".to_string())), }) } ExitReason::Revert(_) => Err(Error { code: ErrorCode::InternalError, message: decode_revert_message(data) .map_or("execution revert".into(), |data| format!("execution revert: {}", data)), data: Some(Value::String(format!("0x{}", data.to_hex::<String>()))), }), ExitReason::Fatal(e) => Err(Error { code: ErrorCode::InternalError, message: format!("execution fatal: {:?}", e), data: Some(Value::String("0x".to_string())), }), } } fn decode_revert_message(data: &[u8]) -> Option<String> { // A minimum size of error function selector (4) + offset (32) + string length // (32) should contain a utf-8 encoded revert reason. let msg_start: usize = 68; if data.len() > msg_start { let message_len = U256::from(&data[36..msg_start]).saturated_into::<usize>(); let msg_end = msg_start.checked_add(message_len)?; if data.len() < msg_end { return None; } let body: &[u8] = &data[msg_start..msg_end]; if let Ok(reason) = std::str::from_utf8(body) { return Some(reason.to_string()); } } None } pub struct EVMApi<B, C, Balance> { client: Arc<C>, deny_unsafe: DenyUnsafe, _marker: PhantomData<(B, Balance)>, } impl<B, C, Balance> EVMApi<B, C, Balance> { pub fn new(client: Arc<C>, deny_unsafe: DenyUnsafe) -> Self { Self { client, deny_unsafe, _marker: Default::default(), } } } fn to_u128(val: NumberOrHex) -> std::result::Result<u128, ()> { val.into_u256().try_into().map_err(|_| ()) } impl<B, C, Balance> EVMApiT<<B as BlockT>::Hash> for EVMApi<B, C, Balance> where B: BlockT, C: ProvideRuntimeApi<B> + HeaderBackend<B> + Send + Sync + 'static, C::Api: EVMRuntimeRPCApi<B, Balance>, C::Api: TransactionPaymentApi<B, Balance>, Balance: Codec + MaybeDisplay + MaybeFromStr + Default + Send + 
Sync + 'static + TryFrom<u128> + Into<U256>, { fn call(&self, request: CallRequest, at: Option<<B as BlockT>::Hash>) -> Result<Bytes> { self.deny_unsafe.check_if_safe()?; let hash = at.unwrap_or_else(|| self.client.info().best_hash); let CallRequest { from, to, gas_limit, storage_limit, value, data, } = request; let gas_limit = gas_limit.unwrap_or_else(u64::max_value); // TODO: set a limit let storage_limit = storage_limit.unwrap_or_else(u32::max_value); // TODO: set a limit let data = data.map(|d| d.0).unwrap_or_default(); let api = self.client.runtime_api(); let balance_value = if let Some(value) = value { to_u128(value).and_then(|v| TryInto::<Balance>::try_into(v).map_err(|_| ())) } else { Ok(Default::default()) }; let balance_value = balance_value.map_err(|_| Error { code: ErrorCode::InvalidParams, message: format!("Invalid parameter value: {:?}", value), data: None, })?; match to { Some(to) => { let info = api .call( &BlockId::Hash(hash), from.unwrap_or_default(), to, data, balance_value, gas_limit, storage_limit, true, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; log::debug!( target: "evm", "rpc call, info.exit_reason: {:?}, info.value: {:?}", info.exit_reason, info.value, ); error_on_execution_failure(&info.exit_reason, &info.value)?; Ok(Bytes(info.value)) } None => Err(Error { code: ErrorCode::InternalError, message: "Not supported".into(), data: None, }), } } fn estimate_resources( &self, from: H160, unsigned_extrinsic: Bytes, at: Option<<B as BlockT>::Hash>, ) -> Result<EstimateResourcesResponse> { self.deny_unsafe.check_if_safe()?; let hash = at.unwrap_or_else(|| self.client.info().best_hash); let request = self .client .runtime_api() .get_estimate_resources_request(&BlockId::Hash(hash), unsigned_extrinsic.to_vec()) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? 
.map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; // Determine the highest possible gas limits let max_gas_limit = u64::max_value(); // TODO: set a limit let mut highest = U256::from(request.gas_limit.unwrap_or(max_gas_limit)); let request = CallRequest { from: Some(from), to: request.to, gas_limit: request.gas_limit, storage_limit: request.storage_limit, value: request.value.map(|v| NumberOrHex::Hex(U256::from(v))), data: request.data.map(Bytes), }; log::debug!( target: "evm", "estimate_resources, from: {:?}, to: {:?}, gas_limit: {:?}, storage_limit: {:?}, value: {:?}, at_hash: {:?}", request.from, request.to, request.gas_limit, request.storage_limit, request.value, hash ); struct ExecutableResult { data: Vec<u8>, exit_reason: ExitReason, used_gas: U256, used_storage: i32, } // Create a helper to check if a gas allowance results in an executable transaction let executable = move |request: CallRequest, gas| -> Result<ExecutableResult> { let CallRequest { from, to, gas_limit, storage_limit, value, data, } = request; // Use request gas limit only if it less than gas_limit parameter let gas_limit = core::cmp::min(gas_limit.unwrap_or(gas), gas); let storage_limit = storage_limit.unwrap_or_else(u32::max_value); // TODO: set a limit let data = data.map(|d| d.0).unwrap_or_default(); let balance_value = if let Some(value) = value { to_u128(value).and_then(|v| TryInto::<Balance>::try_into(v).map_err(|_| ())) } else { Ok(Default::default()) }; let balance_value = balance_value.map_err(|_| Error { code: ErrorCode::InvalidParams, message: format!("Invalid parameter value: {:?}", value), data: None, })?; let (exit_reason, data, used_gas, used_storage) = match to { Some(to) => { let info = self .client .runtime_api() .call( &BlockId::Hash(hash), from.unwrap_or_default(), to, data, balance_value, gas_limit, storage_limit, true, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? 
.map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; (info.exit_reason, info.value, info.used_gas, info.used_storage) } None => { let info = self .client .runtime_api() .create( &BlockId::Hash(hash), from.unwrap_or_default(), data, balance_value, gas_limit, storage_limit, true, ) .map_err(|err| internal_err(format!("runtime error: {:?}", err)))? .map_err(|err| internal_err(format!("execution fatal: {:?}", err)))?; (info.exit_reason, Vec::new(), info.used_gas, info.used_storage) } }; Ok(ExecutableResult { exit_reason, data, used_gas, used_storage, }) }; // Verify that the transaction succeed with highest capacity let cap = highest; let ExecutableResult { data, exit_reason, used_gas, used_storage, } = executable(request.clone(), highest.as_u64())?; match exit_reason { ExitReason::Succeed(_) => (), ExitReason::Error(ExitError::OutOfGas) => { return Err(internal_err(format!("gas required exceeds allowance {}", cap))) } // If the transaction reverts, there are two possible cases, // it can revert because the called contract feels that it does not have enough // gas left to continue, or it can revert for another reason unrelated to gas. ExitReason::Revert(revert) => { if request.gas_limit.is_some() { // If the user has provided a gas limit, then we have executed // with less block gas limit, so we must reexecute with block gas limit to // know if the revert is due to a lack of gas or not. let ExecutableResult { data, exit_reason, .. } = executable(request.clone(), max_gas_limit)?; match exit_reason { ExitReason::Succeed(_) => { return Err(internal_err(format!("gas required exceeds allowance {}", cap))) } // The execution has been done with block gas limit, so it is not a lack of gas from the user. other => error_on_execution_failure(&other, &data)?, } } else { // The execution has already been done with block gas limit, so it is not a lack of gas from the // user. error_on_execution_failure(&ExitReason::Revert(revert), &data)? 
} } other => error_on_execution_failure(&other, &data)?, }; // rpc_binary_search_estimate block { // Define the lower bound of the binary search const MIN_GAS_PER_TX: U256 = U256([21_000, 0, 0, 0]); let mut lowest = MIN_GAS_PER_TX; // Start close to the used gas for faster binary search let mut mid = std::cmp::min(used_gas * 3, (highest + lowest) / 2); // Execute the binary search and hone in on an executable gas limit. let mut previous_highest = highest; while (highest - lowest) > U256::one() { let ExecutableResult { data, exit_reason, .. } = executable(request.clone(), mid.as_u64())?; match exit_reason { ExitReason::Succeed(_) => { highest = mid; // If the variation in the estimate is less than 10%, // then the estimate is considered sufficiently accurate. if (previous_highest - highest) * 10 / previous_highest < U256::one() { break; } previous_highest = highest; } ExitReason::Revert(_) | ExitReason::Error(ExitError::OutOfGas) => { lowest = mid; } other => error_on_execution_failure(&other, &data)?, } mid = (highest + lowest) / 2; } } let uxt: <B as traits::Block>::Extrinsic = Decode::decode(&mut &*unsigned_extrinsic).map_err(|e| Error { code: ErrorCode::InternalError, message: "Unable to dry run extrinsic.".into(), data: Some(format!("{:?}", e).into()), })?; let fee = self .client .runtime_api() .query_fee_details(&BlockId::Hash(hash), uxt, unsigned_extrinsic.len() as u32) .map_err(|e| Error { code: ErrorCode::InternalError, message: "Unable to query fee details.".into(), data: Some(format!("{:?}", e).into()), })?; let adjusted_weight_fee = fee .inclusion_fee .map_or_else(Default::default, |inclusion| inclusion.adjusted_weight_fee); Ok(EstimateResourcesResponse { gas: highest, storage: used_storage, weight_fee: adjusted_weight_fee.into(), }) } } #[test] fn decode_revert_message_should_work() { use sp_core::bytes::from_hex; assert_eq!(decode_revert_message(&vec![]), None); let data = 
from_hex("0x8c379a00000000000000000000000000000000000000000000000000000000000000020").unwrap(); assert_eq!(decode_revert_message(&data), None); let data = from_hex("0x8c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d6572726f72206d65737361676").unwrap(); assert_eq!(decode_revert_message(&data), None); let data = from_hex("0x8c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000d6572726f72206d65737361676500000000000000000000000000000000000000").unwrap(); assert_eq!(decode_revert_message(&data), Some("error message".into())); // ensures we protect against msg_start + message_len overflow let data = from_hex("0x9850188c1837189a0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000018d618571827182618f718220618d6185718371836161876").unwrap(); assert_eq!(decode_revert_message(&data), None); // ensures we protect against msg_start + message_len overflow let data = from_hex("0x9860189818501818188c181818371818189a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000181818d6181818571818182718181826181818f71818182206181818d61818185718181837181818361618181876").unwrap(); assert_eq!(decode_revert_message(&data), None); // ensures we protect against msg_start + message_len overflow let data = from_hex("0x98640818c3187918a0000000000000000000000000000000000000000000000000000000000000001820000000000000000000000000000000000000000000000000000000000000000d186518721872186f18721820186d18651873187318611867186500000000000000000000000000000000000000").unwrap(); assert_eq!(decode_revert_message(&data), None); }
33.264706
292
0.686595
fb7e500d3932a1b7ce8a8c09b02eda8923bbdbd7
6,426
use crate::parser; use crate::test_util::compare; use super::expand_precedence; use super::resolve::resolve; #[test] fn multilevel() { let grammar = parser::parse_grammar( r#" grammar; Expr: u32 = { #[precedence(level="1")] <left:Expr> "*" <right:Expr> => 0, #[precedence(level="1")] <left:Expr> "/" <right:Expr> => 0, #[precedence(level="2")] <left:Expr> "+" <right:Expr> => 0, #[precedence(level="2")] <left:Expr> "-" <right:Expr> => 0, #[precedence(level="3")] <left:Expr> "%" <right:Expr> => 0, } Ext: u32 = Expr; "#, ) .unwrap(); let expected = parser::parse_grammar( r#" grammar; Expr1: u32 = { <left:Expr1> "*" <right:Expr1> => 0, <left:Expr1> "/" <right:Expr1> => 0, } Expr2: u32 = { <left:Expr2> "+" <right:Expr2> => 0, <left:Expr2> "-" <right:Expr2> => 0, Expr1, } Expr: u32 = { <left:Expr> "%" <right:Expr> => 0, Expr2, } Ext: u32 = Expr; "#, ) .unwrap(); compare(expand_precedence(grammar), resolve(expected)); } #[test] fn with_assoc() { let grammar = parser::parse_grammar( r#" grammar; Expr: u32 = { #[precedence(level="1")] "const" => 0, "!" <Expr> => 0, #[precedence(level="2")] #[assoc(side="none")] "!!" <Expr> => 0, #[assoc(side="left")] "const2" => 0, #[precedence(level="2")] #[assoc(side="left")] <left:Expr> "*" <right:Expr> => 0, #[assoc(side="right")] <left:Expr> "/" <right:Expr> => 0, #[precedence(level="3")] #[assoc(side="left")] <left:Expr> "?" <middle:Expr> ":" <right:Expr> => 0, #[assoc(side="right")] <left:Expr> "|" <middle:Expr> "-" <right:Expr> => 0, #[assoc(side="none")] <left:Expr> "^" <middle:Expr> "$" <right:Expr> => 0, #[assoc(side="all")] <left:Expr> "[" <middle:Expr> ";" <right:Expr> => 0, } "#, ) .unwrap(); let expected = parser::parse_grammar( r#" grammar; Expr1: u32 = { "const" => 0, "!" <Expr1> => 0, } Expr2: u32 = { "!!" <Expr1> => 0, "const2" => 0, <left:Expr2> "*" <right:Expr1> => 0, <left:Expr1> "/" <right:Expr2> => 0, Expr1, } Expr: u32 = { <left:Expr> "?" 
<middle:Expr2> ":" <right:Expr2> => 0, <left:Expr2> "|" <middle:Expr2> "-" <right:Expr> => 0, <left:Expr2> "^" <middle:Expr2> "$" <right:Expr2> => 0, <left:Expr> "[" <middle:Expr> ";" <right:Expr> => 0, Expr2, } "#, ) .unwrap(); compare(expand_precedence(grammar), resolve(expected)); } #[test] fn non_consecutive_levels() { let grammar = parser::parse_grammar( r#" grammar; Expr: u32 = { #[precedence(level="5")] #[assoc(side="left")] <left:Expr> "?" <middle:Expr> ":" <right:Expr> => 0, #[assoc(side="right")] <left:Expr> "|" <middle:Expr> "-" <right:Expr> => 0, #[assoc(side="none")] <left:Expr> "^" <middle:Expr> "$" <right:Expr> => 0, #[assoc(side="all")] <left:Expr> "[" <middle:Expr> ";" <right:Expr> => 0, #[precedence(level="0")] "const" => 0, "!" <Expr> => 0, #[precedence(level="3")] #[assoc(side="none")] "!!" <Expr> => 0, #[assoc(side="left")] "const2" => 0, <left:Expr> "*" <right:Expr> => 0, #[assoc(side="right")] <left:Expr> "/" <right:Expr> => 0, } "#, ) .unwrap(); let expected = parser::parse_grammar( r#" grammar; Expr0: u32 = { "const" => 0, "!" <Expr0> => 0, } Expr3: u32 = { "!!" <Expr0> => 0, "const2" => 0, <left:Expr3> "*" <right:Expr0> => 0, <left:Expr0> "/" <right:Expr3> => 0, Expr0, } Expr: u32 = { <left:Expr> "?" 
<middle:Expr3> ":" <right:Expr3> => 0, <left:Expr3> "|" <middle:Expr3> "-" <right:Expr> => 0, <left:Expr3> "^" <middle:Expr3> "$" <right:Expr3> => 0, <left:Expr> "[" <middle:Expr> ";" <right:Expr> => 0, Expr3, } "#, ) .unwrap(); compare(expand_precedence(grammar), resolve(expected)); } #[test] fn macros() { let grammar = parser::parse_grammar( r#" grammar; Expr: u32 = { #[precedence(level="1")] "const" => 0, #[precedence(level="2")] #[assoc(side="left")] MacroOp<OpTimes, Expr, Expr> => 0, #[precedence(level="3")] #[assoc(side="right")] MacroOp<OpPlus, Expr, Expr> => 0, } MacroOp<Op, RuleLeft, RuleRight>: u32 = <left: RuleLeft> <op: Op> <right: RuleRight> => 0; OpTimes: () = "*" => (); OpPlus: () = "+" => (); Ext: u32 = Expr; "#, ) .unwrap(); let expected = parser::parse_grammar( r#" grammar; Expr1: u32 = { "const" => 0, } Expr2: u32 = { MacroOp<OpTimes, Expr2, Expr1> => 0, Expr1, } Expr: u32 = { MacroOp<OpPlus, Expr2, Expr> => 0, Expr2, } MacroOp<Op, RuleLeft, RuleRight>: u32 = <left: RuleLeft> <op: Op> <right: RuleRight> => 0; OpTimes: () = "*" => (); OpPlus: () = "+" => (); Ext: u32 = Expr; "#, ) .unwrap(); compare(expand_precedence(grammar), resolve(expected)); } #[test] fn calculator() { let grammar = parser::parse_grammar( r#" grammar; Expr: i32 = { #[precedence(lvl="0")] Num, "(" <Expr> ")", #[precedence(lvl="1")] #[assoc(side="left")] <l:Expr> "*" <r:Expr> => l * r, <l:Expr> "/" <r:Expr> => l / r, #[precedence(lvl="2")] #[assoc(side="left")] <l:Expr> "+" <r:Expr> => l + r, <l:Expr> "-" <r:Expr> => l - r, }; Num: i32 = { r"[0-9]+" => i32::from_str(<>).unwrap(), }; "#, ) .unwrap(); let expected = parser::parse_grammar( r#" grammar; Expr0: i32 = { Num, "(" <Expr0> ")", } Expr1: i32 = { <l:Expr1> "*" <r:Expr0> => l * r, <l:Expr1> "/" <r:Expr0> => l / r, Expr0, }; Expr: i32 = { <l:Expr> "+" <r:Expr1> => l + r, <l:Expr> "-" <r:Expr1> => l - r, Expr1, }; Num: i32 = { r"[0-9]+" => i32::from_str(<>).unwrap(), }; "#, ) .unwrap(); compare(expand_precedence(grammar), 
resolve(expected)); }
21
94
0.455649
f9a523641e41e1dbef4377388c1fa82fc5aab0cd
606
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn main() { let mut v = vec!(1); let mut f = |&mut:| v.push(2); let _w = v; //~ ERROR: cannot move out of `v` f(); }
33.666667
69
0.681518
dd036447ed9442401b4fe4ba36690cca3d698750
847
pub use self::client::Client; pub use self::error::Error; pub use self::publisher::{Publisher, PublisherStream}; pub use self::service::Service; pub use self::subscriber::Subscriber; use rosmsg::RosMsg; use std::sync::atomic::AtomicUsize; use std::sync::Arc; use Clock; mod client; pub mod error; mod header; mod publisher; mod service; mod subscriber; mod util; pub type ServiceResult<T> = Result<T, String>; pub trait Message: RosMsg + Send + 'static { fn msg_definition() -> String; fn md5sum() -> String; fn msg_type() -> String; fn set_header(&mut self, _clock: &Arc<Clock>, _seq: &Arc<AtomicUsize>) {} } pub trait ServicePair: Message { type Request: RosMsg + Send + 'static; type Response: RosMsg + Send + 'static; } #[derive(Clone, Debug)] pub struct Topic { pub name: String, pub msg_type: String, }
21.717949
77
0.687131
e5d0cde69c9be2452bc1271fdcecac1440711c49
6,458
//! Collection of composition types. pub mod basic_f32; pub mod basic_premultiplied_f32; #[cfg(feature = "image-crate")] pub mod image_rgb_rgba; /// Compositor attributes pub trait CompositorAttr { /// If true, the compositor requires updating destinations even alpha is zero. fn keep_dst_on_transparent_src(&self) -> bool; } /// Compositor composites two pixels with alpha value. pub trait Compositor<T>: CompositorAttr { fn composite(&self, dst: &T, src: &T, alpha: f64) -> T; } #[derive(Clone)] pub struct Clear; #[derive(Clone)] pub struct Src; #[derive(Clone)] pub struct Dst; #[derive(Clone)] pub struct SrcOver; #[derive(Clone)] pub struct SrcIn; #[derive(Clone)] pub struct SrcOut; #[derive(Clone)] pub struct SrcAtop; #[derive(Clone)] pub struct DstOver; #[derive(Clone)] pub struct DstIn; #[derive(Clone)] pub struct DstOut; #[derive(Clone)] pub struct DstAtop; #[derive(Clone)] pub struct Xor; #[derive(Clone)] pub struct Add; #[derive(Clone)] pub struct Darken; #[derive(Clone)] pub struct Lighten; #[derive(Clone)] pub struct Multiply; #[derive(Clone)] pub struct Screen; #[derive(Clone)] pub struct Overlay; #[derive(Clone)] pub struct HardLight; #[derive(Clone)] pub struct Dodge; #[derive(Clone)] pub struct Burn; #[derive(Clone)] pub struct SoftLight; #[derive(Clone)] pub struct Difference; #[derive(Clone)] pub struct Exclusion; /// Dynamically composition type. 
#[derive(Clone)] pub enum Basic { Clear, Src, Dst, SrcOver, SrcIn, SrcOut, SrcAtop, DstOver, DstIn, DstOut, DstAtop, Xor, Add, Darken, Lighten, Multiply, Screen, Overlay, HardLight, Dodge, Burn, SoftLight, Difference, Exclusion, } impl CompositorAttr for Clear {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for Src {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for Dst {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for SrcOver {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for SrcIn {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for SrcOut {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for SrcAtop {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for DstOver {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for DstIn {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for DstOut {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for DstAtop {fn keep_dst_on_transparent_src(&self) -> bool {false}} impl CompositorAttr for Xor {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Add {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Darken {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Lighten {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Multiply {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Screen {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Overlay {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for HardLight {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Dodge {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Burn {fn 
keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for SoftLight {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Difference {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Exclusion {fn keep_dst_on_transparent_src(&self) -> bool {true}} impl CompositorAttr for Basic { fn keep_dst_on_transparent_src(&self) -> bool { use Basic::*; match self { Clear => Clear.keep_dst_on_transparent_src(), Src => Src.keep_dst_on_transparent_src(), Dst => Dst.keep_dst_on_transparent_src(), SrcOver => SrcOver.keep_dst_on_transparent_src(), SrcIn => SrcIn.keep_dst_on_transparent_src(), SrcOut => SrcOut.keep_dst_on_transparent_src(), SrcAtop => SrcAtop.keep_dst_on_transparent_src(), DstOver => DstOver.keep_dst_on_transparent_src(), DstIn => DstIn.keep_dst_on_transparent_src(), DstOut => DstOut.keep_dst_on_transparent_src(), DstAtop => DstAtop.keep_dst_on_transparent_src(), Xor => Xor.keep_dst_on_transparent_src(), Add => Add.keep_dst_on_transparent_src(), Darken => Darken.keep_dst_on_transparent_src(), Lighten => Lighten.keep_dst_on_transparent_src(), Multiply => Multiply.keep_dst_on_transparent_src(), Screen => Screen.keep_dst_on_transparent_src(), Overlay => Overlay.keep_dst_on_transparent_src(), HardLight => HardLight.keep_dst_on_transparent_src(), Dodge => Dodge.keep_dst_on_transparent_src(), Burn => Burn.keep_dst_on_transparent_src(), SoftLight => SoftLight.keep_dst_on_transparent_src(), Difference => Difference.keep_dst_on_transparent_src(), Exclusion => Exclusion.keep_dst_on_transparent_src(), } } } /// For Measuring performance. 
pub mod perf { use crate::pixel::Rgba; use super::*; #[derive(Clone)] pub struct Perf; impl CompositorAttr for Perf { fn keep_dst_on_transparent_src(&self) -> bool { false } } #[cfg(feature = "image-crate")] impl Compositor<image::Rgba<u8>> for Perf { #[allow(unused_variables)] fn composite(&self, a: &image::Rgba<u8>, b: &image::Rgba<u8>, alpha: f64) -> image::Rgba<u8> { image::Rgba([a.0[0], b.0[0], alpha as u8, 255]) } } #[cfg(feature = "image-crate")] impl Compositor<image::Rgb<u8>> for Perf { #[allow(unused_variables)] fn composite(&self, a: &image::Rgb<u8>, b: &image::Rgb<u8>, alpha: f64) -> image::Rgb<u8> { image::Rgb([a.0[0], b.0[0], alpha as u8]) } } impl Compositor<Rgba> for Perf { #[allow(unused_variables)] fn composite(&self, a: &Rgba, b: &Rgba, alpha: f64) -> Rgba { Rgba([a.0[0], b.0[0], alpha as f32, 1.0]) } } }
33.811518
102
0.682874
4847073ebb9247079d4779f8b755706cd924d8d6
1,482
#[doc = r" Value read from the register"] pub struct R { bits: u8, } #[doc = r" Value to write to the register"] pub struct W { bits: u8, } impl super::UCA0IRTCTL { #[doc = r" Modifies the contents of the register"] #[inline] pub fn modify<F>(&self, f: F) where for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W, { let bits = self.register.get(); let r = R { bits: bits }; let mut w = W { bits: bits }; f(&r, &mut w); self.register.set(w.bits); } #[doc = r" Reads the contents of the register"] #[inline] pub fn read(&self) -> R { R { bits: self.register.get(), } } #[doc = r" Writes to the register"] #[inline] pub fn write<F>(&self, f: F) where F: FnOnce(&mut W) -> &mut W, { let mut w = W::reset_value(); f(&mut w); self.register.set(w.bits); } #[doc = r" Writes the reset value to the register"] #[inline] pub fn reset(&self) { self.write(|w| w) } } impl R { #[doc = r" Value of the register as raw bits"] #[inline] pub fn bits(&self) -> u8 { self.bits } } impl W { #[doc = r" Reset value of the register"] #[inline] pub fn reset_value() -> W { W { bits: 0 } } #[doc = r" Writes raw bits to the register"] #[inline] pub unsafe fn bits(&mut self, bits: u8) -> &mut Self { self.bits = bits; self } }
22.8
58
0.495951
bb1830a147e43ec196affc82aba84d01ed57941c
11,102
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Linux-specific raw type definitions #![stable(feature = "raw_ext", since = "1.1.0")] #![rustc_deprecated(since = "1.8.0", reason = "these type aliases are no longer supported by \ the standard library, the `libc` crate on \ crates.io should be used instead for the correct \ definitions")] #![allow(deprecated)] #![allow(missing_debug_implementations)] use os::raw::c_ulong; #[stable(feature = "raw_ext", since = "1.1.0")] pub type dev_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type mode_t = u32; #[stable(feature = "pthread_t", since = "1.8.0")] pub type pthread_t = c_ulong; #[doc(inline)] #[stable(feature = "raw_ext", since = "1.1.0")] pub use self::arch::{off_t, ino_t, nlink_t, blksize_t, blkcnt_t, stat, time_t}; #[cfg(any(target_arch = "x86", target_arch = "le32", target_arch = "powerpc", target_arch = "arm", target_arch = "asmjs", target_arch = "wasm32"))] mod arch { use os::raw::{c_long, c_short, c_uint}; #[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64; #[repr(C)] #[derive(Clone)] #[stable(feature = "raw_ext", since = "1.1.0")] pub struct stat { #[stable(feature = "raw_ext", since = "1.1.0")] pub st_dev: u64, 
#[stable(feature = "raw_ext", since = "1.1.0")] pub __pad1: c_short, #[stable(feature = "raw_ext", since = "1.1.0")] pub __st_ino: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mode: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_nlink: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_uid: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_gid: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_rdev: u64, #[stable(feature = "raw_ext", since = "1.1.0")] pub __pad2: c_uint, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_size: i64, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_blksize: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_blocks: i64, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_atime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_atime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mtime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mtime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ctime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ctime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ino: u64, } } #[cfg(target_arch = "mips")] mod arch { use os::raw::{c_long, c_ulong}; #[cfg(target_env = "musl")] #[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = i64; #[cfg(not(target_env = "musl"))] #[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64; #[cfg(target_env = "musl")] #[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64; #[cfg(not(target_env = "musl"))] #[stable(feature = "raw_ext", since = "1.1.0")] pub type ino_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type nlink_t = u64; #[cfg(target_env = "musl")] #[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64; #[cfg(not(target_env = "musl"))] 
#[stable(feature = "raw_ext", since = "1.1.0")] pub type off_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type time_t = i64; #[repr(C)] #[derive(Clone)] #[stable(feature = "raw_ext", since = "1.1.0")] pub struct stat { #[stable(feature = "raw_ext", since = "1.1.0")] pub st_dev: c_ulong, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_pad1: [c_long; 3], #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ino: u64, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mode: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_nlink: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_uid: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_gid: u32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_rdev: c_ulong, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_pad2: [c_long; 2], #[stable(feature = "raw_ext", since = "1.1.0")] pub st_size: i64, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_atime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_atime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mtime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_mtime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ctime: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_ctime_nsec: c_long, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_blksize: i32, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_blocks: i64, #[stable(feature = "raw_ext", since = "1.1.0")] pub st_pad5: [c_long; 14], } } #[cfg(any(target_arch = "mips64", target_arch = "s390x", target_arch = "sparc64"))] mod arch { pub use libc::{off_t, ino_t, nlink_t, blksize_t, blkcnt_t, stat, time_t}; } #[cfg(target_arch = "aarch64")] mod arch { use os::raw::{c_long, c_int}; #[stable(feature = "raw_ext", since = "1.1.0")] pub type blkcnt_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] pub type blksize_t = u64; #[stable(feature = "raw_ext", since = "1.1.0")] 
// NOTE(review): this span is the tail of one `mod arch` (its opening line and
// `cfg(target_arch)` gate are outside this chunk — layout with `__pad1`/`__pad2`
// resembles the AArch64 `stat`; TODO confirm upstream), followed by the complete
// x86_64/powerpc64 `mod arch`. Field order and padding mirror the kernel ABI —
// do not reorder or resize fields.
pub type ino_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type nlink_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type off_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type time_t = i64;

#[repr(C)]
#[derive(Clone)]
#[stable(feature = "raw_ext", since = "1.1.0")]
pub struct stat {
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_dev: u64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_ino: u64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_mode: u32,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_nlink: u32,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_uid: u32,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_gid: u32,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_rdev: u64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub __pad1: u64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_size: i64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_blksize: i32,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub __pad2: c_int,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_blocks: i64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_atime: i64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_atime_nsec: c_long,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_mtime: i64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_mtime_nsec: c_long,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_ctime: i64,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub st_ctime_nsec: c_long,
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub __unused: [c_int; 2],
}
}

// x86_64 / powerpc64 layout: note the different field order (st_nlink before
// st_mode), `__pad0` instead of `__pad1`/`__pad2`, and a 3-element tail pad.
#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
mod arch {
    use os::raw::{c_long, c_int};

    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type blkcnt_t = u64;
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type blksize_t = u64;
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type ino_t = u64;
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type nlink_t = u64;
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type off_t = u64;
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub type time_t = i64;

    #[repr(C)]
    #[derive(Clone)]
    #[stable(feature = "raw_ext", since = "1.1.0")]
    pub struct stat {
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_dev: u64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_ino: u64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_nlink: u64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_mode: u32,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_uid: u32,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_gid: u32,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub __pad0: c_int,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_rdev: u64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_size: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_blksize: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_blocks: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_atime: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_atime_nsec: c_long,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_mtime: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_mtime_nsec: c_long,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_ctime: i64,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub st_ctime_nsec: c_long,
        #[stable(feature = "raw_ext", since = "1.1.0")]
        pub __unused: [c_long; 3],
    }
}
40.224638
80
0.545307
d7ad55c00700dbf6748e49f1f728dafdf46823cc
41,629
//! Instruction types

use crate::{
    error::LendingError,
    state::{ReserveConfig, ReserveFees},
};
use solana_program::{
    instruction::{AccountMeta, Instruction},
    msg,
    program_error::ProgramError,
    pubkey::{Pubkey, PUBKEY_BYTES},
    sysvar,
};
use std::{convert::TryInto, mem::size_of};

/// Instructions supported by the lending program.
///
/// NOTE: the variant order (and the tag numbers in the `// N` markers) is the
/// wire format — `pack`/`unpack` encode the discriminant as the first byte, so
/// new variants must only ever be appended.
#[derive(Clone, Debug, PartialEq)]
pub enum LendingInstruction {
    // 0
    /// Initializes a new lending market.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Lending market account - uninitialized.
    ///   1. `[]` Rent sysvar.
    ///   2. `[]` Token program id.
    ///   3. `[]` Oracle program id.
    InitLendingMarket {
        /// Owner authority which can add new reserves
        owner: Pubkey,
        /// Currency market prices are quoted in
        /// e.g. "USD" null padded (`*b"USD\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"`) or SPL token mint pubkey
        quote_currency: [u8; 32],
    },

    // 1
    /// Sets the new owner of a lending market.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Lending market account.
    ///   1. `[signer]` Current owner.
    SetLendingMarketOwner {
        /// The new owner
        new_owner: Pubkey,
    },

    // 2
    /// Initializes a new lending market reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source liquidity token account.
    ///                     $authority can transfer $liquidity_amount.
    ///   1. `[writable]` Destination collateral token account - uninitialized.
    ///   2. `[writable]` Reserve account - uninitialized.
    ///   3. `[]` Reserve liquidity SPL Token mint.
    ///   4. `[writable]` Reserve liquidity supply SPL Token account - uninitialized.
    ///   5. `[writable]` Reserve liquidity fee receiver - uninitialized.
    ///   6. `[writable]` Reserve collateral SPL Token mint - uninitialized.
    ///   7. `[writable]` Reserve collateral token supply - uninitialized.
    ///   8. `[]` Pyth product account.
    ///   9. `[]` Pyth price account.
    ///             This will be used as the reserve liquidity oracle account.
    ///   10 `[]` Lending market account.
    ///   11 `[]` Derived lending market authority.
    ///   12 `[signer]` Lending market owner.
    ///   13 `[signer]` User transfer authority ($authority).
    ///   14 `[]` Clock sysvar.
    ///   15 `[]` Rent sysvar.
    ///   16 `[]` Token program id.
    InitReserve {
        /// Initial amount of liquidity to deposit into the new reserve
        liquidity_amount: u64,
        /// Reserve configuration values
        config: ReserveConfig,
    },

    // 3
    /// Accrue interest and update market price of liquidity on a reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Reserve account.
    ///   1. `[]` Reserve liquidity oracle account.
    ///             Must be the Pyth price account specified at InitReserve.
    ///   2. `[]` Clock sysvar.
    RefreshReserve,

    // 4
    /// Deposit liquidity into a reserve in exchange for collateral. Collateral represents a share
    /// of the reserve liquidity pool.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source liquidity token account.
    ///                     $authority can transfer $liquidity_amount.
    ///   1. `[writable]` Destination collateral token account.
    ///   2. `[writable]` Reserve account.
    ///   3. `[writable]` Reserve liquidity supply SPL Token account.
    ///   4. `[writable]` Reserve collateral SPL Token mint.
    ///   5. `[]` Lending market account.
    ///   6. `[]` Derived lending market authority.
    ///   7. `[signer]` User transfer authority ($authority).
    ///   8. `[]` Clock sysvar.
    ///   9. `[]` Token program id.
    DepositReserveLiquidity {
        /// Amount of liquidity to deposit in exchange for collateral tokens
        liquidity_amount: u64,
    },

    // 5
    /// Redeem collateral from a reserve in exchange for liquidity.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source collateral token account.
    ///                     $authority can transfer $collateral_amount.
    ///   1. `[writable]` Destination liquidity token account.
    ///   2. `[writable]` Reserve account.
    ///   3. `[writable]` Reserve collateral SPL Token mint.
    ///   4. `[writable]` Reserve liquidity supply SPL Token account.
    ///   5. `[]` Lending market account.
    ///   6. `[]` Derived lending market authority.
    ///   7. `[signer]` User transfer authority ($authority).
    ///   8. `[]` Clock sysvar.
    ///   9. `[]` Token program id.
    RedeemReserveCollateral {
        /// Amount of collateral tokens to redeem in exchange for liquidity
        collateral_amount: u64,
    },

    // 6
    /// Initializes a new lending market obligation.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Obligation account - uninitialized.
    ///   1. `[]` Lending market account.
    ///   2. `[signer]` Obligation owner.
    ///   3. `[]` Clock sysvar.
    ///   4. `[]` Rent sysvar.
    ///   5. `[]` Token program id.
    InitObligation,

    // 7
    /// Refresh an obligation's accrued interest and collateral and liquidity prices. Requires
    /// refreshed reserves, as all obligation collateral deposit reserves in order, followed by all
    /// liquidity borrow reserves in order.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Obligation account.
    ///   1. `[]` Clock sysvar.
    ///   .. `[]` Collateral deposit reserve accounts - refreshed, all, in order.
    ///   .. `[]` Liquidity borrow reserve accounts - refreshed, all, in order.
    RefreshObligation,

    // 8
    /// Deposit collateral to an obligation. Requires a refreshed reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source collateral token account.
    ///                     Minted by deposit reserve collateral mint.
    ///                     $authority can transfer $collateral_amount.
    ///   1. `[writable]` Destination deposit reserve collateral supply SPL Token account.
    ///   2. `[]` Deposit reserve account - refreshed.
    ///   3. `[writable]` Obligation account.
    ///   4. `[]` Lending market account.
    ///   5. `[signer]` Obligation owner.
    ///   6. `[signer]` User transfer authority ($authority).
    ///   7. `[]` Clock sysvar.
    ///   8. `[]` Token program id.
    DepositObligationCollateral {
        /// Amount of collateral tokens to deposit
        collateral_amount: u64,
    },

    // 9
    /// Withdraw collateral from an obligation. Requires a refreshed obligation and reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source withdraw reserve collateral supply SPL Token account.
    ///   1. `[writable]` Destination collateral token account.
    ///                     Minted by withdraw reserve collateral mint.
    ///   2. `[]` Withdraw reserve account - refreshed.
    ///   3. `[writable]` Obligation account - refreshed.
    ///   4. `[]` Lending market account.
    ///   5. `[]` Derived lending market authority.
    ///   6. `[signer]` Obligation owner.
    ///   7. `[]` Clock sysvar.
    ///   8. `[]` Token program id.
    WithdrawObligationCollateral {
        /// Amount of collateral tokens to withdraw - u64::MAX for up to 100% of deposited amount
        collateral_amount: u64,
    },

    // 10
    /// Borrow liquidity from a reserve by depositing collateral tokens. Requires a refreshed
    /// obligation and reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source borrow reserve liquidity supply SPL Token account.
    ///   1. `[writable]` Destination liquidity token account.
    ///                     Minted by borrow reserve liquidity mint.
    ///   2. `[writable]` Borrow reserve account - refreshed.
    ///   3. `[writable]` Borrow reserve liquidity fee receiver account.
    ///                     Must be the fee account specified at InitReserve.
    ///   4. `[writable]` Obligation account - refreshed.
    ///   5. `[]` Lending market account.
    ///   6. `[]` Derived lending market authority.
    ///   7. `[signer]` Obligation owner.
    ///   8. `[]` Clock sysvar.
    ///   9. `[]` Token program id.
    ///   10 `[optional, writable]` Host fee receiver account.
    BorrowObligationLiquidity {
        /// Amount of liquidity to borrow - u64::MAX for 100% of borrowing power
        liquidity_amount: u64,
        // @TODO: slippage constraint - https://git.io/JmV67
    },

    // 11
    /// Repay borrowed liquidity to a reserve. Requires a refreshed obligation and reserve.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source liquidity token account.
    ///                     Minted by repay reserve liquidity mint.
    ///                     $authority can transfer $liquidity_amount.
    ///   1. `[writable]` Destination repay reserve liquidity supply SPL Token account.
    ///   2. `[writable]` Repay reserve account - refreshed.
    ///   3. `[writable]` Obligation account - refreshed.
    ///   4. `[]` Lending market account.
    ///   5. `[signer]` User transfer authority ($authority).
    ///   6. `[]` Clock sysvar.
    ///   7. `[]` Token program id.
    RepayObligationLiquidity {
        /// Amount of liquidity to repay - u64::MAX for 100% of borrowed amount
        liquidity_amount: u64,
    },

    // 12
    /// Repay borrowed liquidity to a reserve to receive collateral at a discount from an unhealthy
    /// obligation. Requires a refreshed obligation and reserves.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source liquidity token account.
    ///                     Minted by repay reserve liquidity mint.
    ///                     $authority can transfer $liquidity_amount.
    ///   1. `[writable]` Destination collateral token account.
    ///                     Minted by withdraw reserve collateral mint.
    ///   2. `[writable]` Repay reserve account - refreshed.
    ///   3. `[writable]` Repay reserve liquidity supply SPL Token account.
    ///   4. `[]` Withdraw reserve account - refreshed.
    ///   5. `[writable]` Withdraw reserve collateral supply SPL Token account.
    ///   6. `[writable]` Obligation account - refreshed.
    ///   7. `[]` Lending market account.
    ///   8. `[]` Derived lending market authority.
    ///   9. `[signer]` User transfer authority ($authority).
    ///   10 `[]` Clock sysvar.
    ///   11 `[]` Token program id.
    LiquidateObligation {
        /// Amount of liquidity to repay - u64::MAX for up to 100% of borrowed amount
        liquidity_amount: u64,
    },

    // 13
    /// Make a flash loan.
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Source liquidity token account.
    ///                     Minted by reserve liquidity mint.
    ///                     Must match the reserve liquidity supply.
    ///   1. `[writable]` Destination liquidity token account.
    ///                     Minted by reserve liquidity mint.
    ///   2. `[writable]` Reserve account.
    ///   3. `[writable]` Flash loan fee receiver account.
    ///                     Must match the reserve liquidity fee receiver.
    ///   4. `[writable]` Host fee receiver.
    ///   5. `[]` Lending market account.
    ///   6. `[]` Derived lending market authority.
    ///   7. `[]` Token program id.
    ///   8. `[]` Flash loan receiver program id.
    ///             Must implement an instruction that has tag of 0 and a signature of `(amount: u64)`
    ///             This instruction must return the amount to the source liquidity account.
    ///   .. `[any]` Additional accounts expected by the receiving program's `ReceiveFlashLoan` instruction.
    ///
    /// The flash loan receiver program that is to be invoked should contain an instruction with
    /// tag `0` and accept the total amount (including fee) that needs to be returned back after
    /// its execution has completed.
    ///
    /// Flash loan receiver should have an instruction with the following signature:
    ///
    ///   0. `[writable]` Source liquidity (matching the destination from above).
    ///   1. `[writable]` Destination liquidity (matching the source from above).
    ///   2. `[]` Token program id
    ///   .. `[any]` Additional accounts provided to the lending program's `FlashLoan` instruction above.
    ///   ReceiveFlashLoan {
    ///       // Amount that must be repaid by the receiver program
    ///       amount: u64
    ///   }
    FlashLoan {
        /// The amount that is to be borrowed - u64::MAX for up to 100% of available liquidity
        amount: u64,
    },

    ///14
    /// Closes obligation account to retrieve SOL rent from the obligation account to loanee
    /// Only supports non-native accounts if balances are zero
    ///
    /// Accounts expected by this instruction:
    ///
    ///   0. `[writable]` Obligation account
    ///   1. `[writable]` Obligation owner
    ///   2. `[writable]` Destination account
    ///   3. `[writable]` Collateral reserve (source) account
    ///   4. `[]` Lending market account
    ///   5. `[signer]` Transfer (lending market) authority account
    ///   6. `[]` Token program id
    CloseObligationAccount,
}

impl LendingInstruction {
    /// Unpacks a byte buffer into a [LendingInstruction](enum.LendingInstruction.html).
pub fn unpack(input: &[u8]) -> Result<Self, ProgramError> {
    // First byte is the instruction tag; the remainder is tag-specific payload.
    let (&tag, rest) = input
        .split_first()
        .ok_or(LendingError::InstructionUnpackError)?;
    Ok(match tag {
        0 => {
            let (owner, rest) = Self::unpack_pubkey(rest)?;
            let (quote_currency, _rest) = Self::unpack_bytes32(rest)?;
            Self::InitLendingMarket {
                owner,
                quote_currency: *quote_currency,
            }
        }
        1 => {
            let (new_owner, _rest) = Self::unpack_pubkey(rest)?;
            Self::SetLendingMarketOwner { new_owner }
        }
        2 => {
            // Field order must mirror `pack` exactly — this is the wire format.
            let (liquidity_amount, rest) = Self::unpack_u64(rest)?;
            let (optimal_utilization_rate, rest) = Self::unpack_u8(rest)?;
            let (loan_to_value_ratio, rest) = Self::unpack_u8(rest)?;
            let (liquidation_bonus, rest) = Self::unpack_u8(rest)?;
            let (liquidation_threshold, rest) = Self::unpack_u8(rest)?;
            let (min_borrow_rate, rest) = Self::unpack_u8(rest)?;
            let (optimal_borrow_rate, rest) = Self::unpack_u8(rest)?;
            let (max_borrow_rate, rest) = Self::unpack_u8(rest)?;
            let (borrow_fee_wad, rest) = Self::unpack_u64(rest)?;
            let (flash_loan_fee_wad, rest) = Self::unpack_u64(rest)?;
            let (host_fee_percentage, _rest) = Self::unpack_u8(rest)?;
            Self::InitReserve {
                liquidity_amount,
                config: ReserveConfig {
                    optimal_utilization_rate,
                    loan_to_value_ratio,
                    liquidation_bonus,
                    liquidation_threshold,
                    min_borrow_rate,
                    optimal_borrow_rate,
                    max_borrow_rate,
                    fees: ReserveFees {
                        borrow_fee_wad,
                        flash_loan_fee_wad,
                        host_fee_percentage,
                    },
                },
            }
        }
        3 => Self::RefreshReserve,
        4 => {
            let (liquidity_amount, _rest) = Self::unpack_u64(rest)?;
            Self::DepositReserveLiquidity { liquidity_amount }
        }
        5 => {
            let (collateral_amount, _rest) = Self::unpack_u64(rest)?;
            Self::RedeemReserveCollateral { collateral_amount }
        }
        6 => Self::InitObligation,
        7 => Self::RefreshObligation,
        8 => {
            let (collateral_amount, _rest) = Self::unpack_u64(rest)?;
            Self::DepositObligationCollateral { collateral_amount }
        }
        9 => {
            let (collateral_amount, _rest) = Self::unpack_u64(rest)?;
            Self::WithdrawObligationCollateral { collateral_amount }
        }
        10 => {
            let (liquidity_amount, _rest) = Self::unpack_u64(rest)?;
            Self::BorrowObligationLiquidity { liquidity_amount }
        }
        11 => {
            let (liquidity_amount, _rest) = Self::unpack_u64(rest)?;
            Self::RepayObligationLiquidity { liquidity_amount }
        }
        12 => {
            let (liquidity_amount, _rest) = Self::unpack_u64(rest)?;
            Self::LiquidateObligation { liquidity_amount }
        }
        13 => {
            let (amount, _rest) = Self::unpack_u64(rest)?;
            Self::FlashLoan { amount }
        }
        14 => Self::CloseObligationAccount,
        _ => {
            msg!("Instruction cannot be unpacked");
            return Err(LendingError::InstructionUnpackError.into());
        }
    })
}

/// Reads a little-endian `u64` from the front of `input`, returning the value
/// and the remaining bytes.
fn unpack_u64(input: &[u8]) -> Result<(u64, &[u8]), ProgramError> {
    if input.len() < 8 {
        msg!("u64 cannot be unpacked");
        return Err(LendingError::InstructionUnpackError.into());
    }
    // `split_at(8)` guarantees `bytes` is exactly 8 bytes, so the array
    // conversion cannot fail in practice; the previous `.get(..8)` re-check
    // after the explicit length check was redundant.
    let (bytes, rest) = input.split_at(8);
    let value = u64::from_le_bytes(
        bytes
            .try_into()
            .map_err(|_| LendingError::InstructionUnpackError)?,
    );
    Ok((value, rest))
}

/// Reads a single byte from the front of `input`, returning the value and the
/// remaining bytes.
fn unpack_u8(input: &[u8]) -> Result<(u8, &[u8]), ProgramError> {
    // The first byte of the slice *is* the value; no conversion needed
    // (previously the code re-sliced and round-tripped through from_le_bytes).
    match input.split_first() {
        Some((&value, rest)) => Ok((value, rest)),
        None => {
            msg!("u8 cannot be unpacked");
            Err(LendingError::InstructionUnpackError.into())
        }
    }
}

/// Reads a 32-byte array reference from the front of `input`, returning it and
/// the remaining bytes.
fn unpack_bytes32(input: &[u8]) -> Result<(&[u8; 32], &[u8]), ProgramError> {
    if input.len() < 32 {
        msg!("32 bytes cannot be unpacked");
        return Err(LendingError::InstructionUnpackError.into());
    }
    let (bytes, rest) = input.split_at(32);
    Ok((
        bytes
            .try_into()
            .map_err(|_| LendingError::InstructionUnpackError)?,
        rest,
    ))
}

/// Reads a `Pubkey` from the front of `input`, returning it and the remaining
/// bytes.
fn unpack_pubkey(input: &[u8]) -> Result<(Pubkey, &[u8]), ProgramError> {
    if input.len() < PUBKEY_BYTES {
        msg!("Pubkey cannot be unpacked");
        return Err(LendingError::InstructionUnpackError.into());
    }
    let (key, rest) = input.split_at(PUBKEY_BYTES);
    let pk = Pubkey::new(key);
    Ok((pk, rest))
}

/// Packs a [LendingInstruction](enum.LendingInstruction.html) into a byte buffer.
// Serializes the instruction: tag byte first, then the variant's fields in
// declaration order, all integers little-endian. Must stay the exact mirror
// of `unpack` — this is the on-chain wire format.
pub fn pack(&self) -> Vec<u8> {
    // Capacity is a heuristic (in-memory enum size), not the exact encoded
    // length; the Vec grows as needed.
    let mut buf = Vec::with_capacity(size_of::<Self>());
    match *self {
        Self::InitLendingMarket {
            owner,
            quote_currency,
        } => {
            buf.push(0);
            buf.extend_from_slice(owner.as_ref());
            buf.extend_from_slice(quote_currency.as_ref());
        }
        Self::SetLendingMarketOwner { new_owner } => {
            buf.push(1);
            buf.extend_from_slice(new_owner.as_ref());
        }
        Self::InitReserve {
            liquidity_amount,
            config:
                ReserveConfig {
                    optimal_utilization_rate,
                    loan_to_value_ratio,
                    liquidation_bonus,
                    liquidation_threshold,
                    min_borrow_rate,
                    optimal_borrow_rate,
                    max_borrow_rate,
                    fees:
                        ReserveFees {
                            borrow_fee_wad,
                            flash_loan_fee_wad,
                            host_fee_percentage,
                        },
                },
        } => {
            buf.push(2);
            buf.extend_from_slice(&liquidity_amount.to_le_bytes());
            buf.extend_from_slice(&optimal_utilization_rate.to_le_bytes());
            buf.extend_from_slice(&loan_to_value_ratio.to_le_bytes());
            buf.extend_from_slice(&liquidation_bonus.to_le_bytes());
            buf.extend_from_slice(&liquidation_threshold.to_le_bytes());
            buf.extend_from_slice(&min_borrow_rate.to_le_bytes());
            buf.extend_from_slice(&optimal_borrow_rate.to_le_bytes());
            buf.extend_from_slice(&max_borrow_rate.to_le_bytes());
            buf.extend_from_slice(&borrow_fee_wad.to_le_bytes());
            buf.extend_from_slice(&flash_loan_fee_wad.to_le_bytes());
            buf.extend_from_slice(&host_fee_percentage.to_le_bytes());
        }
        Self::RefreshReserve => {
            buf.push(3);
        }
        Self::DepositReserveLiquidity { liquidity_amount } => {
            buf.push(4);
            buf.extend_from_slice(&liquidity_amount.to_le_bytes());
        }
        Self::RedeemReserveCollateral { collateral_amount } => {
            buf.push(5);
            buf.extend_from_slice(&collateral_amount.to_le_bytes());
        }
        Self::InitObligation => {
            buf.push(6);
        }
        Self::RefreshObligation => {
            buf.push(7);
        }
        Self::DepositObligationCollateral { collateral_amount } => {
            buf.push(8);
            buf.extend_from_slice(&collateral_amount.to_le_bytes());
        }
        Self::WithdrawObligationCollateral { collateral_amount } => {
            buf.push(9);
            buf.extend_from_slice(&collateral_amount.to_le_bytes());
        }
        Self::BorrowObligationLiquidity { liquidity_amount } => {
            buf.push(10);
            buf.extend_from_slice(&liquidity_amount.to_le_bytes());
        }
        Self::RepayObligationLiquidity { liquidity_amount } => {
            buf.push(11);
            buf.extend_from_slice(&liquidity_amount.to_le_bytes());
        }
        Self::LiquidateObligation { liquidity_amount } => {
            buf.push(12);
            buf.extend_from_slice(&liquidity_amount.to_le_bytes());
        }
        Self::FlashLoan { amount } => {
            buf.push(13);
            buf.extend_from_slice(&amount.to_le_bytes());
        }
        Self::CloseObligationAccount => {
            buf.push(14);
        }
    }
    buf
}
}

/// Creates an 'InitLendingMarket' instruction.
pub fn init_lending_market(
    program_id: Pubkey,
    owner: Pubkey,
    quote_currency: [u8; 32],
    lending_market_pubkey: Pubkey,
    oracle_program_id: Pubkey,
) -> Instruction {
    Instruction {
        program_id,
        // Account order is part of the on-chain ABI; it must match the docs
        // on `LendingInstruction::InitLendingMarket`.
        accounts: vec![
            AccountMeta::new(lending_market_pubkey, false),
            AccountMeta::new_readonly(sysvar::rent::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
            AccountMeta::new_readonly(oracle_program_id, false),
        ],
        data: LendingInstruction::InitLendingMarket {
            owner,
            quote_currency,
        }
        .pack(),
    }
}

/// Creates a 'SetLendingMarketOwner' instruction.
pub fn set_lending_market_owner(
    program_id: Pubkey,
    lending_market_pubkey: Pubkey,
    lending_market_owner: Pubkey,
    new_owner: Pubkey,
) -> Instruction {
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(lending_market_pubkey, false),
            // Current owner must sign to authorize the transfer of ownership.
            AccountMeta::new_readonly(lending_market_owner, true),
        ],
        data: LendingInstruction::SetLendingMarketOwner { new_owner }.pack(),
    }
}

/// Creates an 'InitReserve' instruction.
// NOTE: in every builder below, the account order is the program's on-chain
// ABI and must match the docs on the corresponding `LendingInstruction`
// variant exactly.
#[allow(clippy::too_many_arguments)]
pub fn init_reserve(
    program_id: Pubkey,
    liquidity_amount: u64,
    config: ReserveConfig,
    source_liquidity_pubkey: Pubkey,
    destination_collateral_pubkey: Pubkey,
    reserve_pubkey: Pubkey,
    reserve_liquidity_mint_pubkey: Pubkey,
    reserve_liquidity_supply_pubkey: Pubkey,
    reserve_liquidity_fee_receiver_pubkey: Pubkey,
    reserve_collateral_mint_pubkey: Pubkey,
    reserve_collateral_supply_pubkey: Pubkey,
    pyth_product_pubkey: Pubkey,
    pyth_price_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    lending_market_owner_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    // Lending-market authority is the PDA derived from the market address.
    let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address(
        &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]],
        &program_id,
    );
    let accounts = vec![
        AccountMeta::new(source_liquidity_pubkey, false),
        AccountMeta::new(destination_collateral_pubkey, false),
        AccountMeta::new(reserve_pubkey, false),
        AccountMeta::new_readonly(reserve_liquidity_mint_pubkey, false),
        AccountMeta::new(reserve_liquidity_supply_pubkey, false),
        AccountMeta::new(reserve_liquidity_fee_receiver_pubkey, false),
        AccountMeta::new(reserve_collateral_mint_pubkey, false),
        AccountMeta::new(reserve_collateral_supply_pubkey, false),
        AccountMeta::new_readonly(pyth_product_pubkey, false),
        AccountMeta::new_readonly(pyth_price_pubkey, false),
        AccountMeta::new_readonly(lending_market_pubkey, false),
        AccountMeta::new_readonly(lending_market_authority_pubkey, false),
        AccountMeta::new_readonly(lending_market_owner_pubkey, true),
        AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(sysvar::rent::id(), false),
        AccountMeta::new_readonly(spl_token::id(), false),
    ];
    Instruction {
        program_id,
        accounts,
        data: LendingInstruction::InitReserve {
            liquidity_amount,
            config,
        }
        .pack(),
    }
}

/// Creates a `RefreshReserve` instruction
pub fn refresh_reserve(
    program_id: Pubkey,
    reserve_pubkey: Pubkey,
    reserve_liquidity_oracle_pubkey: Pubkey,
) -> Instruction {
    let accounts = vec![
        AccountMeta::new(reserve_pubkey, false),
        // Must be the Pyth price account registered at InitReserve.
        AccountMeta::new_readonly(reserve_liquidity_oracle_pubkey, false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
    ];
    Instruction {
        program_id,
        accounts,
        data: LendingInstruction::RefreshReserve.pack(),
    }
}

/// Creates a 'DepositReserveLiquidity' instruction.
#[allow(clippy::too_many_arguments)]
pub fn deposit_reserve_liquidity(
    program_id: Pubkey,
    liquidity_amount: u64,
    source_liquidity_pubkey: Pubkey,
    destination_collateral_pubkey: Pubkey,
    reserve_pubkey: Pubkey,
    reserve_liquidity_supply_pubkey: Pubkey,
    reserve_collateral_mint_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address(
        &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]],
        &program_id,
    );
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(source_liquidity_pubkey, false),
            AccountMeta::new(destination_collateral_pubkey, false),
            AccountMeta::new(reserve_pubkey, false),
            AccountMeta::new(reserve_liquidity_supply_pubkey, false),
            AccountMeta::new(reserve_collateral_mint_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(lending_market_authority_pubkey, false),
            AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::DepositReserveLiquidity { liquidity_amount }.pack(),
    }
}

/// Creates a 'RedeemReserveCollateral' instruction.
// NOTE: account order in each builder below is the program's on-chain ABI and
// must match the docs on the corresponding `LendingInstruction` variant.
#[allow(clippy::too_many_arguments)]
pub fn redeem_reserve_collateral(
    program_id: Pubkey,
    collateral_amount: u64,
    source_collateral_pubkey: Pubkey,
    destination_liquidity_pubkey: Pubkey,
    reserve_pubkey: Pubkey,
    reserve_collateral_mint_pubkey: Pubkey,
    reserve_liquidity_supply_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    // Lending-market authority is the PDA derived from the market address.
    let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address(
        &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]],
        &program_id,
    );
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(source_collateral_pubkey, false),
            AccountMeta::new(destination_liquidity_pubkey, false),
            AccountMeta::new(reserve_pubkey, false),
            AccountMeta::new(reserve_collateral_mint_pubkey, false),
            AccountMeta::new(reserve_liquidity_supply_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(lending_market_authority_pubkey, false),
            AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::RedeemReserveCollateral { collateral_amount }.pack(),
    }
}

/// Creates an 'InitObligation' instruction.
#[allow(clippy::too_many_arguments)]
pub fn init_obligation(
    program_id: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    obligation_owner_pubkey: Pubkey,
) -> Instruction {
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(obligation_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(obligation_owner_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(sysvar::rent::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::InitObligation.pack(),
    }
}

/// Creates a 'RefreshObligation' instruction.
#[allow(clippy::too_many_arguments)]
pub fn refresh_obligation(
    program_id: Pubkey,
    obligation_pubkey: Pubkey,
    reserve_pubkeys: Vec<Pubkey>,
) -> Instruction {
    let mut accounts = vec![
        AccountMeta::new(obligation_pubkey, false),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
    ];
    // Caller supplies all deposit reserves followed by all borrow reserves,
    // in obligation order (see `LendingInstruction::RefreshObligation` docs).
    accounts.extend(
        reserve_pubkeys
            .into_iter()
            .map(|pubkey| AccountMeta::new_readonly(pubkey, false)),
    );
    Instruction {
        program_id,
        accounts,
        data: LendingInstruction::RefreshObligation.pack(),
    }
}

/// Creates a 'DepositObligationCollateral' instruction.
#[allow(clippy::too_many_arguments)]
pub fn deposit_obligation_collateral(
    program_id: Pubkey,
    collateral_amount: u64,
    source_collateral_pubkey: Pubkey,
    destination_collateral_pubkey: Pubkey,
    deposit_reserve_pubkey: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    obligation_owner_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(source_collateral_pubkey, false),
            AccountMeta::new(destination_collateral_pubkey, false),
            AccountMeta::new_readonly(deposit_reserve_pubkey, false),
            AccountMeta::new(obligation_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(obligation_owner_pubkey, true),
            AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::DepositObligationCollateral { collateral_amount }.pack(),
    }
}

/// Creates a 'WithdrawObligationCollateral' instruction.
#[allow(clippy::too_many_arguments)]
pub fn withdraw_obligation_collateral(
    program_id: Pubkey,
    collateral_amount: u64,
    source_collateral_pubkey: Pubkey,
    destination_collateral_pubkey: Pubkey,
    withdraw_reserve_pubkey: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    obligation_owner_pubkey: Pubkey,
) -> Instruction {
    // The lending-market authority is the PDA derived from the market address.
    let seeds = [&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]];
    let (market_authority_pubkey, _) = Pubkey::find_program_address(&seeds, &program_id);

    // Account order is part of the on-chain ABI; it must match the docs on
    // `LendingInstruction::WithdrawObligationCollateral`.
    let accounts = vec![
        AccountMeta::new(source_collateral_pubkey, false),
        AccountMeta::new(destination_collateral_pubkey, false),
        AccountMeta::new_readonly(withdraw_reserve_pubkey, false),
        AccountMeta::new(obligation_pubkey, false),
        AccountMeta::new_readonly(lending_market_pubkey, false),
        AccountMeta::new_readonly(market_authority_pubkey, false),
        AccountMeta::new_readonly(obligation_owner_pubkey, true),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(spl_token::id(), false),
    ];
    let data = LendingInstruction::WithdrawObligationCollateral { collateral_amount }.pack();

    Instruction {
        program_id,
        accounts,
        data,
    }
}

/// Creates a 'BorrowObligationLiquidity' instruction.
// NOTE: account order in each builder below is the program's on-chain ABI and
// must match the docs on the corresponding `LendingInstruction` variant.
#[allow(clippy::too_many_arguments)]
pub fn borrow_obligation_liquidity(
    program_id: Pubkey,
    liquidity_amount: u64,
    source_liquidity_pubkey: Pubkey,
    destination_liquidity_pubkey: Pubkey,
    borrow_reserve_pubkey: Pubkey,
    borrow_reserve_liquidity_fee_receiver_pubkey: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    obligation_owner_pubkey: Pubkey,
    host_fee_receiver_pubkey: Option<Pubkey>,
) -> Instruction {
    // Lending-market authority is the PDA derived from the market address.
    let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address(
        &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]],
        &program_id,
    );
    let mut accounts = vec![
        AccountMeta::new(source_liquidity_pubkey, false),
        AccountMeta::new(destination_liquidity_pubkey, false),
        AccountMeta::new(borrow_reserve_pubkey, false),
        AccountMeta::new(borrow_reserve_liquidity_fee_receiver_pubkey, false),
        AccountMeta::new(obligation_pubkey, false),
        AccountMeta::new_readonly(lending_market_pubkey, false),
        AccountMeta::new_readonly(lending_market_authority_pubkey, false),
        AccountMeta::new_readonly(obligation_owner_pubkey, true),
        AccountMeta::new_readonly(sysvar::clock::id(), false),
        AccountMeta::new_readonly(spl_token::id(), false),
    ];
    // Optional trailing account (#10): host fee receiver.
    if let Some(host_fee_receiver_pubkey) = host_fee_receiver_pubkey {
        accounts.push(AccountMeta::new(host_fee_receiver_pubkey, false));
    }
    Instruction {
        program_id,
        accounts,
        data: LendingInstruction::BorrowObligationLiquidity { liquidity_amount }.pack(),
    }
}

/// Creates a `RepayObligationLiquidity` instruction
#[allow(clippy::too_many_arguments)]
pub fn repay_obligation_liquidity(
    program_id: Pubkey,
    liquidity_amount: u64,
    source_liquidity_pubkey: Pubkey,
    destination_liquidity_pubkey: Pubkey,
    repay_reserve_pubkey: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(source_liquidity_pubkey, false),
            AccountMeta::new(destination_liquidity_pubkey, false),
            AccountMeta::new(repay_reserve_pubkey, false),
            AccountMeta::new(obligation_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::RepayObligationLiquidity { liquidity_amount }.pack(),
    }
}

/// Creates a `LiquidateObligation` instruction
#[allow(clippy::too_many_arguments)]
pub fn liquidate_obligation(
    program_id: Pubkey,
    liquidity_amount: u64,
    source_liquidity_pubkey: Pubkey,
    destination_collateral_pubkey: Pubkey,
    repay_reserve_pubkey: Pubkey,
    repay_reserve_liquidity_supply_pubkey: Pubkey,
    withdraw_reserve_pubkey: Pubkey,
    withdraw_reserve_collateral_supply_pubkey: Pubkey,
    obligation_pubkey: Pubkey,
    lending_market_pubkey: Pubkey,
    user_transfer_authority_pubkey: Pubkey,
) -> Instruction {
    let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address(
        &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]],
        &program_id,
    );
    Instruction {
        program_id,
        accounts: vec![
            AccountMeta::new(source_liquidity_pubkey, false),
            AccountMeta::new(destination_collateral_pubkey, false),
            AccountMeta::new(repay_reserve_pubkey, false),
            AccountMeta::new(repay_reserve_liquidity_supply_pubkey, false),
            AccountMeta::new_readonly(withdraw_reserve_pubkey, false),
            AccountMeta::new(withdraw_reserve_collateral_supply_pubkey, false),
            AccountMeta::new(obligation_pubkey, false),
            AccountMeta::new_readonly(lending_market_pubkey, false),
            AccountMeta::new_readonly(lending_market_authority_pubkey, false),
            AccountMeta::new_readonly(user_transfer_authority_pubkey, true),
            AccountMeta::new_readonly(sysvar::clock::id(), false),
            AccountMeta::new_readonly(spl_token::id(), false),
        ],
        data: LendingInstruction::LiquidateObligation { liquidity_amount }.pack(),
    }
}

/// Creates a `FlashLoan` instruction.
#[allow(clippy::too_many_arguments)] pub fn flash_loan( program_id: Pubkey, amount: u64, source_liquidity_pubkey: Pubkey, destination_liquidity_pubkey: Pubkey, reserve_pubkey: Pubkey, reserve_liquidity_fee_receiver_pubkey: Pubkey, host_fee_receiver_pubkey: Pubkey, lending_market_pubkey: Pubkey, flash_loan_receiver_program_id: Pubkey, flash_loan_receiver_program_accounts: Vec<AccountMeta>, ) -> Instruction { let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address( &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]], &program_id, ); let mut accounts = vec![ AccountMeta::new(source_liquidity_pubkey, false), AccountMeta::new(destination_liquidity_pubkey, false), AccountMeta::new(reserve_pubkey, false), AccountMeta::new(reserve_liquidity_fee_receiver_pubkey, false), AccountMeta::new(host_fee_receiver_pubkey, false), AccountMeta::new_readonly(lending_market_pubkey, false), AccountMeta::new_readonly(lending_market_authority_pubkey, false), AccountMeta::new_readonly(spl_token::id(), false), AccountMeta::new_readonly(flash_loan_receiver_program_id, false), ]; accounts.extend(flash_loan_receiver_program_accounts); Instruction { program_id, accounts, data: LendingInstruction::FlashLoan { amount }.pack(), } } /// Creates a 'CloseObligationAccount' instruction pub fn close_obligation_account( program_id: Pubkey, obligation_pubkey: Pubkey, obligation_owner_pubkey: Pubkey, destination_pubkey: Pubkey, reserve_pubkey: Pubkey, lending_market_pubkey: Pubkey, ) -> Instruction { let (lending_market_authority_pubkey, _bump_seed) = Pubkey::find_program_address( &[&lending_market_pubkey.to_bytes()[..PUBKEY_BYTES]], &program_id, ); Instruction { program_id, accounts: vec![ AccountMeta::new(obligation_pubkey, false), AccountMeta::new(obligation_owner_pubkey, false), AccountMeta::new(destination_pubkey, false), AccountMeta::new(reserve_pubkey, false), AccountMeta::new_readonly(lending_market_pubkey, false), AccountMeta::new_readonly(lending_market_authority_pubkey, 
false), AccountMeta::new(spl_token::id(), false), ], data: LendingInstruction::CloseObligationAccount.pack(), } }
40.377304
129
0.614908
8fe8cf7084247dc81b2d69f9267069321d07c1e5
3,146
use crate::clients::PathClient; use crate::request_options::*; use azure_core::prelude::*; use azure_core::{AppendToUrlQuery, Response as HttpResponse}; use azure_storage::core::headers::CommonStorageResponseHeaders; use std::convert::TryInto; /// A future of a delete file response type PutPath = futures::future::BoxFuture<'static, crate::Result<DeletePathResponse>>; #[derive(Debug, Clone)] pub struct DeletePathBuilder<C> where C: PathClient, { client: C, recursive: Option<Recursive>, continuation: Option<NextMarker>, if_match_condition: Option<IfMatchCondition>, if_modified_since: Option<IfModifiedSinceCondition>, client_request_id: Option<ClientRequestId>, timeout: Option<Timeout>, context: Context, } impl<C: PathClient + 'static> DeletePathBuilder<C> { pub(crate) fn new(client: C, recursive: Option<Recursive>, context: Context) -> Self { Self { client, recursive, continuation: None, if_match_condition: None, if_modified_since: None, client_request_id: None, timeout: None, context, } } setters! 
{ recursive: Recursive => Some(recursive), continuation: NextMarker => Some(continuation), if_match_condition: IfMatchCondition => Some(if_match_condition), if_modified_since: IfModifiedSinceCondition => Some(if_modified_since), client_request_id: ClientRequestId => Some(client_request_id), timeout: Timeout => Some(timeout), context: Context => context, } pub fn into_future(self) -> PutPath { let this = self.clone(); let ctx = self.context.clone(); Box::pin(async move { let mut url = this.client.url()?; if let Some(continuation) = self.continuation { continuation.append_to_url_query_as_continuation(&mut url); }; self.recursive.append_to_url_query(&mut url); self.timeout.append_to_url_query(&mut url); let mut request = this .client .prepare_request(url.as_str(), http::Method::DELETE); request.insert_headers(&this.client_request_id); request.insert_headers(&this.if_match_condition); request.insert_headers(&this.if_modified_since); let response = self .client .pipeline() .send(&mut ctx.clone(), &mut request) .await?; DeletePathResponse::try_from(response).await }) } } #[derive(Debug, Clone)] pub struct DeletePathResponse { pub common_storage_response_headers: CommonStorageResponseHeaders, pub continuation: Option<NextMarker>, } impl DeletePathResponse { pub async fn try_from(response: HttpResponse) -> Result<Self, crate::Error> { let (_status_code, headers, _pinned_stream) = response.deconstruct(); Ok(Self { common_storage_response_headers: (&headers).try_into()?, continuation: NextMarker::from_header_optional(&headers)?, }) } }
32.102041
90
0.636999
4bf29aa3d9634ae26fe44370777deb0fee86825d
694
/* --- BEGIN Variable Definitions --- Owner s; Owner mut x; Owner y; Owner some_string; Function String::from(); Function takes_ownership(); Function println!() --- END Variable Definitions --- */ fn main() { let s = String::from("hello"); // !{ Move(String::from()->s) } takes_ownership(s); // !{ Move(s->takes_ownership()) } let mut x = 5; // !{ Bind(x) } let y = x; // !{ Copy(x->y) } x = 6; // !{ Bind(x) } } // !{ GoOutOfScope(s), GoOutOfScope(x), GoOutOfScope(y) } fn takes_ownership(some_string: String) { // !{ InitOwnerParam(some_string) } println!("{}", some_string); // !{ PassByStaticReference(some_string->println!()) } } // !{ GoOutOfScope(some_string) }
40.823529
87
0.610951
0335d6fa6ca476ce3669538d0e0f4d0a04ec46d3
20,988
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files.git) // DO NOT EDIT use crate::Accessible; use crate::AccessibleRole; use crate::Align; use crate::Buildable; use crate::ConstraintTarget; use crate::Editable; use crate::LayoutManager; use crate::Overflow; use crate::Widget; use glib::object::Cast; use glib::object::IsA; use glib::object::ObjectExt; use glib::object::ObjectType as ObjectType_; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; glib::wrapper! { pub struct PasswordEntry(Object<ffi::GtkPasswordEntry, ffi::GtkPasswordEntryClass>) @extends Widget, @implements Accessible, Buildable, ConstraintTarget, Editable; match fn { get_type => || ffi::gtk_password_entry_get_type(), } } impl PasswordEntry { #[doc(alias = "gtk_password_entry_new")] pub fn new() -> PasswordEntry { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(ffi::gtk_password_entry_new()).unsafe_cast() } } #[doc(alias = "gtk_password_entry_get_extra_menu")] pub fn extra_menu(&self) -> Option<gio::MenuModel> { unsafe { from_glib_none(ffi::gtk_password_entry_get_extra_menu( self.to_glib_none().0, )) } } #[doc(alias = "gtk_password_entry_get_show_peek_icon")] pub fn shows_peek_icon(&self) -> bool { unsafe { from_glib(ffi::gtk_password_entry_get_show_peek_icon( self.to_glib_none().0, )) } } #[doc(alias = "gtk_password_entry_set_extra_menu")] pub fn set_extra_menu<P: IsA<gio::MenuModel>>(&self, model: Option<&P>) { unsafe { ffi::gtk_password_entry_set_extra_menu( self.to_glib_none().0, model.map(|p| p.as_ref()).to_glib_none().0, ); } } #[doc(alias = "gtk_password_entry_set_show_peek_icon")] pub fn set_show_peek_icon(&self, show_peek_icon: bool) { unsafe { ffi::gtk_password_entry_set_show_peek_icon( self.to_glib_none().0, show_peek_icon.to_glib(), ); } } #[doc(alias = 
"get_property_activates_default")] pub fn activates_default(&self) -> bool { unsafe { let mut value = glib::Value::from_type(<bool as StaticType>::static_type()); glib::gobject_ffi::g_object_get_property( self.as_ptr() as *mut glib::gobject_ffi::GObject, b"activates-default\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `activates-default` getter") .unwrap() } } #[doc(alias = "set_property_activates_default")] pub fn set_activates_default(&self, activates_default: bool) { unsafe { glib::gobject_ffi::g_object_set_property( self.as_ptr() as *mut glib::gobject_ffi::GObject, b"activates-default\0".as_ptr() as *const _, glib::Value::from(&activates_default).to_glib_none().0, ); } } #[doc(alias = "get_property_placeholder_text")] pub fn placeholder_text(&self) -> Option<glib::GString> { unsafe { let mut value = glib::Value::from_type(<glib::GString as StaticType>::static_type()); glib::gobject_ffi::g_object_get_property( self.as_ptr() as *mut glib::gobject_ffi::GObject, b"placeholder-text\0".as_ptr() as *const _, value.to_glib_none_mut().0, ); value .get() .expect("Return Value for property `placeholder-text` getter") } } #[doc(alias = "set_property_placeholder_text")] pub fn set_placeholder_text(&self, placeholder_text: Option<&str>) { unsafe { glib::gobject_ffi::g_object_set_property( self.as_ptr() as *mut glib::gobject_ffi::GObject, b"placeholder-text\0".as_ptr() as *const _, glib::Value::from(placeholder_text).to_glib_none().0, ); } } pub fn connect_activate<F: Fn(&PasswordEntry) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn activate_trampoline<F: Fn(&PasswordEntry) + 'static>( this: *mut ffi::GtkPasswordEntry, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"activate\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( activate_trampoline::<F> as *const (), 
)), Box_::into_raw(f), ) } } pub fn emit_activate(&self) { let _ = unsafe { glib::Object::from_glib_borrow(self.as_ptr() as *mut glib::gobject_ffi::GObject) .emit_by_name("activate", &[]) .unwrap() }; } pub fn connect_property_activates_default_notify<F: Fn(&PasswordEntry) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_activates_default_trampoline< F: Fn(&PasswordEntry) + 'static, >( this: *mut ffi::GtkPasswordEntry, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::activates-default\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_activates_default_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } pub fn connect_property_extra_menu_notify<F: Fn(&PasswordEntry) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_extra_menu_trampoline<F: Fn(&PasswordEntry) + 'static>( this: *mut ffi::GtkPasswordEntry, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::extra-menu\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_extra_menu_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } pub fn connect_property_placeholder_text_notify<F: Fn(&PasswordEntry) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_placeholder_text_trampoline<F: Fn(&PasswordEntry) + 'static>( this: *mut ffi::GtkPasswordEntry, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::placeholder-text\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( 
notify_placeholder_text_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } pub fn connect_property_show_peek_icon_notify<F: Fn(&PasswordEntry) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_show_peek_icon_trampoline<F: Fn(&PasswordEntry) + 'static>( this: *mut ffi::GtkPasswordEntry, _param_spec: glib::ffi::gpointer, f: glib::ffi::gpointer, ) { let f: &F = &*(f as *const F); f(&from_glib_borrow(this)) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::show-peek-icon\0".as_ptr() as *const _, Some(transmute::<_, unsafe extern "C" fn()>( notify_show_peek_icon_trampoline::<F> as *const (), )), Box_::into_raw(f), ) } } } impl Default for PasswordEntry { fn default() -> Self { Self::new() } } #[derive(Clone, Default)] pub struct PasswordEntryBuilder { activates_default: Option<bool>, extra_menu: Option<gio::MenuModel>, placeholder_text: Option<String>, show_peek_icon: Option<bool>, can_focus: Option<bool>, can_target: Option<bool>, css_classes: Option<Vec<String>>, css_name: Option<String>, cursor: Option<gdk::Cursor>, focus_on_click: Option<bool>, focusable: Option<bool>, halign: Option<Align>, has_tooltip: Option<bool>, height_request: Option<i32>, hexpand: Option<bool>, hexpand_set: Option<bool>, layout_manager: Option<LayoutManager>, margin_bottom: Option<i32>, margin_end: Option<i32>, margin_start: Option<i32>, margin_top: Option<i32>, name: Option<String>, opacity: Option<f64>, overflow: Option<Overflow>, receives_default: Option<bool>, sensitive: Option<bool>, tooltip_markup: Option<String>, tooltip_text: Option<String>, valign: Option<Align>, vexpand: Option<bool>, vexpand_set: Option<bool>, visible: Option<bool>, width_request: Option<i32>, accessible_role: Option<AccessibleRole>, editable: Option<bool>, enable_undo: Option<bool>, max_width_chars: Option<i32>, text: Option<String>, width_chars: Option<i32>, xalign: Option<f32>, } impl PasswordEntryBuilder { pub fn new() -> Self { 
Self::default() } pub fn build(self) -> PasswordEntry { let mut properties: Vec<(&str, &dyn ToValue)> = vec![]; if let Some(ref activates_default) = self.activates_default { properties.push(("activates-default", activates_default)); } if let Some(ref extra_menu) = self.extra_menu { properties.push(("extra-menu", extra_menu)); } if let Some(ref placeholder_text) = self.placeholder_text { properties.push(("placeholder-text", placeholder_text)); } if let Some(ref show_peek_icon) = self.show_peek_icon { properties.push(("show-peek-icon", show_peek_icon)); } if let Some(ref can_focus) = self.can_focus { properties.push(("can-focus", can_focus)); } if let Some(ref can_target) = self.can_target { properties.push(("can-target", can_target)); } if let Some(ref css_classes) = self.css_classes { properties.push(("css-classes", css_classes)); } if let Some(ref css_name) = self.css_name { properties.push(("css-name", css_name)); } if let Some(ref cursor) = self.cursor { properties.push(("cursor", cursor)); } if let Some(ref focus_on_click) = self.focus_on_click { properties.push(("focus-on-click", focus_on_click)); } if let Some(ref focusable) = self.focusable { properties.push(("focusable", focusable)); } if let Some(ref halign) = self.halign { properties.push(("halign", halign)); } if let Some(ref has_tooltip) = self.has_tooltip { properties.push(("has-tooltip", has_tooltip)); } if let Some(ref height_request) = self.height_request { properties.push(("height-request", height_request)); } if let Some(ref hexpand) = self.hexpand { properties.push(("hexpand", hexpand)); } if let Some(ref hexpand_set) = self.hexpand_set { properties.push(("hexpand-set", hexpand_set)); } if let Some(ref layout_manager) = self.layout_manager { properties.push(("layout-manager", layout_manager)); } if let Some(ref margin_bottom) = self.margin_bottom { properties.push(("margin-bottom", margin_bottom)); } if let Some(ref margin_end) = self.margin_end { properties.push(("margin-end", margin_end)); } if 
let Some(ref margin_start) = self.margin_start { properties.push(("margin-start", margin_start)); } if let Some(ref margin_top) = self.margin_top { properties.push(("margin-top", margin_top)); } if let Some(ref name) = self.name { properties.push(("name", name)); } if let Some(ref opacity) = self.opacity { properties.push(("opacity", opacity)); } if let Some(ref overflow) = self.overflow { properties.push(("overflow", overflow)); } if let Some(ref receives_default) = self.receives_default { properties.push(("receives-default", receives_default)); } if let Some(ref sensitive) = self.sensitive { properties.push(("sensitive", sensitive)); } if let Some(ref tooltip_markup) = self.tooltip_markup { properties.push(("tooltip-markup", tooltip_markup)); } if let Some(ref tooltip_text) = self.tooltip_text { properties.push(("tooltip-text", tooltip_text)); } if let Some(ref valign) = self.valign { properties.push(("valign", valign)); } if let Some(ref vexpand) = self.vexpand { properties.push(("vexpand", vexpand)); } if let Some(ref vexpand_set) = self.vexpand_set { properties.push(("vexpand-set", vexpand_set)); } if let Some(ref visible) = self.visible { properties.push(("visible", visible)); } if let Some(ref width_request) = self.width_request { properties.push(("width-request", width_request)); } if let Some(ref accessible_role) = self.accessible_role { properties.push(("accessible-role", accessible_role)); } if let Some(ref editable) = self.editable { properties.push(("editable", editable)); } if let Some(ref enable_undo) = self.enable_undo { properties.push(("enable-undo", enable_undo)); } if let Some(ref max_width_chars) = self.max_width_chars { properties.push(("max-width-chars", max_width_chars)); } if let Some(ref text) = self.text { properties.push(("text", text)); } if let Some(ref width_chars) = self.width_chars { properties.push(("width-chars", width_chars)); } if let Some(ref xalign) = self.xalign { properties.push(("xalign", xalign)); } let ret = 
glib::Object::new::<PasswordEntry>(&properties).expect("object new"); ret } pub fn activates_default(mut self, activates_default: bool) -> Self { self.activates_default = Some(activates_default); self } pub fn extra_menu<P: IsA<gio::MenuModel>>(mut self, extra_menu: &P) -> Self { self.extra_menu = Some(extra_menu.clone().upcast()); self } pub fn placeholder_text(mut self, placeholder_text: &str) -> Self { self.placeholder_text = Some(placeholder_text.to_string()); self } pub fn show_peek_icon(mut self, show_peek_icon: bool) -> Self { self.show_peek_icon = Some(show_peek_icon); self } pub fn can_focus(mut self, can_focus: bool) -> Self { self.can_focus = Some(can_focus); self } pub fn can_target(mut self, can_target: bool) -> Self { self.can_target = Some(can_target); self } pub fn css_classes(mut self, css_classes: Vec<String>) -> Self { self.css_classes = Some(css_classes); self } pub fn css_name(mut self, css_name: &str) -> Self { self.css_name = Some(css_name.to_string()); self } pub fn cursor(mut self, cursor: &gdk::Cursor) -> Self { self.cursor = Some(cursor.clone()); self } pub fn focus_on_click(mut self, focus_on_click: bool) -> Self { self.focus_on_click = Some(focus_on_click); self } pub fn focusable(mut self, focusable: bool) -> Self { self.focusable = Some(focusable); self } pub fn halign(mut self, halign: Align) -> Self { self.halign = Some(halign); self } pub fn has_tooltip(mut self, has_tooltip: bool) -> Self { self.has_tooltip = Some(has_tooltip); self } pub fn height_request(mut self, height_request: i32) -> Self { self.height_request = Some(height_request); self } pub fn hexpand(mut self, hexpand: bool) -> Self { self.hexpand = Some(hexpand); self } pub fn hexpand_set(mut self, hexpand_set: bool) -> Self { self.hexpand_set = Some(hexpand_set); self } pub fn layout_manager<P: IsA<LayoutManager>>(mut self, layout_manager: &P) -> Self { self.layout_manager = Some(layout_manager.clone().upcast()); self } pub fn margin_bottom(mut self, margin_bottom: 
i32) -> Self { self.margin_bottom = Some(margin_bottom); self } pub fn margin_end(mut self, margin_end: i32) -> Self { self.margin_end = Some(margin_end); self } pub fn margin_start(mut self, margin_start: i32) -> Self { self.margin_start = Some(margin_start); self } pub fn margin_top(mut self, margin_top: i32) -> Self { self.margin_top = Some(margin_top); self } pub fn name(mut self, name: &str) -> Self { self.name = Some(name.to_string()); self } pub fn opacity(mut self, opacity: f64) -> Self { self.opacity = Some(opacity); self } pub fn overflow(mut self, overflow: Overflow) -> Self { self.overflow = Some(overflow); self } pub fn receives_default(mut self, receives_default: bool) -> Self { self.receives_default = Some(receives_default); self } pub fn sensitive(mut self, sensitive: bool) -> Self { self.sensitive = Some(sensitive); self } pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self { self.tooltip_markup = Some(tooltip_markup.to_string()); self } pub fn tooltip_text(mut self, tooltip_text: &str) -> Self { self.tooltip_text = Some(tooltip_text.to_string()); self } pub fn valign(mut self, valign: Align) -> Self { self.valign = Some(valign); self } pub fn vexpand(mut self, vexpand: bool) -> Self { self.vexpand = Some(vexpand); self } pub fn vexpand_set(mut self, vexpand_set: bool) -> Self { self.vexpand_set = Some(vexpand_set); self } pub fn visible(mut self, visible: bool) -> Self { self.visible = Some(visible); self } pub fn width_request(mut self, width_request: i32) -> Self { self.width_request = Some(width_request); self } pub fn accessible_role(mut self, accessible_role: AccessibleRole) -> Self { self.accessible_role = Some(accessible_role); self } pub fn editable(mut self, editable: bool) -> Self { self.editable = Some(editable); self } pub fn enable_undo(mut self, enable_undo: bool) -> Self { self.enable_undo = Some(enable_undo); self } pub fn max_width_chars(mut self, max_width_chars: i32) -> Self { self.max_width_chars = 
Some(max_width_chars); self } pub fn text(mut self, text: &str) -> Self { self.text = Some(text.to_string()); self } pub fn width_chars(mut self, width_chars: i32) -> Self { self.width_chars = Some(width_chars); self } pub fn xalign(mut self, xalign: f32) -> Self { self.xalign = Some(xalign); self } } impl fmt::Display for PasswordEntry { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("PasswordEntry") } }
32.190184
167
0.567419
e998a0845d45715e6a1b7e5c9e24a8d908ab41df
2,843
#[macro_use] extern crate nom; use nom::{IResult,digit}; // Parser definition use std::str; use std::str::FromStr; use self::Operator::*; enum Operator { Slash, Star, } impl Operator { fn to_str(&self) -> &'static str { match *self { Slash => "/", Star => "*", } } } // Parse the specified `Operator`. named_args!(operator(op: Operator) <&[u8]>, tag!(op.to_str()) ); // We parse any expr surrounded by the tags `open_tag` and `close_tag`, ignoring all whitespaces around those named_args!(brackets<'a>(open_tag: &str, close_tag: &str) <i64>, ws!(delimited!( tag!(open_tag), expr, tag!(close_tag) )) ); // We transform an integer string into a i64, ignoring surrounding whitespaces // We look for a digit suite, and try to convert it. // If either str::from_utf8 or FromStr::from_str fail, // we fallback to the brackets parser defined above named!(factor<i64>, alt!( map_res!( map_res!( ws!(digit), str::from_utf8 ), FromStr::from_str ) | call!(brackets, "(", ")") ) ); // We read an initial factor and for each time we find // a * or / operator followed by another factor, we do // the math by folding everything named!(term <i64>, do_parse!( init: factor >> res: fold_many0!( pair!(alt!(call!(operator, Star) | call!(operator, Slash)), factor), init, |acc, (op, val): (&[u8], i64)| { if (op[0] as char) == '*' { acc * val } else { acc / val } } ) >> (res) ) ); named!(expr <i64>, do_parse!( init: term >> res: fold_many0!( pair!(alt!(tag!("+") | tag!("-")), term), init, |acc, (op, val): (&[u8], i64)| { if (op[0] as char) == '+' { acc + val } else { acc - val } } ) >> (res) ) ); #[test] fn factor_test() { assert_eq!(factor(&b"3"[..]), IResult::Done(&b""[..], 3)); assert_eq!(factor(&b" 12"[..]), IResult::Done(&b""[..], 12)); assert_eq!(factor(&b"537 "[..]), IResult::Done(&b""[..], 537)); assert_eq!(factor(&b" 24 "[..]), IResult::Done(&b""[..], 24)); } #[test] fn term_test() { assert_eq!(term(&b" 12 *2 / 3"[..]), IResult::Done(&b""[..], 8)); assert_eq!(term(&b" 2* 3 *2 *2 / 3"[..]), 
IResult::Done(&b""[..], 8)); assert_eq!(term(&b" 48 / 3/2"[..]), IResult::Done(&b""[..], 8)); } #[test] fn expr_test() { assert_eq!(expr(&b" 1 + 2 "[..]), IResult::Done(&b""[..], 3)); assert_eq!(expr(&b" 12 + 6 - 4+ 3"[..]), IResult::Done(&b""[..], 17)); assert_eq!(expr(&b" 1 + 2*3 + 4"[..]), IResult::Done(&b""[..], 11)); } #[test] fn parens_test() { assert_eq!(expr(&b" ( 2 )"[..]), IResult::Done(&b""[..], 2)); assert_eq!(expr(&b" 2* ( 3 + 4 ) "[..]), IResult::Done(&b""[..], 14)); assert_eq!(expr(&b" 2*2 / ( 5 - 1) + 3"[..]), IResult::Done(&b""[..], 4)); }
26.082569
124
0.520929
0a0943ff2d11f1c6a96a1adda6ddcacdc1d25618
1,108
// structs1.rs // Address all the TODOs to make the tests pass! struct ColorClassicStruct { // TODO: Something goes here name : String, hex : String } struct ColorTupleStruct(String, String); #[derive(Debug)] struct UnitStruct; #[cfg(test)] mod tests { use super::*; #[test] fn classic_c_structs() { // TODO: Instantiate a classic c struct! let green = ColorClassicStruct {name : String::from("green"), hex : String::from("#00FF00")}; assert_eq!(green.name, "green"); assert_eq!(green.hex, "#00FF00"); } #[test] fn tuple_structs() { // TODO: Instantiate a tuple struct! let green = ColorTupleStruct (String::from("green"), String::from("#00FF00")); assert_eq!(green.0, "green"); assert_eq!(green.1, "#00FF00"); } #[test] fn unit_structs() { // TODO: Instantiate a unit struct! let unit_struct = UnitStruct; let message = format!("{:?}s are fun!", unit_struct); assert_eq!(message, "UnitStructs are fun!"); } }
23.574468
86
0.572202
87517a13614e09c546c46c44ae8a4fa0912c84a0
10,279
// Copyright 2019-2020 Twitter, Inc. // Licensed under the Apache License, Version 2.0 // http://www.apache.org/licenses/LICENSE-2.0 use std::sync::{Arc, Mutex}; use std::time::Instant; use async_trait::async_trait; use dashmap::DashMap; #[cfg(feature = "perf")] use perfcnt::*; use rustcommon_metrics::*; use tokio::fs::File; use tokio::io::{AsyncBufReadExt, BufReader}; use crate::common::bpf::*; use crate::config::SamplerConfig; use crate::samplers::Common; use crate::Sampler; mod config; mod stat; pub use config::*; pub use stat::*; #[cfg(not(feature = "perf"))] struct PerfCounter {} #[allow(dead_code)] pub struct Scheduler { bpf: Option<Arc<Mutex<BPF>>>, bpf_last: Arc<Mutex<Instant>>, common: Common, perf_counters: DashMap<SchedulerStatistic, Vec<PerfCounter>>, } #[async_trait] impl Sampler for Scheduler { type Statistic = SchedulerStatistic; fn new(common: Common) -> Result<Self, failure::Error> { let fault_tolerant = common.config.general().fault_tolerant(); let perf_counters = DashMap::new(); if common.config.samplers().scheduler().enabled() && common.config.samplers().scheduler().perf_events() { #[cfg(feature = "perf")] { if let Ok(cores) = crate::common::hardware_threads() { for statistic in common.config.samplers().scheduler().statistics().iter() { if let Some(mut builder) = statistic.perf_counter_builder() { let mut event_counters = Vec::new(); for core in 0..cores { match builder.on_cpu(core as isize).for_all_pids().finish() { Ok(c) => event_counters.push(c), Err(e) => { debug!( "Failed to create PerfCounter for {:?}: {}", statistic, e ); } } } if event_counters.len() as u64 == cores { trace!("Initialized PerfCounters for {:?}", statistic); perf_counters.insert(*statistic, event_counters); } } } } else if !fault_tolerant { fatal!("failed to detect number of hardware threads"); } else { error!("failed to detect number of hardware threads. 
skipping scheduler perf telemetry"); } } } #[allow(unused_mut)] let mut sampler = Self { bpf: None, bpf_last: Arc::new(Mutex::new(Instant::now())), common, perf_counters, }; if let Err(e) = sampler.initialize_bpf() { if !fault_tolerant { return Err(e); } } Ok(sampler) } fn spawn(common: Common) { if let Ok(mut sampler) = Self::new(common.clone()) { common.handle.spawn(async move { loop { let _ = sampler.sample().await; } }); } else if !common.config.fault_tolerant() { fatal!("failed to initialize scheduler sampler"); } else { error!("failed to initialize scheduler sampler"); } } fn common(&self) -> &Common { &self.common } fn common_mut(&mut self) -> &mut Common { &mut self.common } fn sampler_config(&self) -> &dyn SamplerConfig<Statistic = Self::Statistic> { self.common.config().samplers().scheduler() } async fn sample(&mut self) -> Result<(), std::io::Error> { if let Some(ref mut delay) = self.delay() { delay.tick().await; } if !self.sampler_config().enabled() { return Ok(()); } debug!("sampling"); self.register(); self.map_result(self.sample_proc_stat().await)?; #[cfg(feature = "bpf")] self.map_result(self.sample_bpf())?; #[cfg(feature = "perf")] { let result = self.sample_perf_counters().await; self.map_result(result)?; } Ok(()) } fn summary(&self, statistic: &Self::Statistic) -> Option<Summary> { let precision = if statistic.bpf_table().is_some() { 2 } else { 3 }; Some(Summary::histogram( statistic.max(), precision, Some(self.general_config().window()), )) } } impl Scheduler { async fn sample_proc_stat(&self) -> Result<(), std::io::Error> { let file = File::open("/proc/stat").await?; let reader = BufReader::new(file); let mut lines = reader.lines(); let time = time::precise_time_ns(); while let Some(line) = lines.next_line().await? 
{ let parts: Vec<&str> = line.split_whitespace().collect(); match parts.get(0) { Some(&"ctxt") => { self.metrics().record_counter( &SchedulerStatistic::ContextSwitches, time, parts.get(1).map(|v| v.parse().unwrap_or(0)).unwrap_or(0), ); } Some(&"processes") => { self.metrics().record_counter( &SchedulerStatistic::ProcessesCreated, time, parts.get(1).map(|v| v.parse().unwrap_or(0)).unwrap_or(0), ); } Some(&"procs_running") => { self.metrics().record_gauge( &SchedulerStatistic::ProcessesRunning, time, parts.get(1).map(|v| v.parse().unwrap_or(0)).unwrap_or(0), ); } Some(&"procs_blocked") => { self.metrics().record_gauge( &SchedulerStatistic::ProcessesBlocked, time, parts.get(1).map(|v| v.parse().unwrap_or(0)).unwrap_or(0), ); } Some(_) | None => {} } } Ok(()) } #[cfg(feature = "bpf")] fn sample_bpf(&self) -> Result<(), std::io::Error> { use crate::common::MICROSECOND; // sample bpf { if self.bpf_last.lock().unwrap().elapsed() >= self.general_config().window() { if let Some(ref bpf) = self.bpf { let bpf = bpf.lock().unwrap(); let time = time::precise_time_ns(); for statistic in self.sampler_config().statistics() { if let Some(table) = statistic.bpf_table() { let mut table = (*bpf).inner.table(table); for (&value, &count) in &map_from_table(&mut table) { if count > 0 { self.metrics().record_distribution( statistic, time, value * MICROSECOND, count, ); } } } } } *self.bpf_last.lock().unwrap() = Instant::now(); } } Ok(()) } #[cfg(feature = "perf")] async fn sample_perf_counters(&mut self) -> Result<(), std::io::Error> { let time = time::precise_time_ns(); for stat in self.sampler_config().statistics() { if let Some(mut counters) = self.perf_counters.get_mut(stat) { let mut value = 0; for counter in counters.iter_mut() { let count = match counter.read() { Ok(c) => c, Err(e) => { debug!("Could not read perf counter for event {:?}: {}", stat, e); 0 } }; value += count; } if value > 0 { debug!("recording value for: {:?}", stat); } self.metrics().record_counter(stat, time, 
value); } } Ok(()) } // checks that bpf is enabled in config and one or more bpf stats enabled #[cfg(feature = "bpf")] fn bpf_enabled(&self) -> bool { if self.sampler_config().bpf() { for statistic in self.sampler_config().statistics() { if statistic.bpf_table().is_some() { return true; } } } false } fn initialize_bpf(&mut self) -> Result<(), failure::Error> { #[cfg(feature = "bpf")] { if self.enabled() && self.bpf_enabled() { debug!("initializing bpf"); // load the code and compile let code = include_str!("bpf.c"); let mut bpf = bcc::BPF::new(code)?; // load + attach kprobes! bcc::Kprobe::new() .handler("trace_run") .function("finish_task_switch") .attach(&mut bpf)?; bcc::Kprobe::new() .handler("trace_ttwu_do_wakeup") .function("ttwu_do_wakeup") .attach(&mut bpf)?; bcc::Kprobe::new() .handler("trace_wake_up_new_task") .function("wake_up_new_task") .attach(&mut bpf)?; self.bpf = Some(Arc::new(Mutex::new(BPF { inner: bpf }))); } } Ok(()) } }
33.265372
109
0.442066
de00a9427b3e05c7d82a4b0658b3d88aca752a67
564
// if1.rs pub fn bigger(a: i32, b: i32) -> i32 { // Complete this function to return the bigger number! // Do not use: // - another function call // - additional variables // Execute `rustlings hint if1` for hints if a> b { a }else { b } } // Don't mind this for now :) #[cfg(test)] mod tests { use super::*; #[test] fn ten_is_bigger_than_eight() { assert_eq!(10, bigger(10, 8)); } #[test] fn fortytwo_is_bigger_than_thirtytwo() { assert_eq!(42, bigger(32, 42)); } }
17.625
58
0.546099
c1122d64b2d53dcedade44319c3efb139b8ed07d
2,071
use dson::Dson; use thir::{Expr, Literal, TypedHir}; use thiserror::Error; #[derive(Debug, Clone, PartialEq, Error)] pub enum HirToJsonError { #[error("{0} not allowed")] NotAllowed(String), } pub fn thir_to_dson(thir: &TypedHir) -> Result<Dson, HirToJsonError> { let dson = match &thir.expr { Expr::Literal(Literal::Int(value)) => Dson::Literal(dson::Literal::Int(*value)), Expr::Literal(Literal::Rational(a, b)) => Dson::Literal(dson::Literal::Rational(*a, *b)), Expr::Literal(Literal::Float(value)) => Dson::Literal(dson::Literal::Float(*value)), Expr::Literal(Literal::String(value)) => { Dson::Literal(dson::Literal::String(value.clone())) } Expr::Product(values) => Dson::Product( values .iter() .map(thir_to_dson) .collect::<Result<Vec<_>, _>>()?, ), Expr::Vector(values) => Dson::Array( values .iter() .map(thir_to_dson) .collect::<Result<Vec<_>, _>>()?, ), Expr::Set(values) => Dson::Set( values .iter() .map(thir_to_dson) .collect::<Result<Vec<_>, _>>()?, ), Expr::Let { .. } => return Err(HirToJsonError::NotAllowed("let".into())), Expr::Perform { .. } => return Err(HirToJsonError::NotAllowed("perform".into())), Expr::Handle { .. } => return Err(HirToJsonError::NotAllowed("handle".into())), Expr::Apply { .. } => return Err(HirToJsonError::NotAllowed("apply".into())), Expr::Match { .. } => return Err(HirToJsonError::NotAllowed("match".into())), Expr::Function { .. } => return Err(HirToJsonError::NotAllowed("function".into())), Expr::Op { .. } => return Err(HirToJsonError::NotAllowed("op".into())), Expr::Label { label, item: expr } => Dson::Labeled { label: label.clone(), expr: Box::new(thir_to_dson(expr)?), }, }; Ok(dson) } #[cfg(test)] mod tests {}
38.351852
97
0.53549
bbb68f46b2a821910e81fb3306a6e7691b644c5d
1,591
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::{
    // info can be found at https://software.intel.com/sites/landingpage/IntrinsicsGuide
    __m256i,
    // <<
    _mm256_slli_epi64,
    // >>
    _mm256_srli_epi64,
    // ^
    _mm256_xor_si256,
    // mem -> reg
    _mm256_loadu_si256,
    // reg -> mem
    _mm256_storeu_si256,
};

#[cfg(target_arch = "x86_64")]
#[inline(always)]
/// Generate 4 random u64 by running 4 parallel xorshifts (shift constants
/// 13, 7, 17) in a single AVX register.
///
/// This version uses rust's intrinsics instead of directly asm,
/// and we observe that's several time slower.
/// [This might be the reason why](https://software.intel.com/content/www/us/en/develop/articles/avoiding-avx-sse-transition-penalties.html)
///
/// The four lanes of `seed` are advanced in place, so successive calls
/// continue the streams (previously the state was never written back and
/// every call returned the same values). Seeds should be nonzero: a zero
/// lane stays zero forever under xorshift.
///
/// The 256-bit shift intrinsics require AVX2; the caller must ensure the
/// CPU supports it (e.g. compile with `-C target-feature=+avx2`), otherwise
/// executing this function is undefined behavior.
///
/// Example:
///
/// ```ignore
/// let mut seed: [u64; 4] = [
///     0xBAD5EEDdeadbeef,
///     0xBAD5EEDdeadbeef,
///     0xBAD5EEDdeadbeef,
///     0xBAD5EEDdeadbeef,
/// ];
/// let values = xorshift_avx_intrinsics(&mut seed);
/// println!("{:?}", values);
/// ```
pub fn xorshift_avx_intrinsics(seed: &mut [u64; 4]) -> [u64; 4] {
    let mut result: [u64; 4] = [0; 4];
    // SAFETY: `seed` and `result` are both 4-lane u64 arrays (32 bytes),
    // exactly the size of __m256i, and the unaligned load/store intrinsics
    // impose no alignment requirement. AVX2 availability is the caller's
    // responsibility (documented above).
    unsafe {
        let mut temp: __m256i;
        let mut state: __m256i = _mm256_loadu_si256(seed.as_mut_ptr() as *mut __m256i);

        // x ^= x << 13
        temp = _mm256_slli_epi64(state, 13);
        state = _mm256_xor_si256(state, temp);
        // x ^= x >> 7
        temp = _mm256_srli_epi64(state, 7);
        state = _mm256_xor_si256(state, temp);
        // x ^= x << 17
        temp = _mm256_slli_epi64(state, 17);
        state = _mm256_xor_si256(state, temp);

        // Persist the advanced state so the generator makes progress.
        _mm256_storeu_si256(seed.as_mut_ptr() as *mut __m256i, state);
        _mm256_storeu_si256(result.as_mut_ptr() as *mut __m256i, state);
        result
    }
}
30.596154
140
0.62665
75294dbe55d499edb4c8f890cbe28fe82ef52df1
15,083
//! This crate is for solving instances of the [minimum cost maximum flow problem](https://en.wikipedia.org/wiki/Minimum-cost_flow_problem). //! It uses the network simplex algorithm from the [LEMON](http://lemon.cs.elte.hu/trac/lemon) graph optimization library. //! //! # Example //! ``` //! use mcmf::{GraphBuilder, Vertex, Cost, Capacity}; //! let (cost, paths) = GraphBuilder::new() //! .add_edge(Vertex::Source, "Vancouver", Capacity(2), Cost(0)) //! .add_edge("Vancouver", "Toronto", Capacity(2), Cost(100)) //! .add_edge("Toronto", "Halifax", Capacity(1), Cost(150)) //! .add_edge("Vancouver", "Halifax", Capacity(5), Cost(400)) //! .add_edge("Halifax", Vertex::Sink, Capacity(2), Cost(0)) //! .mcmf(); //! assert_eq!(cost, 650); //! assert_eq!(cost, paths.iter().map(|path| path.cost()).sum()); //! assert_eq!(paths.len(), 2); //! assert!( //! paths[0].vertices() == vec![ //! &Vertex::Source, //! &Vertex::Node("Vancouver"), //! &Vertex::Node("Halifax"), //! &Vertex::Sink]); //! assert!( //! paths[1].vertices() == vec![ //! &Vertex::Source, //! &Vertex::Node("Vancouver"), //! &Vertex::Node("Toronto"), //! &Vertex::Node("Halifax"), //! &Vertex::Sink]); //! 
//! ```

use std::collections::BTreeMap;
use std::iter;
use std::cmp::min;

// FFI entry point into the external `flow` library (the LEMON network
// simplex solver referenced in the crate docs). All arrays are parallel,
// indexed by edge; the solver writes per-edge flow into `edge_flow_result`
// and returns the total cost.
#[link(name="flow")]
extern {
    fn network_simplex_mcmf_i64(num_vertices: i64, num_edges: i64,
                                node_supply: *const i64,
                                edge_a: *const i64, edge_b: *const i64,
                                edge_capacity: *const i64, edge_cost: *const i64,
                                edge_flow_result: *mut i64) -> i64;
}

/// Internal vertex index (position in `Graph::nodes`).
#[derive(Clone, Copy, Eq, PartialEq)]
struct Node(usize);

/// A directed edge `a -> b` plus its solver data.
struct Edge {
    pub a: Node,
    pub b: Node,
    pub data: EdgeData,
}

/// Low-level flow network handed to the FFI solver.
struct Graph {
    nodes: Vec<NodeData>,
    edges: Vec<Edge>,
}

impl Graph {
    /// Append a directed edge `a -> b`; both endpoints must already exist.
    pub fn add_edge(&mut self, a: Node, b: Node, data: EdgeData) -> &mut Self {
        assert!(a.0 < self.nodes.len());
        assert!(b.0 < self.nodes.len());
        self.edges.push(Edge {a, b, data});
        self
    }

    /// Consume the graph, yielding its nodes and edges
    /// (with flows filled in if `mcmf` has run).
    pub fn extract(self) -> (Vec<NodeData>, Vec<Edge>) {
        (self.nodes, self.edges)
    }
}

impl Graph {
    /// Create a graph with `num_vertices` zero-supply nodes and no edges.
    pub fn new_default(num_vertices: usize) -> Self {
        let nodes = vec![Default::default(); num_vertices];
        Graph {nodes, edges: Vec::new()}
    }
}

/// Per-node solver input: net supply (positive) or demand (negative).
#[derive(Clone, Copy, Default)]
struct NodeData {
    supply: i64,
}

/// Per-edge solver data; `flow` is an output, filled in by `Graph::mcmf`.
#[derive(Clone, Copy)]
struct EdgeData {
    cost: i64,
    capacity: i64,
    flow: i64,
}

/// Wrapper type representing the cost of an edge in the graph.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Cost(pub i32);

/// Wrapper type representing the capacity of an edge in the graph.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Capacity(pub i32);

impl EdgeData {
    /// Widen the i32 wrappers to the solver's i64 representation.
    /// Panics if `capacity` is negative; `flow` starts at zero.
    pub fn new(cost: Cost, capacity: Capacity) -> Self {
        let cost = cost.0 as i64;
        let capacity = capacity.0 as i64;
        assert!(capacity >= 0);
        EdgeData {cost, capacity, flow: Default::default()}
    }
}

impl Graph {
    pub fn increase_supply(&mut self, node: Node, amount: i64) {
        self.delta_supply(node, amount);
    }

    pub fn decrease_supply(&mut self, node: Node, amount: i64) {
        self.delta_supply(node, -amount);
    }

    pub fn delta_supply(&mut self, node: Node, amount: i64) {
        self.nodes[node.0].supply += amount;
    }

    /// Run the external network-simplex solver, store the resulting flow on
    /// each edge, and return the total cost the solver reports.
    pub fn mcmf(&mut self) -> i64 {
        let num_vertices = self.nodes.len() as i64;
        let num_edges = self.edges.len() as i64;
        // Marshal the graph into parallel arrays for the C interface.
        // Node supplies are clamped into i32 range (see `clamp_to_i32`).
        let node_supply: Vec<_> = self.nodes.iter().map(|x| clamp_to_i32(x.supply)).collect();
        let edge_a: Vec<_> = self.edges.iter().map(|x| x.a.0 as i64).collect();
        let edge_b: Vec<_> = self.edges.iter().map(|x| x.b.0 as i64).collect();
        let edge_capacity: Vec<_> = self.edges.iter().map(|x| x.data.capacity).collect();
        let edge_cost: Vec<_> = self.edges.iter().map(|x| x.data.cost).collect();
        let mut edge_flow_result = vec![0; self.edges.len()];
        let result;
        // SAFETY: every pointer comes from a live Vec built just above;
        // `node_supply` holds `num_vertices` elements and the four edge
        // arrays plus `edge_flow_result` each hold `num_edges` elements,
        // matching the counts passed to the foreign function.
        unsafe {
            result = network_simplex_mcmf_i64(num_vertices, num_edges,
                node_supply.as_ptr(),
                edge_a.as_ptr(), edge_b.as_ptr(),
                edge_capacity.as_ptr(), edge_cost.as_ptr(),
                edge_flow_result.as_mut_ptr());
        }
        // Copy the solver's per-edge flow back onto the edges.
        for (edge, &flow) in self.edges.iter_mut().zip(edge_flow_result.iter()) {
            edge.data.flow = flow;
        }
        result
    }
}

/// Clamp `x` into `[-i32::MAX, i32::MAX]` while keeping the i64 type.
/// Note the lower bound is `-i32::MAX`, not `i32::MIN`.
fn clamp_to_i32(x: i64) -> i64 {
    let limit = std::i32::MAX as i64;
    let x = std::cmp::min(x, limit);
    let x = std::cmp::max(x, -limit);
    x
}

/// This class represents a vertex in a graph.
/// It is parametrized by `T` so that users of the library can use the most convenient type for representing nodes in the graph.
#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq, PartialOrd, Ord)] pub enum Vertex<T: Clone + Ord> { Source, Sink, Node(T) } impl<T> Vertex<T> where T: Clone + Ord { /// Maps `Source`, `Sink`, and `Node(x)` to `None`, `None`, and `Some(x)` respectively. pub fn as_option(self) -> Option<T> { match self { Vertex::Source => None, Vertex::Sink => None, Vertex::Node(x) => Some(x), } } } impl<T> From<T> for Vertex<T> where T: Clone + Ord { fn from(x: T) -> Vertex<T> { Vertex::Node(x) } } /// Represents flow in a solution to the minimum cost maximum flow problem. #[derive(Clone)] pub struct Flow<T: Clone + Ord> { pub a: Vertex<T>, pub b: Vertex<T>, pub amount: u32, pub cost: i32 } /// Represents a path from the source to the sink in a solution to the minimum cost maximum flow problem. pub struct Path<T: Clone + Ord> { pub flows: Vec<Flow<T>> } impl<T> Path<T> where T: Clone + Ord { /// A list of all the vertices in the path. /// Always begins with `Vertex::Source` and ends with `Vertex::Sink`. pub fn vertices(&self) -> Vec<&Vertex<T>> { iter::once(&self.flows[0].a) .chain(self.flows.iter().map(|x| &x.b)) .collect() } /// A list of all the edges in the path. pub fn edges(&self) -> Vec<&Flow<T>> { self.flows.iter().collect() } /// Returns the total cost of the path. /// `path.cost()` is always a multiple of `path.amount()`. pub fn cost(&self) -> i32 { self.flows.iter() .map(|flow| flow.amount as i32 * flow.cost) .sum() } /// Returns the amount of flow in the path. pub fn amount(&self) -> u32 { self.flows[0].amount } /// Returns the number of edges in the path. pub fn len(&self) -> usize { self.flows.len() } } /// Use this struct to build a graph, then call the `mcmf()` function to find its minimum cost maximum flow. 
/// # Example
/// ```
/// use mcmf::{GraphBuilder, Vertex, Cost, Capacity};
/// let (cost, paths) = GraphBuilder::new()
///     .add_edge(Vertex::Source, "Vancouver", Capacity(2), Cost(0))
///     .add_edge("Vancouver", "Toronto", Capacity(2), Cost(100))
///     .add_edge("Toronto", "Halifax", Capacity(1), Cost(150))
///     .add_edge("Vancouver", "Halifax", Capacity(5), Cost(400))
///     .add_edge("Halifax", Vertex::Sink, Capacity(2), Cost(0))
///     .mcmf();
/// assert_eq!(cost, 650);
/// assert_eq!(cost, paths.iter().map(|path| path.cost()).sum());
/// assert_eq!(paths.len(), 2);
/// assert!(
///     paths[0].vertices() == vec![
///         &Vertex::Source,
///         &Vertex::Node("Vancouver"),
///         &Vertex::Node("Halifax"),
///         &Vertex::Sink]);
/// assert!(
///     paths[1].vertices() == vec![
///         &Vertex::Source,
///         &Vertex::Node("Vancouver"),
///         &Vertex::Node("Toronto"),
///         &Vertex::Node("Halifax"),
///         &Vertex::Sink]);
/// ```
#[derive(Clone)]
pub struct GraphBuilder<T: Clone + Ord> {
    // Edges exactly as added by the user, in insertion order.
    pub edge_list: Vec<(Vertex<T>, Vertex<T>, Capacity, Cost)>
}

impl<T> GraphBuilder<T> where T: Clone + Ord {
    /// Creates a new empty graph.
    pub fn new() -> Self {
        GraphBuilder {edge_list: Vec::new()}
    }

    /// Add an edge to the graph.
    ///
    /// `capacity` and `cost` have wrapper types so that you can't mix them up.
    ///
    /// Panics if `capacity` is negative.
    pub fn add_edge<A: Into<Vertex<T>>, B: Into<Vertex<T>>>(&mut self, a: A, b: B, capacity: Capacity, cost: Cost) -> &mut Self {
        if capacity.0 < 0 {
            panic!("capacity cannot be negative (capacity was {})", capacity.0)
        }
        let a = a.into();
        let b = b.into();
        // Self-loops, edges into the source, and edges out of the sink are rejected.
        assert!(a != b);
        assert!(a != Vertex::Sink);
        assert!(b != Vertex::Source);
        self.edge_list.push((a, b, capacity, cost));
        self
    }

    /// Computes the minimum cost maximum flow.
    ///
    /// Returns a tuple (total cost, list of paths). The paths are sorted in ascending order by length.
    ///
    /// This gives incorrect results when the total cost or the total flow exceeds 2^(31)-1.
    /// It is the responsibility of the caller to ensure that the total cost doesn't exceed 2^(31)-1.
    pub fn mcmf(&self) -> (i32, Vec<Path<T>>) {
        // Assign a dense integer id to every distinct vertex, including
        // Source and Sink even when no edge mentions them.
        let mut next_id = 0;
        let source = Vertex::Source.clone();
        let sink = Vertex::Sink.clone();
        let mut index_mapper = BTreeMap::new();
        for vertex in self.edge_list.iter()
            .flat_map(move |&(ref a, ref b, _, _)| iter::once(a).chain(iter::once(b)))
            .chain(iter::once(&source))
            .chain(iter::once(&sink)) {
            if !index_mapper.contains_key(&vertex) {
                index_mapper.insert(vertex, next_id);
                next_id += 1;
            }
        }
        let num_vertices = next_id;
        let mut g = Graph::new_default(num_vertices);
        for &(ref a, ref b, cap, cost) in &self.edge_list {
            let node_a = Node(*index_mapper.get(&a).unwrap());
            let node_b = Node(*index_mapper.get(&b).unwrap());
            if *a == Vertex::Source || *b == Vertex::Sink {
                // The + supply and - supply must be equal because of how LEMON interprets
                // its input.
                // http://lemon.cs.elte.hu/pub/doc/latest/a00005.html
                g.increase_supply(Node(*index_mapper.get(&Vertex::Source).unwrap()), cap.0 as i64);
                g.decrease_supply(Node(*index_mapper.get(&Vertex::Sink).unwrap()), cap.0 as i64);
            }
            g.add_edge(node_a, node_b, EdgeData::new(cost, cap));
        }
        let total_amount = g.mcmf() as i32;
        let (_, edges) = g.extract();
        // Translate internal node ids back to user vertices and drop edges
        // that carry no flow in the solution.
        let inverse_mapping: BTreeMap<_, _> = index_mapper.into_iter().map(|(a, b)| (b, a)).collect();
        let flows = edges.into_iter().map(|x| {
            let a = (**inverse_mapping.get(&x.a.0).unwrap()).clone();
            let b = (**inverse_mapping.get(&x.b.0).unwrap()).clone();
            let amount = x.data.flow as u32;
            let cost = x.data.cost as i32;
            Flow {a, b, amount, cost}
        })
        .filter(|x| x.amount != 0)
        .collect();
        let mut paths = GraphBuilder::path_decomposition(flows);
        paths.sort_by_key(|path| path.len());
        (total_amount, paths)
    }

    /// Split a set of edge flows into source-to-sink paths by repeatedly
    /// walking from `Source` and peeling off the bottleneck amount.
    fn path_decomposition(flows: Vec<Flow<T>>) -> Vec<Path<T>> {
        // Adjacency map: every flow is filed under its start vertex.
        let mut adj: BTreeMap<Vertex<T>, Vec<Flow<T>>> = flows.iter()
            .map(|x| (x.a.clone(), Vec::new()))
            .collect();
        for x in flows {
            adj.get_mut(&x.a).unwrap().push(x);
        }
        // Walk from `v` towards the sink; returns the bottleneck amount and
        // the path edges in reverse (sink-to-`v`) order. A flow that is only
        // partially consumed is pushed back with the remaining amount.
        fn decompose<T: Clone + Ord>(adj: &mut BTreeMap<Vertex<T>, Vec<Flow<T>>>, v: &Vertex<T>, parent_amount: u32) -> (u32, Vec<Flow<T>>) {
            if *v == Vertex::Sink {
                // Reached the sink: no constraint from this end.
                (std::u32::MAX, Vec::new())
            } else if adj.get(&v).into_iter().all(|x| x.is_empty()) {
                // Dead end: no remaining outgoing flow at this vertex.
                (0, Vec::new())
            } else {
                let flow = adj.get_mut(&v).unwrap().pop().unwrap();
                let amount = min(parent_amount, flow.amount);
                let (child_amount, child_path) = decompose(adj, &flow.b, amount);
                let amount = min(amount, child_amount);
                let mut path = child_path;
                if amount < flow.amount {
                    // Not fully used: return the remainder to the adjacency map.
                    adj.get_mut(&v).unwrap().push(Flow {amount: flow.amount - amount, ..flow.clone()});
                }
                path.push(Flow {amount, ..flow});
                (amount, path)
            }
        }
        let mut result = Vec::new();
        loop {
            let (flow, path) = decompose(&mut adj, &Vertex::Source, std::u32::MAX);
            if flow == 0 {
                break;
            } else {
                // `decompose` yields edges sink-first; reverse into source-to-sink order.
                result.push(path.into_iter().rev().collect());
            }
        }
        result.into_iter().map(|x| Path {flows: x}).collect()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[allow(non_snake_case)]
    fn mcmf() {
        let mut G = Graph::new_default(4);
        G.increase_supply(Node(0), 20);
        G.decrease_supply(Node(3), 20);
        G.add_edge(Node(0), Node(1), EdgeData::new(Cost(100), Capacity(10)));
        G.add_edge(Node(0), Node(2), EdgeData::new(Cost(300), Capacity(20)));
        G.add_edge(Node(1), Node(2), EdgeData::new(Cost(50), Capacity(5)));
        G.add_edge(Node(1), Node(3), EdgeData::new(Cost(200), Capacity(10)));
        G.add_edge(Node(2), Node(3), EdgeData::new(Cost(100), Capacity(20)));
        let cost = G.mcmf();
        let (_, edges) = G.extract();
        let flow: Vec<_> = edges.iter().map(|x| x.data.flow).collect();
        assert_eq!(cost, 6750);
        assert_eq!(flow, vec![10, 10, 5, 5, 15]);
    }

    #[test]
    fn large_number() {
        #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
        enum OnlyNode {
            Only
        }
        for i in 0..48 {
            let x = i * 1000;
            println!("x={}", x);
            let (total, _) = GraphBuilder::new()
                .add_edge(Vertex::Source, OnlyNode::Only, Capacity(x), Cost(x))
                .add_edge(Vertex::Source, OnlyNode::Only, Capacity(x), Cost(x))
                .add_edge(OnlyNode::Only, Vertex::Sink, Capacity(x), Cost(0))
                .mcmf();
            assert_eq!(total, (x as i64 * x as i64) as i32);
        }
    }

    #[test]
    fn empty_graph() {
        let (cost, paths) = GraphBuilder::<i32>::new().mcmf();
        assert_eq!(cost, 0);
        assert!(paths.is_empty())
    }

    #[test]
    fn large_capacities() {
        let max = 1 << 30;
        assert_eq!(max, 1073741824);
        let (cost, paths) = GraphBuilder::new()
            .add_edge(
                Vertex::Source,
                "A",
                Capacity(max),
                Cost(0),
            ).add_edge(
                "A",
                Vertex::Sink,
                Capacity(max),
                Cost(0),
            ).mcmf();
        assert_eq!(cost, 0);
        assert_eq!(paths.len(), 1);
    }

    #[test]
    #[should_panic]
    fn negative_capacity_panics() {
        GraphBuilder::new().add_edge("a", "b", Capacity(-1), Cost(0));
    }
}
34.673563
141
0.545846
8a2db6ba73025c16d4cae06224790798279f4cb7
1,334
use cssparser::Parser; use crate::parser::ParseError; use crate::properties::declaration::PropertyDeclaration; use crate::properties::declaration_block::SourcePropertyDeclaration; use crate::stylesheets::stylesheet::ParserContext; use crate::values::shortcut_for_four_values; use crate::values::specified::layout::LineStyle; pub struct Longhands { pub border_top_style: LineStyle, pub border_right_style: LineStyle, pub border_bottom_style: LineStyle, pub border_left_style: LineStyle, } shortcut_for_four_values!( Longhands, border_top_style, border_right_style, border_bottom_style, border_left_style, LineStyle ); /// Parse the given shorthand and fill the result into the /// `declarations` vector. pub fn parse_into<'i, 't>( declarations: &mut SourcePropertyDeclaration, _context: &ParserContext, input: &mut Parser<'i, 't>, ) -> Result<(), ParseError<'i>> { input .parse_entirely(|input| Longhands::parse_values(input)) .map(|longhands| { declarations.push(PropertyDeclaration::BorderTopStyle(longhands.border_top_style)); declarations.push(PropertyDeclaration::BorderRightStyle(longhands.border_right_style)); declarations.push(PropertyDeclaration::BorderBottomStyle(longhands.border_bottom_style)); declarations.push(PropertyDeclaration::BorderLeftStyle(longhands.border_left_style)); }) }
31.761905
92
0.794603
896e16058b884fe113666ec77b4d66c314e3b87b
845
use riscv::register::sstatus::{self, Sstatus, SPP}; #[repr(C)] pub struct TrapContext { pub x: [usize; 32], pub sstatus: Sstatus, pub sepc: usize, pub kernel_satp: usize, pub kernel_sp: usize, pub trap_handler: usize, } impl TrapContext { pub fn set_sp(&mut self, sp: usize) { self.x[2] = sp; } pub fn app_init_context( entry: usize, sp: usize, kernel_satp: usize, kernel_sp: usize, trap_handler: usize, ) -> Self { let mut sstatus = sstatus::read(); sstatus.set_spp(SPP::User); let mut cx = Self { x: [0; 32], sstatus, //33 sepc: entry, //34 kernel_satp, //35 kernel_sp, //36 trap_handler, //37 }; cx.set_sp(sp); cx } }
22.236842
51
0.504142
1aad104644b8cb2fddd92d01cae9f339fbaad243
278
// SPDX-License-Identifier: MIT /* * File: src/math/nextafterf.rs * * The nextafterf function. * * Author: HTG-YT * Copyright (c) 2021 The LibM Team of the HaruxOS Project */ #[no_mangle] pub extern "C" fn nextafterf(x: f32, y: f32) -> f32 { libm::nextafterf(x, y) }
19.857143
58
0.651079
288cb1ae888e603c6c6b60cd012eceeb277fbaef
1,811
use std::sync::Arc; use crate::{ error::CommandError, manager::command::{producer::CommandProducer, ClockCommand, Command}, value::Value, }; use super::{ClockId, ClockShared, ClockTime}; /// Controls a clock. /// /// When a [`ClockHandle`] is dropped, the corresponding clock /// will be removed. pub struct ClockHandle { pub(crate) id: ClockId, pub(crate) shared: Arc<ClockShared>, pub(crate) command_producer: CommandProducer, } impl ClockHandle { /// Returns the unique identifier for the clock. pub fn id(&self) -> ClockId { self.id } /// Returns `true` if the clock is currently ticking /// and `false` if not. pub fn ticking(&self) -> bool { self.shared.ticking() } /// Returns the current time of the clock. pub fn time(&self) -> ClockTime { ClockTime { clock: self.id, ticks: self.shared.ticks(), } } /// Sets the duration of time between each tick (in seconds). pub fn set_interval(&mut self, interval: impl Into<Value>) -> Result<(), CommandError> { self.command_producer .push(Command::Clock(ClockCommand::SetInterval( self.id, interval.into(), ))) } /// Starts or resumes the clock. pub fn start(&mut self) -> Result<(), CommandError> { self.command_producer .push(Command::Clock(ClockCommand::Start(self.id))) } /// Pauses the clock. pub fn pause(&mut self) -> Result<(), CommandError> { self.command_producer .push(Command::Clock(ClockCommand::Pause(self.id))) } /// Stops and resets the clock. pub fn stop(&mut self) -> Result<(), CommandError> { self.command_producer .push(Command::Clock(ClockCommand::Stop(self.id))) } } impl Drop for ClockHandle { fn drop(&mut self) { self.shared.mark_for_removal(); } } impl From<&ClockHandle> for ClockId { fn from(handle: &ClockHandle) -> Self { handle.id() } }
22.6375
89
0.675318
d95d6b4a31a5724eeaf20858da84848ee09aa27f
114,361
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, models::*, API_VERSION}; pub mod accounts { use super::{models, models::*, API_VERSION}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, filter: Option<&str>, top: Option<i32>, skip: Option<i32>, select: Option<&str>, orderby: Option<&str>, count: Option<bool>, ) -> std::result::Result<DataLakeAnalyticsAccountListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeAnalytics/accounts", operation_config.base_path(), subscription_id ); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(skip) = skip { url.query_pairs_mut().append_pair("$skip", skip.to_string().as_str()); } if let Some(select) = select { url.query_pairs_mut().append_pair("$select", select); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } if let Some(count) = count { url.query_pairs_mut().append_pair("$count", count.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccountListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, filter: Option<&str>, top: Option<i32>, skip: Option<i32>, select: Option<&str>, orderby: Option<&str>, count: Option<bool>, ) -> std::result::Result<DataLakeAnalyticsAccountListResult, list_by_resource_group::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts", operation_config.base_path(), subscription_id, 
resource_group_name ); let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_resource_group::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(skip) = skip { url.query_pairs_mut().append_pair("$skip", skip.to_string().as_str()); } if let Some(select) = select { url.query_pairs_mut().append_pair("$select", select); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } if let Some(count) = count { url.query_pairs_mut().append_pair("$count", count.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_by_resource_group::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_resource_group::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccountListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_resource_group::Error::DeserializeError(source, 
rsp_body.clone()))?; Err(list_by_resource_group::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_resource_group { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<DataLakeAnalyticsAccount, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = 
req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: &CreateDataLakeAnalyticsAccountParameters, ) -> std::result::Result<create::Response, create::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = 
url::Url::parse(url_str).map_err(create::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(create::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Ok(create::Response::Created201(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| create::Error::DeserializeError(source, rsp_body.clone()))?; Err(create::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(DataLakeAnalyticsAccount), Created201(DataLakeAnalyticsAccount), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", 
// Remaining variants of create::Error (enum opened on the previous line): URL-parse,
// request-build, transport, (de)serialization and token-acquisition failures.
status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// PATCHes a Data Lake Analytics account. `parameters` is optional: when Some, it is
// serialized as JSON with a content-type header; when None an empty body is sent with no
// content-type. Adds Bearer auth (when a token credential is configured) and the
// api-version query pair. 200/201/202 map to update::Response variants; any other status
// is deserialized as models::ErrorResponse and returned as update::Error::DefaultResponse.
pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, parameters: Option<&UpdateDataLakeAnalyticsAccountParameters>, ) -> std::result::Result<update::Response, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)?
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Created201(rsp_value)) } http::StatusCode::ACCEPTED => { let rsp_body = rsp.body(); let rsp_value: DataLakeAnalyticsAccount = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Response/Error types for accounts::update. NOTE: the last #[error(...)] string literal is
// split across a physical line break here (legal in Rust); do not reflow this text.
pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(DataLakeAnalyticsAccount), Created201(DataLakeAnalyticsAccount), Accepted202(DataLakeAnalyticsAccount), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request 
body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// DELETEs a Data Lake Analytics account. Sends an empty body; 200/202/204 map to the
// payload-less delete::Response variants, anything else becomes delete::Error::DefaultResponse.
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse {
status_code, value: rsp_value, }) } } }
// Response/Error types for accounts::delete (generated boilerplate shared by all operations).
pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// POSTs a CheckNameAvailabilityParameters payload to the regional checkNameAvailability
// endpoint; 200 deserializes to NameAvailabilityInformation (body continues on next line).
pub async fn check_name_availability( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, parameters: &CheckNameAvailabilityParameters, ) -> std::result::Result<NameAvailabilityInformation, check_name_availability::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeAnalytics/locations/{}/checkNameAvailability", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(check_name_availability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(check_name_availability::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = 
req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(check_name_availability::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(check_name_availability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(check_name_availability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: NameAvailabilityInformation = serde_json::from_slice(rsp_body) .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| check_name_availability::Error::DeserializeError(source, rsp_body.clone()))?; Err(check_name_availability::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for accounts::check_name_availability; the trailing `}` after this module
// closes the enclosing accounts module.
pub mod check_name_availability { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// Operations on the Data Lake Store accounts linked to an analytics account.
// list_by_account GETs the dataLakeStoreAccounts collection with optional OData query
// options ($filter/$top/$skip/$select/$orderby/$count); signature continues on next line.
pub mod data_lake_store_accounts { use super::{models, models::*, API_VERSION}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, filter: 
Option<&str>, top: Option<i32>, skip: Option<i32>, select: Option<&str>, orderby: Option<&str>, count: Option<bool>, ) -> std::result::Result<DataLakeStoreAccountInformationListResult, list_by_account::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/dataLakeStoreAccounts", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_by_account::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_account::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(skip) = skip { url.query_pairs_mut().append_pair("$skip", skip.to_string().as_str()); } if let Some(select) = select { url.query_pairs_mut().append_pair("$select", select); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } if let Some(count) = count { url.query_pairs_mut().append_pair("$count", count.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_account::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_account::Error::ExecuteRequestError)?; match rsp.status() {
// Response handling for list_by_account (match opened on the previous line): 200 ->
// DataLakeStoreAccountInformationListResult, other statuses -> DefaultResponse.
http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeStoreAccountInformationListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_account::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for data_lake_store_accounts::list_by_account (generated boilerplate).
pub mod list_by_account { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GETs a single linked Data Lake Store account by name; 200 -> DataLakeStoreAccountInformation.
pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, data_lake_store_account_name: &str, ) -> std::result::Result<DataLakeStoreAccountInformation, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/dataLakeStoreAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, data_lake_store_account_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = 
req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: DataLakeStoreAccountInformation = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for data_lake_store_accounts::get, followed by the opener of `add`
// (its parameter list continues on the next line).
pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn add( 
// Parameters and body of `add` (opened on the previous line): PUTs a link to a Data Lake
// Store account. `parameters` is optional — JSON body with content-type when Some, empty
// body otherwise. Success is a bare 200 (unit return).
operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, data_lake_store_account_name: &str, parameters: Option<&AddDataLakeStoreParameters>, ) -> std::result::Result<(), add::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/dataLakeStoreAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, data_lake_store_account_name ); let mut url = url::Url::parse(url_str).map_err(add::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(add::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(add::Error::SerializeError)?
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(add::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(add::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| add::Error::DeserializeError(source, rsp_body.clone()))?; Err(add::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for data_lake_store_accounts::add (generated boilerplate).
pub mod add { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// DELETEs the link between the analytics account and a Data Lake Store account; unit
// return on 200 (body continues on the next line).
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, data_lake_store_account_name: &str, ) -> std::result::Result<(), delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/dataLakeStoreAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, data_lake_store_account_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); 
req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for data_lake_store_accounts::delete; the extra `}` afterwards closes the
// data_lake_store_accounts module.
pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
// Operations on the Azure Storage accounts linked to an analytics account. list_by_account
// GETs the storageAccounts collection with optional OData options; signature continues below.
pub mod storage_accounts { use super::{models, models::*, API_VERSION}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, 
resource_group_name: &str, account_name: &str, filter: Option<&str>, top: Option<i32>, skip: Option<i32>, select: Option<&str>, orderby: Option<&str>, count: Option<bool>, ) -> std::result::Result<StorageAccountInformationListResult, list_by_account::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_by_account::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_account::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(filter) = filter { url.query_pairs_mut().append_pair("$filter", filter); } if let Some(top) = top { url.query_pairs_mut().append_pair("$top", top.to_string().as_str()); } if let Some(skip) = skip { url.query_pairs_mut().append_pair("$skip", skip.to_string().as_str()); } if let Some(select) = select { url.query_pairs_mut().append_pair("$select", select); } if let Some(orderby) = orderby { url.query_pairs_mut().append_pair("$orderby", orderby); } if let Some(count) = count { url.query_pairs_mut().append_pair("$count", count.to_string().as_str()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_account::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await 
.map_err(list_by_account::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageAccountInformationListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_account::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for storage_accounts::list_by_account (generated boilerplate).
pub mod list_by_account { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GETs a single linked storage account by name; 200 -> StorageAccountInformation
// (body continues on the next line).
pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, ) -> std::result::Result<StorageAccountInformation, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = 
http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageAccountInformation = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for storage_accounts::get; the enum's final variant continues on the next line.
pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] 
GetTokenError(azure_core::Error), } }
// PUTs an AddStorageAccountParameters JSON payload to link a storage account (note:
// `parameters` is required here, unlike data_lake_store_accounts::add). Unit return on 200.
pub async fn add( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, parameters: &AddStorageAccountParameters, ) -> std::result::Result<(), add::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name ); let mut url = url::Url::parse(url_str).map_err(add::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(add::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(add::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(add::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(add::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| add::Error::DeserializeError(source, rsp_body.clone()))?; Err(add::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for storage_accounts::add; enum continues on the next line.
pub mod add { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] 
DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// PATCHes a linked storage account. `parameters` optional: JSON body + content-type when
// Some, empty body otherwise. Unit return on 200 (body continues on the next line).
pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, parameters: Option<&UpdateStorageAccountParameters>, ) -> std::result::Result<(), update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)?
// Tail of storage_accounts::update (if/else opened on the previous line): empty body when
// no parameters were supplied; 200 -> Ok(()), other statuses -> DefaultResponse.
} else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for storage_accounts::update (generated boilerplate).
pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// DELETEs the link to a storage account; unit return on 200 (body continues below).
pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, ) -> std::result::Result<(), delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder 
= req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(()), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } }
// Error types for storage_accounts::delete (generated boilerplate).
pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } }
// GETs the list of blob containers for a linked storage account; return type and body
// continue on the next line.
pub async fn list_storage_containers( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, ) -> 
// `list_storage_containers` (GET .../storageAccounts/{name}/containers):
// same generated request pipeline — build URL, optional Bearer token,
// `api-version` query pair, empty body — then 200 deserializes into
// StorageContainerListResult; any other status parses ErrorResponse into
// `DefaultResponse`.
std::result::Result<StorageContainerListResult, list_storage_containers::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}/containers", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name ); let mut url = url::Url::parse(url_str).map_err(list_storage_containers::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_storage_containers::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder .body(req_body) .map_err(list_storage_containers::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_storage_containers::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageContainerListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_storage_containers::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_storage_containers::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_storage_containers::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_storage_containers { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror ::
// Remainder of the `list_storage_containers` error module (same generated
// enum shape as every other operation), then the start of
// `get_storage_container`, which GETs a single container by name.
Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get_storage_container( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, container_name: &str, ) -> std::result::Result<StorageContainer, get_storage_container::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}/containers/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, storage_account_name, container_name ); let mut url = url::Url::parse(url_str).map_err(get_storage_container::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_storage_container::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req =
// Tail of `get_storage_container`: 200 deserializes the body into a
// StorageContainer model; any other status becomes
// `get_storage_container::Error::DefaultResponse`. Followed by that
// operation's generated error module and the start of `list_sas_tokens`
// (note the `& format !` spacing — this file is machine-generated and not
// rustfmt'ed), which POSTs to `/listSasTokens` for a container.
req_builder .body(req_body) .map_err(get_storage_container::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_storage_container::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: StorageContainer = serde_json::from_slice(rsp_body) .map_err(|source| get_storage_container::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| get_storage_container::Error::DeserializeError(source, rsp_body.clone()))?; Err(get_storage_container::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get_storage_container { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn list_sas_tokens( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, storage_account_name: &str, container_name: &str, ) -> std::result::Result<SasTokenInformationListResult, list_sas_tokens::Error> { let http_client = operation_config.http_client(); let url_str = & format !
// Body of `list_sas_tokens` (POST .../containers/{name}/listSasTokens):
// unlike the GET/DELETE operations it is a body-less POST, so it explicitly
// sets Content-Length: 0. 200 deserializes into SasTokenInformationListResult;
// other statuses become `DefaultResponse` with the parsed ErrorResponse.
("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/storageAccounts/{}/containers/{}/listSasTokens" , operation_config . base_path () , subscription_id , resource_group_name , account_name , storage_account_name , container_name) ; let mut url = url::Url::parse(url_str).map_err(list_sas_tokens::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::POST); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_sas_tokens::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_sas_tokens::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_sas_tokens::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: SasTokenInformationListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_sas_tokens::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_sas_tokens::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_sas_tokens::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_sas_tokens { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code:
// Remainder of the `list_sas_tokens` error module; the extra closing brace
// ends the enclosing operation-group module. Then `pub mod compute_policies`
// opens, starting with `list_by_account`, which GETs the `/computePolicies`
// collection for a Data Lake Analytics account.
http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod compute_policies { use super::{models, models::*, API_VERSION}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<ComputePolicyListResult, list_by_account::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/computePolicies", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_by_account::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_account::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_account::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await
// Tail of `compute_policies::list_by_account` (200 -> ComputePolicyListResult,
// otherwise DefaultResponse), its generated error module, and the start of
// `get`, which GETs a single compute policy by name.
.map_err(list_by_account::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ComputePolicyListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_account::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_account { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, compute_policy_name: &str, ) -> std::result::Result<ComputePolicy, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/computePolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, compute_policy_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder
// Body of `compute_policies::get` (GET .../computePolicies/{name}):
// 200 -> ComputePolicy, otherwise DefaultResponse. Followed by its error
// module and the opening of `create_or_update`, the only storage-account/
// compute-policy operation so far that serializes a JSON request body.
= req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ComputePolicy = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update(
// `compute_policies::create_or_update` (PUT .../computePolicies/{name}):
// sets `content-type: application/json` and serializes the required
// CreateOrUpdateComputePolicyParameters via azure_core::to_json; 200
// deserializes into ComputePolicy, any other status into ErrorResponse.
operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, compute_policy_name: &str, parameters: &CreateOrUpdateComputePolicyParameters, ) -> std::result::Result<ComputePolicy, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/computePolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, compute_policy_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ComputePolicy = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source,
// Tail of `create_or_update` plus its error module, then the start of
// `update` (PATCH), whose `parameters` argument is Optional — the JSON body
// and content-type header are only set when parameters are supplied.
rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, compute_policy_name: &str, parameters: Option<&UpdateComputePolicyParameters>, ) -> std::result::Result<ComputePolicy, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/computePolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, compute_policy_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); }
// Tail of `compute_policies::update`: serializes the optional parameters to
// JSON (setting content-type) or sends the shared empty body, then maps
// 200 -> ComputePolicy and anything else -> DefaultResponse. Followed by its
// error module and the start of `delete`, which (see next line) has a
// two-variant Response because the service may return 200 or 204.
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: ComputePolicy = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, compute_policy_name: &str, ) ->
// `compute_policies::delete` (DELETE .../computePolicies/{name}): unlike the
// unit-returning deletes above, this returns `delete::Response` so callers
// can distinguish 200 (Ok200) from 204 (NoContent204); any other status is
// parsed as ErrorResponse.
std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/computePolicies/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, compute_policy_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")]
// Remainder of the `delete` error module; the extra brace closes
// `pub mod compute_policies`. Then `pub mod firewall_rules` opens with
// `list_by_account`, which GETs the `/firewallRules` collection.
ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod firewall_rules { use super::{models, models::*, API_VERSION}; pub async fn list_by_account( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, ) -> std::result::Result<FirewallRuleListResult, list_by_account::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/firewallRules", operation_config.base_path(), subscription_id, resource_group_name, account_name ); let mut url = url::Url::parse(url_str).map_err(list_by_account::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list_by_account::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list_by_account::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(list_by_account::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body =
// Tail of `firewall_rules::list_by_account` (200 -> FirewallRuleListResult,
// otherwise DefaultResponse), its generated error module, and the start of
// `get`, which GETs a single firewall rule by name.
rsp.body(); let rsp_value: FirewallRuleListResult = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| list_by_account::Error::DeserializeError(source, rsp_body.clone()))?; Err(list_by_account::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod list_by_account { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, ) -> std::result::Result<FirewallRule, get::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/firewallRules/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let
// Body of `firewall_rules::get`: Bearer token, api-version pair, empty body;
// 200 -> FirewallRule, otherwise DefaultResponse. Followed by its error
// module and the start of `create_or_update` (PUT with JSON body).
token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: FirewallRule = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?; Err(get::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str,
// `firewall_rules::create_or_update` (PUT .../firewallRules/{name}):
// serializes the required CreateOrUpdateFirewallRuleParameters as JSON;
// 200 -> FirewallRule, otherwise DefaultResponse with the parsed
// ErrorResponse.
firewall_rule_name: &str, parameters: &CreateOrUpdateFirewallRuleParameters, ) -> std::result::Result<FirewallRule, create_or_update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/firewallRules/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(create_or_update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(create_or_update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: FirewallRule = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body) .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?; Err(create_or_update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod create_or_update {
// Error module for `create_or_update`, then `update` (PATCH) whose
// `parameters` argument is Optional — the JSON body and content-type header
// are only attached when parameters are supplied.
use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, parameters: Option<&UpdateFirewallRuleParameters>, ) -> std::result::Result<FirewallRule, update::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/firewallRules/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(update::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = if let Some(parameters) = parameters { req_builder = req_builder.header("content-type",
// Tail of `firewall_rules::update` (200 -> FirewallRule, otherwise
// DefaultResponse) and its error module. The final `pub async fn delete(`
// is cut off by the end of this view — its body continues past L280.
"application/json"); azure_core::to_json(parameters).map_err(update::Error::SerializeError)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: FirewallRule = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?; Err(update::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod update { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, account_name: &str, firewall_rule_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let http_client = operation_config.http_client(); let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.DataLakeAnalytics/accounts/{}/firewallRules/{}", operation_config.base_path(), subscription_id, resource_group_name, account_name, firewall_rule_name ); let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(delete::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => Ok(delete::Response::Ok200), http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?; Err(delete::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod delete { use super::{models, models::*, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] 
ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod operations { use super::{models, models::*, API_VERSION}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> { let http_client = operation_config.http_client(); let url_str = &format!("{}/providers/Microsoft.DataLakeAnalytics/operations", operation_config.base_path(),); let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(list::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?; let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: OperationListResult = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?; Err(list::Error::DefaultResponse { 
status_code, value: rsp_value, }) } } } pub mod list { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } } pub mod locations { use super::{models, models::*, API_VERSION}; pub async fn get_capability( operation_config: &crate::OperationConfig, subscription_id: &str, location: &str, ) -> std::result::Result<CapabilityInformation, get_capability::Error> { let http_client = operation_config.http_client(); let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.DataLakeAnalytics/locations/{}/capability", operation_config.base_path(), subscription_id, location ); let mut url = url::Url::parse(url_str).map_err(get_capability::Error::ParseUrlError)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); if let Some(token_credential) = operation_config.token_credential() { let token_response = token_credential .get_token(operation_config.token_credential_resource()) .await .map_err(get_capability::Error::GetTokenError)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); } url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = 
req_builder.body(req_body).map_err(get_capability::Error::BuildRequestError)?; let rsp = http_client .execute_request(req) .await .map_err(get_capability::Error::ExecuteRequestError)?; match rsp.status() { http::StatusCode::OK => { let rsp_body = rsp.body(); let rsp_value: CapabilityInformation = serde_json::from_slice(rsp_body).map_err(|source| get_capability::Error::DeserializeError(source, rsp_body.clone()))?; Ok(rsp_value) } http::StatusCode::NOT_FOUND => Err(get_capability::Error::NotFound404 {}), status_code => { let rsp_body = rsp.body(); let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body).map_err(|source| get_capability::Error::DeserializeError(source, rsp_body.clone()))?; Err(get_capability::Error::DefaultResponse { status_code, value: rsp_value, }) } } } pub mod get_capability { use super::{models, models::*, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("Error response #response_type")] NotFound404 {}, #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrlError(url::ParseError), #[error("Failed to build request: {0}")] BuildRequestError(http::Error), #[error("Failed to execute request: {0}")] ExecuteRequestError(azure_core::HttpError), #[error("Failed to serialize request body: {0}")] SerializeError(serde_json::Error), #[error("Failed to deserialize response: {0}, body: {1:?}")] DeserializeError(serde_json::Error, bytes::Bytes), #[error("Failed to get access token: {0}")] GetTokenError(azure_core::Error), } } }
47.630571
302
0.584089
ab86d39d4e946d201edd4d32fc3953650b8b95b1
3,477
#[cfg(not(feature = "mesalock_sgx"))] use std::fmt; #[cfg(not(feature = "mesalock_sgx"))] use std::str::FromStr; use parity_scale_codec::{Decode, Encode, Error, Input, Output}; use secp256k1::key::PublicKey; #[cfg(not(feature = "mesalock_sgx"))] use serde::de; #[cfg(not(feature = "mesalock_sgx"))] use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::common::H264; /// What can be access in TX -- TODO: revisit when enforced by HW encryption / enclaves /// TODO: custom Encode/Decode when data structures are finalized (for backwards/forwards compatibility, encoders/decoders should be able to work with old formats) #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode, PartialOrd, Ord)] #[cfg_attr(not(feature = "mesalock_sgx"), derive(Serialize, Deserialize))] pub enum TxAccess { AllData, // TODO: u16 and Vec size check in Decode implementation Output(u64), // TODO: other components? // TODO: TX ID could be computed as a root of a merkle tree from different TX components? 
} impl Default for TxAccess { fn default() -> Self { TxAccess::AllData } } /// Specifies who can access what -- TODO: revisit when enforced by HW encryption / enclaves #[derive(Debug, PartialEq, Eq, Clone, PartialOrd, Ord)] #[cfg_attr(not(feature = "mesalock_sgx"), derive(Serialize, Deserialize))] pub struct TxAccessPolicy { #[cfg_attr( not(feature = "mesalock_sgx"), serde(serialize_with = "serialize_view_key") )] #[cfg_attr( not(feature = "mesalock_sgx"), serde(deserialize_with = "deserialize_view_key") )] pub view_key: PublicKey, pub access: TxAccess, } #[cfg(not(feature = "mesalock_sgx"))] fn serialize_view_key<S>( view_key: &PublicKey, serializer: S, ) -> std::result::Result<S::Ok, S::Error> where S: Serializer, { let view_key_string = format!("{}", view_key); serializer.serialize_str(&view_key_string) } #[cfg(not(feature = "mesalock_sgx"))] fn deserialize_view_key<'de, D>(deserializer: D) -> std::result::Result<PublicKey, D::Error> where D: Deserializer<'de>, { struct StrVisitor; impl<'de> de::Visitor<'de> for StrVisitor { type Value = PublicKey; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("view key in hexadecimal string") } #[inline] fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: de::Error, { PublicKey::from_str(value).map_err(|err| de::Error::custom(err.to_string())) } } deserializer.deserialize_str(StrVisitor) } impl Encode for TxAccessPolicy { fn encode_to<W: Output>(&self, dest: &mut W) { self.view_key.serialize().encode_to(dest); self.access.encode_to(dest); } fn size_hint(&self) -> usize { 33 + self.access.size_hint() } } impl Decode for TxAccessPolicy { fn decode<I: Input>(input: &mut I) -> Result<Self, Error> { let view_key_bytes = H264::decode(input)?; let view_key = PublicKey::from_slice(&view_key_bytes) .map_err(|_| Error::from("Unable to parse public key"))?; let access = TxAccess::decode(input)?; Ok(TxAccessPolicy::new(view_key, access)) } } impl TxAccessPolicy { /// creates 
tx access policy pub fn new(view_key: PublicKey, access: TxAccess) -> Self { TxAccessPolicy { view_key, access } } }
30.5
163
0.652862
e26604b6aa9d1a238bfa80d418c5efb061c3d69e
2,650
//! A CLI tool for converting between table schema formats. #![forbid(unsafe_code)] #![warn( missing_docs, unused_extern_crates, clippy::all, clippy::cargo, clippy::cast_lossless, clippy::cast_possible_truncation, clippy::cast_possible_wrap, clippy::cast_precision_loss, clippy::cast_sign_loss, clippy::inefficient_to_string )] // We handle this using `cargo deny` instead. #![allow(clippy::multiple_crate_versions)] // Needed to prevent linker errors about OpenSSL. #[allow(unused_extern_crates)] extern crate openssl; // Pull in all of `tokio`'s experimental `async` and `await` support. #[macro_use] #[allow(unused_imports)] extern crate tokio; use common_failures::{quick_main, Result}; use dbcrossbarlib::{config::Configuration, run_futures_with_runtime, Context}; use slog::{debug, Drain}; use slog_async::{self, OverflowStrategy}; use structopt::{self, StructOpt}; mod cmd; mod logging; quick_main!(run); fn run() -> Result<()> { // Set up standard Rust logging for third-party crates. env_logger::init(); // Find our system SSL configuration, even if we're statically linked. openssl_probe::init_ssl_cert_env_vars(); // Parse our command-line arguments. let opt = cmd::Opt::from_args(); // Set up `slog`-based structured logging for our async code, because we // need to be able to untangle very complicated logs from many parallel // async tasks. let base_drain = opt.log_format.create_drain(); let filtered = slog_envlogger::new(base_drain); let drain = slog_async::Async::new(filtered) .chan_size(64) // This may slow down application performance, even when `RUST_LOG` is // not set. But we've been seeing a lot of dropped messages lately, so // let's try it. .overflow_strategy(OverflowStrategy::Block) .build() .fuse(); let log = logging::global_logger_with_extra_values(drain, &opt.log_extra)?; // Set up an execution context for our background workers, if any. The `ctx` // must be passed to all our background operations. 
The `worker_fut` will // return either success when all background workers have finished, or an // error as soon as one fails. let (ctx, worker_fut) = Context::create(log); // Log our command-line options. debug!(ctx.log(), "{:?}", opt); // Load our configuration. let config = Configuration::try_default()?; debug!(ctx.log(), "{:?}", config); // Create a future to run our command. let cmd_fut = cmd::run(ctx, config, opt); // Run our futures. run_futures_with_runtime(cmd_fut, worker_fut) }
31.927711
80
0.689811
fb0a9d8a2c699a4aa1b35b229138f9bba03d9cbf
179
mod callgraph; mod checker; mod collector; mod dataflow; mod genkill; mod lock; mod tracker; mod report; mod def_use; pub use self::checker::DoubleLockChecker; use super::config;
14.916667
41
0.776536
fbdcdc81ae9bb9b8cbb2487068e15fa5f4a344c0
21,888
// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. use deno_core::anyhow::Context; use deno_core::error::{uri_error, AnyError}; pub use deno_core::normalize_path; use deno_core::ModuleSpecifier; use deno_runtime::deno_crypto::rand; use std::env::current_dir; use std::fs::OpenOptions; use std::io::{Error, Write}; use std::path::{Path, PathBuf}; use walkdir::WalkDir; pub fn atomic_write_file<T: AsRef<[u8]>>( filename: &Path, data: T, mode: u32, ) -> std::io::Result<()> { let rand: String = (0..4) .map(|_| format!("{:02x}", rand::random::<u8>())) .collect(); let extension = format!("{}.tmp", rand); let tmp_file = filename.with_extension(extension); write_file(&tmp_file, data, mode)?; std::fs::rename(tmp_file, filename)?; Ok(()) } pub fn write_file<T: AsRef<[u8]>>( filename: &Path, data: T, mode: u32, ) -> std::io::Result<()> { write_file_2(filename, data, true, mode, true, false) } pub fn write_file_2<T: AsRef<[u8]>>( filename: &Path, data: T, update_mode: bool, mode: u32, is_create: bool, is_append: bool, ) -> std::io::Result<()> { let mut file = OpenOptions::new() .read(false) .write(true) .append(is_append) .truncate(!is_append) .create(is_create) .open(filename)?; if update_mode { #[cfg(unix)] { use std::os::unix::fs::PermissionsExt; let mode = mode & 0o777; let permissions = PermissionsExt::from_mode(mode); file.set_permissions(permissions)?; } #[cfg(not(unix))] let _ = mode; } file.write_all(data.as_ref()) } /// Similar to `std::fs::canonicalize()` but strips UNC prefixes on Windows. 
pub fn canonicalize_path(path: &Path) -> Result<PathBuf, Error> { let path = path.canonicalize()?; #[cfg(windows)] return Ok(strip_unc_prefix(path)); #[cfg(not(windows))] return Ok(path); } #[cfg(windows)] fn strip_unc_prefix(path: PathBuf) -> PathBuf { use std::path::Component; use std::path::Prefix; let mut components = path.components(); match components.next() { Some(Component::Prefix(prefix)) => { match prefix.kind() { // \\?\device Prefix::Verbatim(device) => { let mut path = PathBuf::new(); path.push(format!(r"\\{}\", device.to_string_lossy())); path.extend(components.filter(|c| !matches!(c, Component::RootDir))); path } // \\?\c:\path Prefix::VerbatimDisk(_) => { let mut path = PathBuf::new(); path.push(prefix.as_os_str().to_string_lossy().replace(r"\\?\", "")); path.extend(components); path } // \\?\UNC\hostname\share_name\path Prefix::VerbatimUNC(hostname, share_name) => { let mut path = PathBuf::new(); path.push(format!( r"\\{}\{}\", hostname.to_string_lossy(), share_name.to_string_lossy() )); path.extend(components.filter(|c| !matches!(c, Component::RootDir))); path } _ => path, } } _ => path, } } pub fn resolve_from_cwd(path: &Path) -> Result<PathBuf, AnyError> { let resolved_path = if path.is_absolute() { path.to_owned() } else { let cwd = current_dir().context("Failed to get current working directory")?; cwd.join(path) }; Ok(normalize_path(&resolved_path)) } /// Checks if the path has extension Deno supports. pub fn is_supported_ext(path: &Path) -> bool { if let Some(ext) = get_extension(path) { matches!( ext.as_str(), "ts" | "tsx" | "js" | "jsx" | "mjs" | "mts" | "cjs" | "cts" ) } else { false } } /// Checks if the path has a basename and extension Deno supports for tests. 
pub fn is_supported_test_path(path: &Path) -> bool { if let Some(name) = path.file_stem() { let basename = name.to_string_lossy(); (basename.ends_with("_test") || basename.ends_with(".test") || basename == "test") && is_supported_ext(path) } else { false } } /// Checks if the path has an extension Deno supports for tests. pub fn is_supported_test_ext(path: &Path) -> bool { if let Some(ext) = get_extension(path) { matches!( ext.as_str(), "ts" | "tsx" | "js" | "jsx" | "mjs" | "mts" | "cjs" | "cts" | "md" | "mkd" | "mkdn" | "mdwn" | "mdown" | "markdown" ) } else { false } } /// Get the extension of a file in lowercase. pub fn get_extension(file_path: &Path) -> Option<String> { return file_path .extension() .and_then(|e| e.to_str()) .map(|e| e.to_lowercase()); } /// Collects file paths that satisfy the given predicate, by recursively walking `files`. /// If the walker visits a path that is listed in `ignore`, it skips descending into the directory. pub fn collect_files<P>( files: &[PathBuf], ignore: &[PathBuf], predicate: P, ) -> Result<Vec<PathBuf>, AnyError> where P: Fn(&Path) -> bool, { let mut target_files = Vec::new(); // retain only the paths which exist and ignore the rest let canonicalized_ignore: Vec<PathBuf> = ignore .iter() .filter_map(|i| canonicalize_path(i).ok()) .collect(); for file in files { for entry in WalkDir::new(file) .into_iter() .filter_entry(|e| { canonicalize_path(e.path()).map_or(false, |c| { !canonicalized_ignore.iter().any(|i| c.starts_with(i)) }) }) .filter_map(|e| match e { Ok(e) if !e.file_type().is_dir() && predicate(e.path()) => Some(e), _ => None, }) { target_files.push(canonicalize_path(entry.path())?) } } Ok(target_files) } /// Collects module specifiers that satisfy the given predicate as a file path, by recursively walking `include`. /// Specifiers that start with http and https are left intact. 
pub fn collect_specifiers<P>( include: Vec<String>, ignore: &[PathBuf], predicate: P, ) -> Result<Vec<ModuleSpecifier>, AnyError> where P: Fn(&Path) -> bool, { let mut prepared = vec![]; let root_path = std::env::current_dir()?; for path in include { let lowercase_path = path.to_lowercase(); if lowercase_path.starts_with("http://") || lowercase_path.starts_with("https://") { let url = ModuleSpecifier::parse(&path)?; prepared.push(url); continue; } let p = normalize_path(&root_path.join(path)); if p.is_dir() { let test_files = collect_files(&[p], ignore, &predicate).unwrap(); let mut test_files_as_urls = test_files .iter() .map(|f| ModuleSpecifier::from_file_path(f).unwrap()) .collect::<Vec<ModuleSpecifier>>(); test_files_as_urls.sort(); prepared.extend(test_files_as_urls); } else { let url = ModuleSpecifier::from_file_path(p).unwrap(); prepared.push(url); } } Ok(prepared) } /// Asynchronously removes a directory and all its descendants, but does not error /// when the directory does not exist. pub async fn remove_dir_all_if_exists(path: &Path) -> std::io::Result<()> { let result = tokio::fs::remove_dir_all(path).await; match result { Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), _ => result, } } /// Attempts to convert a specifier to a file path. By default, uses the Url /// crate's `to_file_path()` method, but falls back to try and resolve unix-style /// paths on Windows. pub fn specifier_to_file_path( specifier: &ModuleSpecifier, ) -> Result<PathBuf, AnyError> { let result = if cfg!(windows) { match specifier.to_file_path() { Ok(path) => Ok(path), Err(()) => { // This might be a unix-style path which is used in the tests even on Windows. // Attempt to see if we can convert it to a `PathBuf`. This code should be removed // once/if https://github.com/servo/rust-url/issues/730 is implemented. 
if specifier.scheme() == "file" && specifier.host().is_none() && specifier.port().is_none() && specifier.path_segments().is_some() { let path_str = specifier.path(); match String::from_utf8( percent_encoding::percent_decode(path_str.as_bytes()).collect(), ) { Ok(path_str) => Ok(PathBuf::from(path_str)), Err(_) => Err(()), } } else { Err(()) } } } } else { specifier.to_file_path() }; match result { Ok(path) => Ok(path), Err(()) => Err(uri_error(format!( "Invalid file path.\n Specifier: {}", specifier ))), } } /// Ensures a specifier that will definitely be a directory has a trailing slash. pub fn ensure_directory_specifier( mut specifier: ModuleSpecifier, ) -> ModuleSpecifier { let path = specifier.path(); if !path.ends_with('/') { let new_path = format!("{}/", path); specifier.set_path(&new_path); } specifier } /// Gets the parent of this module specifier. pub fn specifier_parent(specifier: &ModuleSpecifier) -> ModuleSpecifier { let mut specifier = specifier.clone(); // don't use specifier.segments() because it will strip the leading slash let mut segments = specifier.path().split('/').collect::<Vec<_>>(); if segments.iter().all(|s| s.is_empty()) { return specifier; } if let Some(last) = segments.last() { if last.is_empty() { segments.pop(); } segments.pop(); let new_path = format!("{}/", segments.join("/")); specifier.set_path(&new_path); } specifier } /// This function checks if input path has trailing slash or not. If input path /// has trailing slash it will return true else it will return false. 
pub fn path_has_trailing_slash(path: &Path) -> bool { if let Some(path_str) = path.to_str() { if cfg!(windows) { path_str.ends_with('\\') } else { path_str.ends_with('/') } } else { false } } #[cfg(test)] mod tests { use super::*; use tempfile::TempDir; #[test] fn resolve_from_cwd_child() { let cwd = current_dir().unwrap(); assert_eq!(resolve_from_cwd(Path::new("a")).unwrap(), cwd.join("a")); } #[test] fn resolve_from_cwd_dot() { let cwd = current_dir().unwrap(); assert_eq!(resolve_from_cwd(Path::new(".")).unwrap(), cwd); } #[test] fn resolve_from_cwd_parent() { let cwd = current_dir().unwrap(); assert_eq!(resolve_from_cwd(Path::new("a/..")).unwrap(), cwd); } #[test] fn test_normalize_path() { assert_eq!(normalize_path(Path::new("a/../b")), PathBuf::from("b")); assert_eq!(normalize_path(Path::new("a/./b/")), PathBuf::from("a/b/")); assert_eq!( normalize_path(Path::new("a/./b/../c")), PathBuf::from("a/c") ); if cfg!(windows) { assert_eq!( normalize_path(Path::new("C:\\a\\.\\b\\..\\c")), PathBuf::from("C:\\a\\c") ); } } // TODO: Get a good expected value here for Windows. 
#[cfg(not(windows))] #[test] fn resolve_from_cwd_absolute() { let expected = Path::new("/a"); assert_eq!(resolve_from_cwd(expected).unwrap(), expected); } #[test] fn test_is_supported_ext() { assert!(!is_supported_ext(Path::new("tests/subdir/redirects"))); assert!(!is_supported_ext(Path::new("README.md"))); assert!(is_supported_ext(Path::new("lib/typescript.d.ts"))); assert!(is_supported_ext(Path::new("testdata/001_hello.js"))); assert!(is_supported_ext(Path::new("testdata/002_hello.ts"))); assert!(is_supported_ext(Path::new("foo.jsx"))); assert!(is_supported_ext(Path::new("foo.tsx"))); assert!(is_supported_ext(Path::new("foo.TS"))); assert!(is_supported_ext(Path::new("foo.TSX"))); assert!(is_supported_ext(Path::new("foo.JS"))); assert!(is_supported_ext(Path::new("foo.JSX"))); assert!(is_supported_ext(Path::new("foo.mjs"))); assert!(is_supported_ext(Path::new("foo.mts"))); assert!(is_supported_ext(Path::new("foo.cjs"))); assert!(is_supported_ext(Path::new("foo.cts"))); assert!(!is_supported_ext(Path::new("foo.mjsx"))); } #[test] fn test_is_supported_test_ext() { assert!(!is_supported_test_ext(Path::new("tests/subdir/redirects"))); assert!(is_supported_test_ext(Path::new("README.md"))); assert!(is_supported_test_ext(Path::new("readme.MD"))); assert!(is_supported_test_ext(Path::new("lib/typescript.d.ts"))); assert!(is_supported_test_ext(Path::new("testdata/001_hello.js"))); assert!(is_supported_test_ext(Path::new("testdata/002_hello.ts"))); assert!(is_supported_test_ext(Path::new("foo.jsx"))); assert!(is_supported_test_ext(Path::new("foo.tsx"))); assert!(is_supported_test_ext(Path::new("foo.TS"))); assert!(is_supported_test_ext(Path::new("foo.TSX"))); assert!(is_supported_test_ext(Path::new("foo.JS"))); assert!(is_supported_test_ext(Path::new("foo.JSX"))); assert!(is_supported_test_ext(Path::new("foo.mjs"))); assert!(is_supported_test_ext(Path::new("foo.mts"))); assert!(is_supported_test_ext(Path::new("foo.cjs"))); 
assert!(is_supported_test_ext(Path::new("foo.cts"))); assert!(!is_supported_test_ext(Path::new("foo.mjsx"))); assert!(!is_supported_test_ext(Path::new("foo.jsonc"))); assert!(!is_supported_test_ext(Path::new("foo.JSONC"))); assert!(!is_supported_test_ext(Path::new("foo.json"))); assert!(!is_supported_test_ext(Path::new("foo.JsON"))); } #[test] fn test_is_supported_test_path() { assert!(is_supported_test_path(Path::new( "tests/subdir/foo_test.ts" ))); assert!(is_supported_test_path(Path::new( "tests/subdir/foo_test.tsx" ))); assert!(is_supported_test_path(Path::new( "tests/subdir/foo_test.js" ))); assert!(is_supported_test_path(Path::new( "tests/subdir/foo_test.jsx" ))); assert!(is_supported_test_path(Path::new("bar/foo.test.ts"))); assert!(is_supported_test_path(Path::new("bar/foo.test.tsx"))); assert!(is_supported_test_path(Path::new("bar/foo.test.js"))); assert!(is_supported_test_path(Path::new("bar/foo.test.jsx"))); assert!(is_supported_test_path(Path::new("foo/bar/test.js"))); assert!(is_supported_test_path(Path::new("foo/bar/test.jsx"))); assert!(is_supported_test_path(Path::new("foo/bar/test.ts"))); assert!(is_supported_test_path(Path::new("foo/bar/test.tsx"))); assert!(!is_supported_test_path(Path::new("README.md"))); assert!(!is_supported_test_path(Path::new("lib/typescript.d.ts"))); assert!(!is_supported_test_path(Path::new("notatest.js"))); assert!(!is_supported_test_path(Path::new("NotAtest.ts"))); } #[test] fn test_collect_files() { fn create_files(dir_path: &Path, files: &[&str]) { std::fs::create_dir(dir_path).expect("Failed to create directory"); for f in files { let path = dir_path.join(f); std::fs::write(path, "").expect("Failed to create file"); } } // dir.ts // ├── a.ts // ├── b.js // ├── child // │ ├── e.mjs // │ ├── f.mjsx // │ ├── .foo.TS // │ └── README.md // ├── c.tsx // ├── d.jsx // └── ignore // ├── g.d.ts // └── .gitignore let t = TempDir::new().expect("tempdir fail"); let root_dir_path = t.path().join("dir.ts"); let root_dir_files = 
["a.ts", "b.js", "c.tsx", "d.jsx"]; create_files(&root_dir_path, &root_dir_files); let child_dir_path = root_dir_path.join("child"); let child_dir_files = ["e.mjs", "f.mjsx", ".foo.TS", "README.md"]; create_files(&child_dir_path, &child_dir_files); let ignore_dir_path = root_dir_path.join("ignore"); let ignore_dir_files = ["g.d.ts", ".gitignore"]; create_files(&ignore_dir_path, &ignore_dir_files); let result = collect_files(&[root_dir_path], &[ignore_dir_path], |path| { // exclude dotfiles path .file_name() .and_then(|f| f.to_str()) .map_or(false, |f| !f.starts_with('.')) }) .unwrap(); let expected = [ "a.ts", "b.js", "e.mjs", "f.mjsx", "README.md", "c.tsx", "d.jsx", ]; for e in expected.iter() { assert!(result.iter().any(|r| r.ends_with(e))); } assert_eq!(result.len(), expected.len()); } #[test] fn test_collect_specifiers() { fn create_files(dir_path: &Path, files: &[&str]) { std::fs::create_dir(dir_path).expect("Failed to create directory"); for f in files { let path = dir_path.join(f); std::fs::write(path, "").expect("Failed to create file"); } } // dir.ts // ├── a.ts // ├── b.js // ├── child // │ ├── e.mjs // │ ├── f.mjsx // │ ├── .foo.TS // │ └── README.md // ├── c.tsx // ├── d.jsx // └── ignore // ├── g.d.ts // └── .gitignore let t = TempDir::new().expect("tempdir fail"); let root_dir_path = t.path().join("dir.ts"); let root_dir_files = ["a.ts", "b.js", "c.tsx", "d.jsx"]; create_files(&root_dir_path, &root_dir_files); let child_dir_path = root_dir_path.join("child"); let child_dir_files = ["e.mjs", "f.mjsx", ".foo.TS", "README.md"]; create_files(&child_dir_path, &child_dir_files); let ignore_dir_path = root_dir_path.join("ignore"); let ignore_dir_files = ["g.d.ts", ".gitignore"]; create_files(&ignore_dir_path, &ignore_dir_files); let result = collect_specifiers( vec![ "http://localhost:8080".to_string(), root_dir_path.to_str().unwrap().to_string(), "https://localhost:8080".to_string(), ], &[ignore_dir_path], |path| { // exclude dotfiles path .file_name() 
.and_then(|f| f.to_str()) .map_or(false, |f| !f.starts_with('.')) }, ) .unwrap(); let root_dir_url = ModuleSpecifier::from_file_path( canonicalize_path(&root_dir_path).unwrap(), ) .unwrap() .to_string(); let expected: Vec<ModuleSpecifier> = [ "http://localhost:8080", &format!("{}/a.ts", root_dir_url), &format!("{}/b.js", root_dir_url), &format!("{}/c.tsx", root_dir_url), &format!("{}/child/README.md", root_dir_url), &format!("{}/child/e.mjs", root_dir_url), &format!("{}/child/f.mjsx", root_dir_url), &format!("{}/d.jsx", root_dir_url), "https://localhost:8080", ] .iter() .map(|f| ModuleSpecifier::parse(f).unwrap()) .collect::<Vec<ModuleSpecifier>>(); assert_eq!(result, expected); } #[cfg(windows)] #[test] fn test_strip_unc_prefix() { run_test(r"C:\", r"C:\"); run_test(r"C:\test\file.txt", r"C:\test\file.txt"); run_test(r"\\?\C:\", r"C:\"); run_test(r"\\?\C:\test\file.txt", r"C:\test\file.txt"); run_test(r"\\.\C:\", r"\\.\C:\"); run_test(r"\\.\C:\Test\file.txt", r"\\.\C:\Test\file.txt"); run_test(r"\\?\UNC\localhost\", r"\\localhost"); run_test(r"\\?\UNC\localhost\c$\", r"\\localhost\c$"); run_test( r"\\?\UNC\localhost\c$\Windows\file.txt", r"\\localhost\c$\Windows\file.txt", ); run_test(r"\\?\UNC\wsl$\deno.json", r"\\wsl$\deno.json"); run_test(r"\\?\server1", r"\\server1"); run_test(r"\\?\server1\e$\", r"\\server1\e$\"); run_test( r"\\?\server1\e$\test\file.txt", r"\\server1\e$\test\file.txt", ); fn run_test(input: &str, expected: &str) { assert_eq!( strip_unc_prefix(PathBuf::from(input)), PathBuf::from(expected) ); } } #[test] fn test_specifier_to_file_path() { run_success_test("file:///", "/"); run_success_test("file:///test", "/test"); run_success_test("file:///dir/test/test.txt", "/dir/test/test.txt"); run_success_test( "file:///dir/test%20test/test.txt", "/dir/test test/test.txt", ); fn run_success_test(specifier: &str, expected_path: &str) { let result = specifier_to_file_path(&ModuleSpecifier::parse(specifier).unwrap()) .unwrap(); assert_eq!(result, 
PathBuf::from(expected_path)); } } #[test] fn test_ensure_directory_specifier() { run_test("file:///", "file:///"); run_test("file:///test", "file:///test/"); run_test("file:///test/", "file:///test/"); run_test("file:///test/other", "file:///test/other/"); run_test("file:///test/other/", "file:///test/other/"); fn run_test(specifier: &str, expected: &str) { let result = ensure_directory_specifier(ModuleSpecifier::parse(specifier).unwrap()); assert_eq!(result.to_string(), expected); } } #[test] fn test_specifier_parent() { run_test("file:///", "file:///"); run_test("file:///test", "file:///"); run_test("file:///test/", "file:///"); run_test("file:///test/other", "file:///test/"); run_test("file:///test/other.txt", "file:///test/"); run_test("file:///test/other/", "file:///test/"); fn run_test(specifier: &str, expected: &str) { let result = specifier_parent(&ModuleSpecifier::parse(specifier).unwrap()); assert_eq!(result.to_string(), expected); } } #[test] fn test_path_has_trailing_slash() { #[cfg(not(windows))] { run_test("/Users/johndoe/Desktop/deno-project/target/", true); run_test(r"/Users/johndoe/deno-project/target//", true); run_test("/Users/johndoe/Desktop/deno-project", false); run_test(r"/Users/johndoe/deno-project\", false); } #[cfg(windows)] { run_test(r"C:\test\deno-project\", true); run_test(r"C:\test\deno-project\\", true); run_test(r"C:\test\file.txt", false); run_test(r"C:\test\file.txt/", false); } fn run_test(path_str: &str, expected: bool) { let path = Path::new(path_str); let result = path_has_trailing_slash(path); assert_eq!(result, expected); } } }
29.820163
113
0.599689
08fe77cfddc95eb583f101edca204688ddcbbaa8
313
// strings1.rs // Make me compile without changing the function signature! // Execute `rustlings hint strings1` for hints ;) fn main() { let answer = current_favorite_color(); println!("My current favorite color is {}", answer); } fn current_favorite_color() -> String { return "blue".to_string() }
24.076923
59
0.693291
214fae02ce27210e3010f41ec6cdda38c2c65fea
26,870
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Thread-local reference-counted boxes (the `Rc<T>` type). //! //! The `Rc<T>` type provides shared ownership of an immutable value. Destruction is deterministic, //! and will occur as soon as the last owner is gone. It is marked as non-sendable because it //! avoids the overhead of atomic reference counting. //! //! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer to the box. A //! `Weak<T>` pointer can be upgraded to an `Rc<T>` pointer, but will return `None` if the value //! has already been dropped. //! //! For example, a tree with parent pointers can be represented by putting the nodes behind strong //! `Rc<T>` pointers, and then storing the parent pointers as `Weak<T>` pointers. //! //! # Examples //! //! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`. We want to have our //! `Gadget`s point to their `Owner`. We can't do this with unique ownership, because more than one //! gadget may belong to the same `Owner`. `Rc<T>` allows us to share an `Owner` between multiple //! `Gadget`s, and have the `Owner` remain allocated as long as any `Gadget` points at it. //! //! ```rust //! use std::rc::Rc; //! //! struct Owner { //! name: String //! // ...other fields //! } //! //! struct Gadget { //! id: int, //! owner: Rc<Owner> //! // ...other fields //! } //! //! fn main() { //! // Create a reference counted Owner. //! let gadget_owner : Rc<Owner> = Rc::new( //! Owner { name: String::from_str("Gadget Man") } //! ); //! //! 
// Create Gadgets belonging to gadget_owner. To increment the reference //! // count we clone the `Rc<T>` object. //! let gadget1 = Gadget { id: 1, owner: gadget_owner.clone() }; //! let gadget2 = Gadget { id: 2, owner: gadget_owner.clone() }; //! //! drop(gadget_owner); //! //! // Despite dropping gadget_owner, we're still able to print out the name of //! // the Owner of the Gadgets. This is because we've only dropped the //! // reference count object, not the Owner it wraps. As long as there are //! // other `Rc<T>` objects pointing at the same Owner, it will remain allocated. Notice //! // that the `Rc<T>` wrapper around Gadget.owner gets automatically dereferenced //! // for us. //! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); //! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); //! //! // At the end of the method, gadget1 and gadget2 get destroyed, and with //! // them the last counted references to our Owner. Gadget Man now gets //! // destroyed as well. //! } //! ``` //! //! If our requirements change, and we also need to be able to traverse from Owner → Gadget, we //! will run into problems: an `Rc<T>` pointer from Owner → Gadget introduces a cycle between the //! objects. This means that their reference counts can never reach 0, and the objects will remain //! allocated: a memory leak. In order to get around this, we can use `Weak<T>` pointers. These //! pointers don't contribute to the total count. //! //! Rust actually makes it somewhat difficult to produce this loop in the first place: in order to //! end up with two objects that point at each other, one of them needs to be mutable. This is //! problematic because `Rc<T>` enforces memory safety by only giving out shared references to the //! object it wraps, and these don't allow direct mutation. We need to wrap the part of the object //! we wish to mutate in a `RefCell`, which provides *interior mutability*: a method to achieve //! 
mutability through a shared reference. `RefCell` enforces Rust's borrowing rules at runtime. //! Read the `Cell` documentation for more details on interior mutability. //! //! ```rust //! use std::rc::Rc; //! use std::rc::Weak; //! use std::cell::RefCell; //! //! struct Owner { //! name: String, //! gadgets: RefCell<Vec<Weak<Gadget>>> //! // ...other fields //! } //! //! struct Gadget { //! id: int, //! owner: Rc<Owner> //! // ...other fields //! } //! //! fn main() { //! // Create a reference counted Owner. Note the fact that we've put the //! // Owner's vector of Gadgets inside a RefCell so that we can mutate it //! // through a shared reference. //! let gadget_owner : Rc<Owner> = Rc::new( //! Owner { //! name: "Gadget Man".to_string(), //! gadgets: RefCell::new(Vec::new()) //! } //! ); //! //! // Create Gadgets belonging to gadget_owner as before. //! let gadget1 = Rc::new(Gadget{id: 1, owner: gadget_owner.clone()}); //! let gadget2 = Rc::new(Gadget{id: 2, owner: gadget_owner.clone()}); //! //! // Add the Gadgets to their Owner. To do this we mutably borrow from //! // the RefCell holding the Owner's Gadgets. //! gadget_owner.gadgets.borrow_mut().push(gadget1.clone().downgrade()); //! gadget_owner.gadgets.borrow_mut().push(gadget2.clone().downgrade()); //! //! // Iterate over our Gadgets, printing their details out //! for gadget_opt in gadget_owner.gadgets.borrow().iter() { //! //! // gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee //! // that their object is still allocated, we need to call upgrade() on them //! // to turn them into a strong reference. This returns an Option, which //! // contains a reference to our object if it still exists. //! let gadget = gadget_opt.upgrade().unwrap(); //! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); //! } //! //! // At the end of the method, gadget_owner, gadget1 and gadget2 get //! // destroyed. There are now no strong (`Rc<T>`) references to the gadgets. //! 
// Once they get destroyed, the Gadgets get destroyed. This zeroes the //! // reference count on Gadget Man, so he gets destroyed as well. //! } //! ``` #![stable] use core::borrow::BorrowFrom; use core::cell::Cell; use core::clone::Clone; use core::cmp::{PartialEq, PartialOrd, Eq, Ord, Ordering}; use core::default::Default; use core::fmt; use core::hash::{mod, Hash}; use core::kinds::marker; use core::mem::{transmute, min_align_of, size_of, forget}; use core::nonzero::NonZero; use core::ops::{Deref, Drop}; use core::option::Option; use core::option::Option::{Some, None}; use core::ptr::{mod, PtrExt}; use core::result::Result; use core::result::Result::{Ok, Err}; use heap::deallocate; struct RcBox<T> { value: T, strong: Cell<uint>, weak: Cell<uint> } /// An immutable reference-counted pointer type. /// /// See the [module level documentation](../index.html) for more details. #[unsafe_no_drop_flag] #[stable] pub struct Rc<T> { // FIXME #12808: strange names to try to avoid interfering with field accesses of the contained // type via Deref _ptr: NonZero<*mut RcBox<T>>, _nosend: marker::NoSend, _noshare: marker::NoSync } impl<T> Rc<T> { /// Constructs a new `Rc<T>`. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// ``` #[stable] pub fn new(value: T) -> Rc<T> { unsafe { Rc { // there is an implicit weak pointer owned by all the strong pointers, which // ensures that the weak destructor never frees the allocation while the strong // destructor is running, even if the weak pointer is stored inside the strong one. _ptr: NonZero::new(transmute(box RcBox { value: value, strong: Cell::new(1), weak: Cell::new(1) })), _nosend: marker::NoSend, _noshare: marker::NoSync } } } /// Downgrades the `Rc<T>` to a `Weak<T>` reference. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// let weak_five = five.downgrade(); /// ``` #[experimental = "Weak pointers may not belong in this module"] pub fn downgrade(&self) -> Weak<T> { self.inc_weak(); Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoSync } } } /// Get the number of weak references to this value. #[inline] #[experimental] pub fn weak_count<T>(this: &Rc<T>) -> uint { this.weak() - 1 } /// Get the number of strong references to this value. #[inline] #[experimental] pub fn strong_count<T>(this: &Rc<T>) -> uint { this.strong() } /// Returns true if there are no other `Rc` or `Weak<T>` values that share the same inner value. /// /// # Examples /// /// ``` /// use std::rc; /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// rc::is_unique(&five); /// ``` #[inline] #[experimental] pub fn is_unique<T>(rc: &Rc<T>) -> bool { weak_count(rc) == 0 && strong_count(rc) == 1 } /// Unwraps the contained value if the `Rc<T>` is unique. /// /// If the `Rc<T>` is not unique, an `Err` is returned with the same `Rc<T>`. /// /// # Example /// /// ``` /// use std::rc::{mod, Rc}; /// /// let x = Rc::new(3u); /// assert_eq!(rc::try_unwrap(x), Ok(3u)); /// /// let x = Rc::new(4u); /// let _y = x.clone(); /// assert_eq!(rc::try_unwrap(x), Err(Rc::new(4u))); /// ``` #[inline] #[experimental] pub fn try_unwrap<T>(rc: Rc<T>) -> Result<T, Rc<T>> { if is_unique(&rc) { unsafe { let val = ptr::read(&*rc); // copy the contained object // destruct the box and skip our Drop // we can ignore the refcounts because we know we're unique deallocate(*rc._ptr as *mut u8, size_of::<RcBox<T>>(), min_align_of::<RcBox<T>>()); forget(rc); Ok(val) } } else { Err(rc) } } /// Returns a mutable reference to the contained value if the `Rc<T>` is unique. /// /// Returns `None` if the `Rc<T>` is not unique. 
/// /// # Example /// /// ``` /// use std::rc::{mod, Rc}; /// /// let mut x = Rc::new(3u); /// *rc::get_mut(&mut x).unwrap() = 4u; /// assert_eq!(*x, 4u); /// /// let _y = x.clone(); /// assert!(rc::get_mut(&mut x).is_none()); /// ``` #[inline] #[experimental] pub fn get_mut<'a, T>(rc: &'a mut Rc<T>) -> Option<&'a mut T> { if is_unique(rc) { let inner = unsafe { &mut **rc._ptr }; Some(&mut inner.value) } else { None } } impl<T: Clone> Rc<T> { /// Make a mutable reference from the given `Rc<T>`. /// /// This is also referred to as a copy-on-write operation because the inner data is cloned if /// the reference count is greater than one. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let mut five = Rc::new(5i); /// /// let mut_five = five.make_unique(); /// ``` #[inline] #[experimental] pub fn make_unique(&mut self) -> &mut T { if !is_unique(self) { *self = Rc::new((**self).clone()) } // This unsafety is ok because we're guaranteed that the pointer returned is the *only* // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at // this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only // possible reference to the inner value. let inner = unsafe { &mut **self._ptr }; &mut inner.value } } impl<T> BorrowFrom<Rc<T>> for T { fn borrow_from(owned: &Rc<T>) -> &T { &**owned } } #[experimental = "Deref is experimental."] impl<T> Deref<T> for Rc<T> { #[inline(always)] fn deref(&self) -> &T { &self.inner().value } } #[unsafe_destructor] #[experimental = "Drop is experimental."] impl<T> Drop for Rc<T> { /// Drops the `Rc<T>`. /// /// This will decrement the strong reference count. If the strong reference count becomes zero /// and the only other references are `Weak<T>` ones, `drop`s the inner value. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// { /// let five = Rc::new(5i); /// /// // stuff /// /// drop(five); // explict drop /// } /// { /// let five = Rc::new(5i); /// /// // stuff /// /// } // implicit drop /// ``` fn drop(&mut self) { unsafe { let ptr = *self._ptr; if !ptr.is_null() { self.dec_strong(); if self.strong() == 0 { ptr::read(&**self); // destroy the contained object // remove the implicit "strong weak" pointer now that we've destroyed the // contents. self.dec_weak(); if self.weak() == 0 { deallocate(ptr as *mut u8, size_of::<RcBox<T>>(), min_align_of::<RcBox<T>>()) } } } } } } #[stable] impl<T> Clone for Rc<T> { /// Makes a clone of the `Rc<T>`. /// /// This increases the strong reference count. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five.clone(); /// ``` #[inline] fn clone(&self) -> Rc<T> { self.inc_strong(); Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoSync } } } #[stable] impl<T: Default> Default for Rc<T> { /// Creates a new `Rc<T>`, with the `Default` value for `T`. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// use std::default::Default; /// /// let x: Rc<int> = Default::default(); /// ``` #[inline] #[stable] fn default() -> Rc<T> { Rc::new(Default::default()) } } #[unstable = "PartialEq is unstable."] impl<T: PartialEq> PartialEq for Rc<T> { /// Equality for two `Rc<T>`s. /// /// Two `Rc<T>`s are equal if their inner value are equal. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five == Rc::new(5i); /// ``` #[inline(always)] fn eq(&self, other: &Rc<T>) -> bool { **self == **other } /// Inequality for two `Rc<T>`s. /// /// Two `Rc<T>`s are unequal if their inner value are unequal. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five != Rc::new(5i); /// ``` #[inline(always)] fn ne(&self, other: &Rc<T>) -> bool { **self != **other } } #[unstable = "Eq is unstable."] impl<T: Eq> Eq for Rc<T> {} #[unstable = "PartialOrd is unstable."] impl<T: PartialOrd> PartialOrd for Rc<T> { /// Partial comparison for two `Rc<T>`s. /// /// The two are compared by calling `partial_cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five.partial_cmp(&Rc::new(5i)); /// ``` #[inline(always)] fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> { (**self).partial_cmp(&**other) } /// Less-than comparison for two `Rc<T>`s. /// /// The two are compared by calling `<` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five < Rc::new(5i); /// ``` #[inline(always)] fn lt(&self, other: &Rc<T>) -> bool { **self < **other } /// 'Less-than or equal to' comparison for two `Rc<T>`s. /// /// The two are compared by calling `<=` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five <= Rc::new(5i); /// ``` #[inline(always)] fn le(&self, other: &Rc<T>) -> bool { **self <= **other } /// Greater-than comparison for two `Rc<T>`s. /// /// The two are compared by calling `>` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five > Rc::new(5i); /// ``` #[inline(always)] fn gt(&self, other: &Rc<T>) -> bool { **self > **other } /// 'Greater-than or equal to' comparison for two `Rc<T>`s. /// /// The two are compared by calling `>=` on their inner values. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five >= Rc::new(5i); /// ``` #[inline(always)] fn ge(&self, other: &Rc<T>) -> bool { **self >= **other } } #[unstable = "Ord is unstable."] impl<T: Ord> Ord for Rc<T> { /// Comparison for two `Rc<T>`s. /// /// The two are compared by calling `cmp()` on their inner values. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// five.partial_cmp(&Rc::new(5i)); /// ``` #[inline] fn cmp(&self, other: &Rc<T>) -> Ordering { (**self).cmp(&**other) } } // FIXME (#18248) Make `T` `Sized?` impl<S: hash::Writer, T: Hash<S>> Hash<S> for Rc<T> { #[inline] fn hash(&self, state: &mut S) { (**self).hash(state); } } #[experimental = "Show is experimental."] impl<T: fmt::Show> fmt::Show for Rc<T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) } } /// A weak version of `Rc<T>`. /// /// Weak references do not count when determining if the inner value should be dropped. /// /// See the [module level documentation](../index.html) for more. #[unsafe_no_drop_flag] #[experimental = "Weak pointers may not belong in this module."] pub struct Weak<T> { // FIXME #12808: strange names to try to avoid interfering with // field accesses of the contained type via Deref _ptr: NonZero<*mut RcBox<T>>, _nosend: marker::NoSend, _noshare: marker::NoSync } #[experimental = "Weak pointers may not belong in this module."] impl<T> Weak<T> { /// Upgrades a weak reference to a strong reference. /// /// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible. /// /// Returns `None` if there were no strong references and the data was destroyed. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let five = Rc::new(5i); /// /// let weak_five = five.downgrade(); /// /// let strong_five: Option<Rc<_>> = weak_five.upgrade(); /// ``` pub fn upgrade(&self) -> Option<Rc<T>> { if self.strong() == 0 { None } else { self.inc_strong(); Some(Rc { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoSync }) } } } #[unsafe_destructor] #[experimental = "Weak pointers may not belong in this module."] impl<T> Drop for Weak<T> { /// Drops the `Weak<T>`. /// /// This will decrement the weak reference count. /// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// { /// let five = Rc::new(5i); /// let weak_five = five.downgrade(); /// /// // stuff /// /// drop(weak_five); // explict drop /// } /// { /// let five = Rc::new(5i); /// let weak_five = five.downgrade(); /// /// // stuff /// /// } // implicit drop /// ``` fn drop(&mut self) { unsafe { let ptr = *self._ptr; if !ptr.is_null() { self.dec_weak(); // the weak count starts at 1, and will only go to zero if all the strong pointers // have disappeared. if self.weak() == 0 { deallocate(ptr as *mut u8, size_of::<RcBox<T>>(), min_align_of::<RcBox<T>>()) } } } } } #[experimental = "Weak pointers may not belong in this module."] impl<T> Clone for Weak<T> { /// Makes a clone of the `Weak<T>`. /// /// This increases the weak reference count. 
/// /// # Examples /// /// ``` /// use std::rc::Rc; /// /// let weak_five = Rc::new(5i).downgrade(); /// /// weak_five.clone(); /// ``` #[inline] fn clone(&self) -> Weak<T> { self.inc_weak(); Weak { _ptr: self._ptr, _nosend: marker::NoSend, _noshare: marker::NoSync } } } #[doc(hidden)] trait RcBoxPtr<T> { fn inner(&self) -> &RcBox<T>; #[inline] fn strong(&self) -> uint { self.inner().strong.get() } #[inline] fn inc_strong(&self) { self.inner().strong.set(self.strong() + 1); } #[inline] fn dec_strong(&self) { self.inner().strong.set(self.strong() - 1); } #[inline] fn weak(&self) -> uint { self.inner().weak.get() } #[inline] fn inc_weak(&self) { self.inner().weak.set(self.weak() + 1); } #[inline] fn dec_weak(&self) { self.inner().weak.set(self.weak() - 1); } } impl<T> RcBoxPtr<T> for Rc<T> { #[inline(always)] fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } } } impl<T> RcBoxPtr<T> for Weak<T> { #[inline(always)] fn inner(&self) -> &RcBox<T> { unsafe { &(**self._ptr) } } } #[cfg(test)] #[allow(experimental)] mod tests { use super::{Rc, Weak, weak_count, strong_count}; use std::cell::RefCell; use std::option::Option; use std::option::Option::{Some, None}; use std::result::Result::{Err, Ok}; use std::mem::drop; use std::clone::Clone; #[test] fn test_clone() { let x = Rc::new(RefCell::new(5i)); let y = x.clone(); *x.borrow_mut() = 20; assert_eq!(*y.borrow(), 20); } #[test] fn test_simple() { let x = Rc::new(5i); assert_eq!(*x, 5); } #[test] fn test_simple_clone() { let x = Rc::new(5i); let y = x.clone(); assert_eq!(*x, 5); assert_eq!(*y, 5); } #[test] fn test_destructor() { let x = Rc::new(box 5i); assert_eq!(**x, 5); } #[test] fn test_live() { let x = Rc::new(5i); let y = x.downgrade(); assert!(y.upgrade().is_some()); } #[test] fn test_dead() { let x = Rc::new(5i); let y = x.downgrade(); drop(x); assert!(y.upgrade().is_none()); } #[test] fn weak_self_cyclic() { struct Cycle { x: RefCell<Option<Weak<Cycle>>> } let a = Rc::new(Cycle { x: RefCell::new(None) }); let 
b = a.clone().downgrade(); *a.x.borrow_mut() = Some(b); // hopefully we don't double-free (or leak)... } #[test] fn is_unique() { let x = Rc::new(3u); assert!(super::is_unique(&x)); let y = x.clone(); assert!(!super::is_unique(&x)); drop(y); assert!(super::is_unique(&x)); let w = x.downgrade(); assert!(!super::is_unique(&x)); drop(w); assert!(super::is_unique(&x)); } #[test] fn test_strong_count() { let a = Rc::new(0u32); assert!(strong_count(&a) == 1); let w = a.downgrade(); assert!(strong_count(&a) == 1); let b = w.upgrade().expect("upgrade of live rc failed"); assert!(strong_count(&b) == 2); assert!(strong_count(&a) == 2); drop(w); drop(a); assert!(strong_count(&b) == 1); let c = b.clone(); assert!(strong_count(&b) == 2); assert!(strong_count(&c) == 2); } #[test] fn test_weak_count() { let a = Rc::new(0u32); assert!(strong_count(&a) == 1); assert!(weak_count(&a) == 0); let w = a.downgrade(); assert!(strong_count(&a) == 1); assert!(weak_count(&a) == 1); drop(w); assert!(strong_count(&a) == 1); assert!(weak_count(&a) == 0); let c = a.clone(); assert!(strong_count(&a) == 2); assert!(weak_count(&a) == 0); drop(c); } #[test] fn try_unwrap() { let x = Rc::new(3u); assert_eq!(super::try_unwrap(x), Ok(3u)); let x = Rc::new(4u); let _y = x.clone(); assert_eq!(super::try_unwrap(x), Err(Rc::new(4u))); let x = Rc::new(5u); let _w = x.downgrade(); assert_eq!(super::try_unwrap(x), Err(Rc::new(5u))); } #[test] fn get_mut() { let mut x = Rc::new(3u); *super::get_mut(&mut x).unwrap() = 4u; assert_eq!(*x, 4u); let y = x.clone(); assert!(super::get_mut(&mut x).is_none()); drop(y); assert!(super::get_mut(&mut x).is_some()); let _w = x.downgrade(); assert!(super::get_mut(&mut x).is_none()); } #[test] fn test_cowrc_clone_make_unique() { let mut cow0 = Rc::new(75u); let mut cow1 = cow0.clone(); let mut cow2 = cow1.clone(); assert!(75 == *cow0.make_unique()); assert!(75 == *cow1.make_unique()); assert!(75 == *cow2.make_unique()); *cow0.make_unique() += 1; *cow1.make_unique() += 2; 
*cow2.make_unique() += 3; assert!(76 == *cow0); assert!(77 == *cow1); assert!(78 == *cow2); // none should point to the same backing memory assert!(*cow0 != *cow1); assert!(*cow0 != *cow2); assert!(*cow1 != *cow2); } #[test] fn test_cowrc_clone_unique2() { let mut cow0 = Rc::new(75u); let cow1 = cow0.clone(); let cow2 = cow1.clone(); assert!(75 == *cow0); assert!(75 == *cow1); assert!(75 == *cow2); *cow0.make_unique() += 1; assert!(76 == *cow0); assert!(75 == *cow1); assert!(75 == *cow2); // cow1 and cow2 should share the same contents // cow0 should have a unique reference assert!(*cow0 != *cow1); assert!(*cow0 != *cow2); assert!(*cow1 == *cow2); } #[test] fn test_cowrc_clone_weak() { let mut cow0 = Rc::new(75u); let cow1_weak = cow0.downgrade(); assert!(75 == *cow0); assert!(75 == *cow1_weak.upgrade().unwrap()); *cow0.make_unique() += 1; assert!(76 == *cow0); assert!(cow1_weak.upgrade().is_none()); } }
28.077325
99
0.530815
147180c2f5fa62e1db1b05ac23b89f20faf5d11c
5,544
use ferrysched_shared::imports::*; use ferrysched_shared::types::*; static MIN_THRU_FARE_TRANSFER_DURATION: Lazy<Duration> = Lazy::new(|| Duration::minutes(30)); static MAX_THRU_FARE_TRANSFER_DURATION: Lazy<Duration> = Lazy::new(|| Duration::minutes(125)); #[derive(Eq, Ord, PartialEq, PartialOrd)] pub struct SailingWithNotes { pub sailing: Sailing, pub notes: Vec<String>, } fn schedule_sailings_for_date(schedule: &Schedule, date: Date) -> Vec<SailingWithNotes> { let mut sailings = Vec::new(); for item in &schedule.items { if let Some(weekday_dr) = item.weekdays.get(&date.weekday()) { if weekday_dr.includes_date(date) { let notes = item .notes .iter() .filter_map(|(a, dr)| dr.includes_date(date).then(|| a.as_ref())) .map(String::from) .collect(); sailings.push(SailingWithNotes { sailing: item.sailing.clone(), notes }); } } } sailings } fn schedules_sailings_for_date(schedules: &[Schedule], date: Date) -> Option<(&Schedule, Vec<SailingWithNotes>)> { schedules .iter() .filter(|sched| sched.date_range.includes_date_inclusive(date)) .map(|sched| (sched, schedule_sailings_for_date(sched, date))) .next() } fn get_potential_thrufare_sailings( to_swb_sailings: Vec<SailingWithNotes>, from_swb_sailings: Vec<SailingWithNotes>, ) -> Vec<SailingWithNotes> { let mut thrufare_sailings = Vec::new(); for from_swb in &from_swb_sailings { let swb_arrive_time_range = (from_swb.sailing.depart_time - *MAX_THRU_FARE_TRANSFER_DURATION) ..=(from_swb.sailing.depart_time - *MIN_THRU_FARE_TRANSFER_DURATION); for to_swb in to_swb_sailings.iter().filter(|to_swb| swb_arrive_time_range.contains(&to_swb.sailing.arrive_time)) { let mut stops = to_swb.sailing.stops.clone(); stops.push(Stop { type_: StopType::Thrufare, terminal: TerminalCode::SWB }); stops.extend(&from_swb.sailing.stops); let mut notes = vec!["Connection at Victoria not guaranteed".to_string()]; notes.extend(to_swb.notes.iter().map(|note| format!("To Victoria: {}", note))); notes.extend(from_swb.notes.iter().map(|note| format!("From 
Victoria: {}", note))); thrufare_sailings.push(SailingWithNotes { sailing: Sailing { depart_time: to_swb.sailing.depart_time, arrive_time: from_swb.sailing.arrive_time, stops, }, notes, }) } } thrufare_sailings } fn select_thrufare_sailings( terminal_pair: TerminalCodePair, mut thrufare_sailings: Vec<SailingWithNotes>, ) -> Vec<SailingWithNotes> { if terminal_pair.to == TerminalCode::TSA { for depart_time in thrufare_sailings.iter().map(|s| s.sailing.depart_time).collect::<HashSet<_>>() { if let Some(max_arrive_time) = thrufare_sailings .iter() .filter(|s| s.sailing.depart_time == depart_time) .map(|s| s.sailing.arrive_time) .max() { thrufare_sailings .retain(|s| s.sailing.depart_time != depart_time || s.sailing.arrive_time == max_arrive_time); } } } else { for arrive_time in thrufare_sailings.iter().map(|s| s.sailing.arrive_time).collect::<HashSet<_>>() { if let Some(min_depart_time) = thrufare_sailings .iter() .filter(|s| s.sailing.arrive_time == arrive_time) .map(|s| s.sailing.depart_time) .min() { thrufare_sailings .retain(|s| s.sailing.arrive_time != arrive_time || s.sailing.depart_time == min_depart_time); } } } thrufare_sailings } fn get_thrufare_sailings( terminal_pair: TerminalCodePair, date: Date, schedules_map: &HashMap<TerminalCodePair, Vec<Schedule>>, ) -> Vec<SailingWithNotes> { if let (Some((_, to_swb_sailings)), Some((_, from_swb_sailings))) = ( schedules_map .get(&TerminalCodePair { from: terminal_pair.from, to: TerminalCode::SWB }) .and_then(|schedules| schedules_sailings_for_date(schedules, date)), schedules_map .get(&TerminalCodePair { from: TerminalCode::SWB, to: terminal_pair.to }) .and_then(|schedules| schedules_sailings_for_date(schedules, date)), ) { select_thrufare_sailings(terminal_pair, get_potential_thrufare_sailings(to_swb_sailings, from_swb_sailings)) } else { vec![] } } pub fn sailings_for_date( terminal_pair: TerminalCodePair, date: Date, schedules_map: &HashMap<TerminalCodePair, Vec<Schedule>>, ) -> Option<(&Schedule, 
Vec<SailingWithNotes>)> { if let Some((schedule, mut sailings)) = schedules_map.get(&terminal_pair).and_then(|schedules| schedules_sailings_for_date(schedules, date)) { if (terminal_pair.from == TerminalCode::TSA && terminal_pair.to != TerminalCode::SWB) || (terminal_pair.to == TerminalCode::TSA && terminal_pair.from != TerminalCode::SWB) { sailings.extend(get_thrufare_sailings(terminal_pair, date, schedules_map)); } sailings.sort_unstable(); Some((schedule, sailings)) } else { None } }
39.884892
116
0.61562
1ef0b55d3ccc29015d172e44779f54efd07e65b5
9,605
use super::externals::{wasm_extern_t, wasm_extern_vec_t}; use super::module::wasm_module_t; use super::store::wasm_store_t; use super::trap::wasm_trap_t; use crate::ordered_resolver::OrderedResolver; use std::mem; use std::sync::Arc; use wasmer::{Extern, Instance, InstantiationError}; /// Opaque type representing a WebAssembly instance. #[allow(non_camel_case_types)] pub struct wasm_instance_t { pub(crate) inner: Arc<Instance>, } /// Creates a new instance from a WebAssembly module and a /// set of imports. /// /// ## Errors /// /// The function can fail in 2 ways: /// /// 1. Link errors that happen when plugging the imports into the /// instance, /// 2. Runtime errors that happen when running the module `start` /// function. /// /// Failures are stored in the `traps` argument; the program doesn't /// panic. /// /// # Notes /// /// The `store` argument is ignored. The store from the given module /// will be used. /// /// # Example /// /// See the module's documentation. #[no_mangle] pub unsafe extern "C" fn wasm_instance_new( _store: Option<&wasm_store_t>, module: Option<&wasm_module_t>, imports: Option<&wasm_extern_vec_t>, traps: *mut *mut wasm_trap_t, ) -> Option<Box<wasm_instance_t>> { let module = module?; let imports = imports?; let wasm_module = &module.inner; let module_imports = wasm_module.imports(); let module_import_count = module_imports.len(); let resolver: OrderedResolver = imports .into_slice() .map(|imports| imports.iter()) .unwrap_or_else(|| [].iter()) .map(|imp| Extern::from((&**imp).clone())) .take(module_import_count) .collect(); let instance = match Instance::new(wasm_module, &resolver) { Ok(instance) => Arc::new(instance), Err(InstantiationError::Link(link_error)) => { crate::error::update_last_error(link_error); return None; } Err(InstantiationError::Start(runtime_error)) => { let trap: Box<wasm_trap_t> = Box::new(runtime_error.into()); *traps = Box::into_raw(trap); return None; } Err(InstantiationError::HostEnvInitialization(error)) => { 
crate::error::update_last_error(error); return None; } }; Some(Box::new(wasm_instance_t { inner: instance })) } /// Deletes an instance. /// /// # Example /// /// See `wasm_instance_new`. #[no_mangle] pub unsafe extern "C" fn wasm_instance_delete(_instance: Option<Box<wasm_instance_t>>) {} /// Gets the exports of the instance. /// /// # Example /// /// ```rust /// # use inline_c::assert_c; /// # fn main() { /// # (assert_c! { /// # #include "tests/wasmer_wasm.h" /// # /// int main() { /// // Create the engine and the store. /// wasm_engine_t* engine = wasm_engine_new(); /// wasm_store_t* store = wasm_store_new(engine); /// /// // Create a WebAssembly module from a WAT definition. /// wasm_byte_vec_t wat; /// wasmer_byte_vec_new_from_string( /// &wat, /// "(module\n" /// " (func (export \"function\") (param i32 i64))\n" /// " (global (export \"global\") i32 (i32.const 7))\n" /// " (table (export \"table\") 0 funcref)\n" /// " (memory (export \"memory\") 1))" /// ); /// wasm_byte_vec_t wasm; /// wat2wasm(&wat, &wasm); /// /// // Create the module. /// wasm_module_t* module = wasm_module_new(store, &wasm); /// /// // Instantiate the module. /// wasm_extern_vec_t imports = WASM_EMPTY_VEC; /// wasm_trap_t* traps = NULL; /// /// wasm_instance_t* instance = wasm_instance_new(store, module, &imports, &traps); /// assert(instance); /// /// // Read the exports. /// wasm_extern_vec_t exports; /// wasm_instance_exports(instance, &exports); /// /// // We have 4 of them. /// assert(exports.size == 4); /// /// // The first one is a function. Use `wasm_extern_as_func` /// // to go further. /// assert(wasm_extern_kind(exports.data[0]) == WASM_EXTERN_FUNC); /// /// // The second one is a global. Use `wasm_extern_as_global` to /// // go further. /// assert(wasm_extern_kind(exports.data[1]) == WASM_EXTERN_GLOBAL); /// /// // The third one is a table. Use `wasm_extern_as_table` to /// // go further. 
/// assert(wasm_extern_kind(exports.data[2]) == WASM_EXTERN_TABLE); /// /// // The fourth one is a memory. Use `wasm_extern_as_memory` to /// // go further. /// assert(wasm_extern_kind(exports.data[3]) == WASM_EXTERN_MEMORY); /// /// // Free everything. /// wasm_extern_vec_delete(&exports); /// wasm_instance_delete(instance); /// wasm_module_delete(module); /// wasm_byte_vec_delete(&wasm); /// wasm_byte_vec_delete(&wat); /// wasm_store_delete(store); /// wasm_engine_delete(engine); /// /// return 0; /// } /// # }) /// # .success(); /// # } /// ``` /// /// To go further: /// /// * [`wasm_extern_as_func`][super::externals::wasm_extern_as_func], /// * [`wasm_extern_as_global`][super::externals::wasm_extern_as_global], /// * [`wasm_extern_as_table`][super::externals::wasm_extern_as_table], /// * [`wasm_extern_as_memory`][super::externals::wasm_extern_as_memory]. #[no_mangle] pub unsafe extern "C" fn wasm_instance_exports( instance: &wasm_instance_t, // own out: &mut wasm_extern_vec_t, ) { let instance = &instance.inner; let mut extern_vec = instance .exports .iter() .map(|(name, r#extern)| { let function = if let Extern::Function { .. } = r#extern { instance.exports.get_function(&name).ok().cloned() } else { None }; Box::into_raw(Box::new(r#extern.clone().into())) }) .collect::<Vec<*mut wasm_extern_t>>(); extern_vec.shrink_to_fit(); out.size = extern_vec.len(); out.data = extern_vec.as_mut_ptr(); mem::forget(extern_vec); } #[cfg(test)] mod tests { use inline_c::assert_c; #[test] fn test_instance_new() { (assert_c! { #include "tests/wasmer_wasm.h" // The `sum` host function implementation. wasm_trap_t* sum_callback( const wasm_val_vec_t* arguments, wasm_val_vec_t* results ) { wasm_val_t sum = { .kind = WASM_I32, .of = { arguments->data[0].of.i32 + arguments->data[1].of.i32 }, }; results->data[0] = sum; return NULL; } int main() { // Create the engine and the store. 
wasm_engine_t* engine = wasm_engine_new(); wasm_store_t* store = wasm_store_new(engine); // Create a WebAssembly module from a WAT definition. wasm_byte_vec_t wat; wasmer_byte_vec_new_from_string( &wat, "(module\n" " (import \"math\" \"sum\" (func $sum (param i32 i32) (result i32)))\n" " (func (export \"add_one\") (param i32) (result i32)\n" " local.get 0\n" " i32.const 1\n" " call $sum))" ); wasm_byte_vec_t wasm; wat2wasm(&wat, &wasm); // Create the module. wasm_module_t* module = wasm_module_new(store, &wasm); assert(module); // Prepare the imports. wasm_functype_t* sum_type = wasm_functype_new_2_1( wasm_valtype_new_i32(), wasm_valtype_new_i32(), wasm_valtype_new_i32() ); wasm_func_t* sum_function = wasm_func_new(store, sum_type, sum_callback); wasm_extern_t* externs[] = { wasm_func_as_extern(sum_function) }; wasm_extern_vec_t imports = WASM_ARRAY_VEC(externs); // Instantiate the module. wasm_trap_t* traps = NULL; wasm_instance_t* instance = wasm_instance_new(store, module, &imports, &traps); assert(instance); // Run the exported function. wasm_extern_vec_t exports; wasm_instance_exports(instance, &exports); assert(exports.size == 1); const wasm_func_t* run_function = wasm_extern_as_func(exports.data[0]); assert(run_function); wasm_val_t arguments[1] = { WASM_I32_VAL(1) }; wasm_val_t results[1] = { WASM_INIT_VAL }; wasm_val_vec_t arguments_as_array = WASM_ARRAY_VEC(arguments); wasm_val_vec_t results_as_array = WASM_ARRAY_VEC(results); wasm_trap_t* trap = wasm_func_call(run_function, &arguments_as_array, &results_as_array); assert(trap == NULL); assert(results[0].of.i32 == 2); // Free everything. wasm_extern_vec_delete(&exports); wasm_instance_delete(instance); wasm_func_delete(sum_function); wasm_functype_delete(sum_type); wasm_module_delete(module); wasm_byte_vec_delete(&wasm); wasm_byte_vec_delete(&wat); wasm_store_delete(store); wasm_engine_delete(engine); return 0; } }) .success(); } }
31.388889
105
0.569287
e644d8766f1d0e967bd95e978dd8f0c6870628e4
10,504
extern crate midir; extern crate rosc; use std::thread; use std::sync::mpsc; use std::sync::{Mutex, Arc}; use std::time::{Instant,Duration}; use std::io::{stdin}; use std::error::Error; use std::env; use std::net::{UdpSocket, SocketAddrV4}; use rosc::{OscPacket, OscMessage, OscType}; use rosc::encoder; use midir::{MidiOutput,MidiInput}; use std::str::FromStr; pub mod taptempo; pub mod midi_utils; use taptempo::TapTempo; const SLEEP_TIME: u64 = 10; const BEAT_COUNT: usize = 8; enum MessageForOscThread { Terminate, UpdateBeat(usize, f32, f32), UpdateVelocity(usize, f32), UpdateMasterVelocity(f32), } enum MessageForMidiThread { Terminate, UpdateBeat(usize, f32, f32), SendNoteOn(u8, u8), } struct OscSettings { host_addr: SocketAddrV4, to_addr: SocketAddrV4, } struct MidiSettings { midi_in: MidiInput, midi_out: MidiOutput, in_port: usize, out_port: usize, } struct Beat { frequency: f32, phase: f32, velocity: f32, } impl Beat { fn new() -> Beat { Beat{ frequency: 1.0, phase: 1.0, velocity: 1.0, } } fn eval(&self, timer: &Arc<Mutex<Instant>>) -> f32 { let t = { let timer = timer.lock().unwrap(); timer.elapsed().as_millis() as f32 / 1000.0 }; ((t - self.phase) * self.frequency).fract() } } fn main() { let args: Vec<String> = env::args().collect(); let usage = format!( "Usage: {} CLIENT_IP:CLIENT_PORT", &args[0] ); if args.len() < 2 { panic!(usage); } let host_addr = get_addr_from_arg("127.0.0.1:0"); let to_addr = get_addr_from_arg(&args[1]); let osc_settings = OscSettings{ host_addr, to_addr }; // Prompt midi settings let (midi_in, in_port) = midi_utils::get_midi_in().unwrap(); let (midi_out, out_port) = midi_utils::get_midi_out().unwrap(); let midi_settings = MidiSettings{ midi_in, midi_out, in_port, out_port }; // Launch threads let (tx_osc, rx_osc) = mpsc::channel(); let (tx_midi, rx_midi) = mpsc::channel(); let timer = Arc::new(Mutex::new(Instant::now())); let osc_thread = { let timer = Arc::clone(&timer); thread::spawn(|| run_osc_thread(osc_settings, rx_osc, timer)) 
}; let midi_thread = { let timer = Arc::clone(&timer); let tx_osc = mpsc::Sender::clone(&tx_osc); let tx_midi = mpsc::Sender::clone(&tx_midi); thread::spawn(|| match run_midi_thread(midi_settings, rx_midi, tx_midi, tx_osc, timer) { Ok(_) => (), Err(err) => println!("Error: {}", err.description()) }) }; println!("\nPress <return> to exit..."); let mut input = String::new(); stdin().read_line(&mut input).unwrap(); // wait for next enter key press tx_osc.send(MessageForOscThread::Terminate).unwrap(); tx_midi.send(MessageForMidiThread::Terminate).unwrap(); midi_thread.join().unwrap(); osc_thread.join().unwrap(); } fn run_midi_thread(midi_settings: MidiSettings, rx: mpsc::Receiver<MessageForMidiThread>, tx_midi: mpsc::Sender<MessageForMidiThread>, tx_osc: mpsc::Sender<MessageForOscThread>, timer: Arc<Mutex<Instant>>) -> Result<(), Box<Error>> { let MidiSettings{midi_in, in_port, midi_out, out_port} = midi_settings; println!("\nOpening connection"); let mut conn_out = midi_out.connect(out_port, "Cooldown")?; let _conn_in = { let timer = Arc::clone(&timer); let tx_osc = mpsc::Sender::clone(&tx_osc); let mut tapper: Vec<TapTempo> = (0..BEAT_COUNT).map(|_| TapTempo::new()).collect(); midi_in.connect(in_port, "Cooldown", move |stamp, message, _| { match message[0] { 144 => { // note down //conn_out.send(message).unwrap_or_else(|_| println!("Error when forwarding message ...")); match message[1] { 1 | 4 | 7 | 10 | 13 | 16 | 19 | 22 => { // Tap let beat_index = ((message[1] - 1) / 3) as usize; tap(&mut tapper[beat_index], &timer); tx_osc.send(MessageForOscThread::UpdateBeat(beat_index, tapper[beat_index].frequency, tapper[beat_index].phase)).unwrap(); tx_midi.send(MessageForMidiThread::UpdateBeat(beat_index, tapper[beat_index].frequency, tapper[beat_index].phase)).unwrap(); // turn off reset button tx_midi.send(MessageForMidiThread::SendNoteOn(3 + 3 * beat_index as u8, 0)).unwrap(); }, 3 | 6 | 9 | 12 | 15 | 18 | 21 | 24 => { // Reset let beat_index = ((message[1] - 3) / 3) as 
usize; tapper[beat_index].reset(); println!("Reset tap tempo"); //conn_out.send(message).unwrap_or_else(|_| println!("Error when forwarding message ...")); tx_midi.send(MessageForMidiThread::SendNoteOn(message[1], 127)).unwrap(); }, _ => () } }, 128 => { // note up //conn_out.send(&vec![144, message[1], 0]).unwrap_or_else(|_| println!("Error when forwarding message ...")); }, 176 => { // knob match message[1] { 19 | 23 | 27 | 31 | 49 | 53 | 57 | 61 => { let beat_index = if message[1] < 40 { (message[1] - 19) / 4} else { 4 + (message[1] - 49) / 4 } as usize; tx_osc.send(MessageForOscThread::UpdateVelocity(beat_index, message[2] as f32 / 127.0)).unwrap(); }, 62 => { tx_osc.send(MessageForOscThread::UpdateMasterVelocity(message[2] as f32 / 127.0)).unwrap(); } _ => () } } _ => () } println!("{}: {:?} (len = {})", stamp, message, message.len()); }, ())? }; let mut beat: Vec<Beat> = (0..BEAT_COUNT).map(|_| Beat::new()).collect(); let mut bop: Vec<bool> = (0..BEAT_COUNT).map(|_| false).collect(); 'main: loop { // Handle events while let Ok(msg) = rx.try_recv() { match msg { MessageForMidiThread::Terminate => break 'main, MessageForMidiThread::UpdateBeat(beat_index, new_freq, new_phase) => { beat[beat_index].frequency = new_freq; beat[beat_index].phase = new_phase; }, MessageForMidiThread::SendNoteOn(note, velocity) => { conn_out.send(&vec![144, note, velocity]).unwrap_or_else(|_| println!("Error when forwarding message ...")); } } } // Update LED feedback on MIDI controller for i in 0..BEAT_COUNT { let value = beat[i].eval(&timer); let bip = value < 0.5; if bip != bop[i] { let n = (1 + 3 * i) as u8; conn_out.send(&vec![144, n, if bip {127} else {0}]).unwrap_or_else(|_| println!("Error when forwarding message ...")); } bop[i] = bip; } thread::sleep(Duration::from_millis(SLEEP_TIME)); } // Turn off buttons for i in 0..BEAT_COUNT { let n = (1 + 3 * i) as u8; conn_out.send(&vec![144, n, 0]).unwrap_or_else(|_| println!("Error when forwarding message ...")); 
conn_out.send(&vec![144, n + 2, 0]).unwrap_or_else(|_| println!("Error when forwarding message ...")); } println!("Closing connections"); Ok(()) } fn tap(tapper: &mut TapTempo, timer: &Arc<Mutex<Instant>>) { let t = { let timer = timer.lock().unwrap(); timer.elapsed().as_millis() as f32 / 1000.0 }; //if (tapper.sample_count > 1 && t - lastTap > recordingMargin / est.frequency) tapper.add_sample(t); tapper.estimate(); println!("Tap Tempo: freq = {}, phase = {}", tapper.frequency, tapper.phase); } fn run_osc_thread(osc_settings: OscSettings, rx: mpsc::Receiver<MessageForOscThread>, timer: Arc<Mutex<Instant>>) { let sock = UdpSocket::bind(osc_settings.host_addr).unwrap(); let mut beat: Vec<Beat> = (0..BEAT_COUNT).map(|_| Beat::new()).collect(); let mut master_velocity = 1.0; loop { // Handle events while let Ok(msg) = rx.try_recv() { match msg { MessageForOscThread::Terminate => return, MessageForOscThread::UpdateBeat(beat_index, new_freq, new_phase) => { beat[beat_index].frequency = new_freq; beat[beat_index].phase = new_phase; }, MessageForOscThread::UpdateVelocity(beat_index, new_velocity) => { beat[beat_index].velocity = new_velocity; }, MessageForOscThread::UpdateMasterVelocity(new_velocity) => { master_velocity = new_velocity; }, } } // Send OSC messages // - Send beats for i in 0..BEAT_COUNT { let value = beat[i].eval(&timer); let msg_buf = encoder::encode(&OscPacket::Message(OscMessage { addr: format!("/beat{}", i+1).to_string(), args: Some(vec![ OscType::Float(value), OscType::Float(beat[i].frequency), OscType::Float(beat[i].phase), OscType::Float(beat[i].velocity * master_velocity) ]), })).unwrap(); sock.send_to(&msg_buf, osc_settings.to_addr).unwrap(); } // - Send time let t = { let timer = timer.lock().unwrap(); timer.elapsed().as_millis() as f32 / 1000.0 }; let msg_buf = encoder::encode(&OscPacket::Message(OscMessage { addr: "/time".to_string(), args: Some(vec![OscType::Float(t)]), })).unwrap(); sock.send_to(&msg_buf, osc_settings.to_addr).unwrap(); 
thread::sleep(Duration::from_millis(SLEEP_TIME)); } } fn get_addr_from_arg(arg: &str) -> SocketAddrV4 { SocketAddrV4::from_str(arg).unwrap() }
35.367003
233
0.536177
48f9a090b201eaccba57113bee6d95bcf8f99260
4,220
// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Defines a bitmap, which is used to track which values in an Arrow array are null. //! This is called a "validity bitmap" in the Arrow documentation. use crate::buffer::Buffer; use crate::error::Result; use crate::util::bit_util; use std::ops::{BitAnd, BitOr}; #[derive(Debug, Clone)] pub struct Bitmap { pub(crate) bits: Buffer, } impl Bitmap { pub fn new(num_bits: usize) -> Self { let num_bytes = num_bits / 8 + if num_bits % 8 > 0 { 1 } else { 0 }; let r = num_bytes % 64; let len = if r == 0 { num_bytes } else { num_bytes + 64 - r }; let mut v = Vec::with_capacity(len); for _ in 0..len { v.push(255); // 1 is not null } Bitmap { bits: Buffer::from(&v[..]), } } pub fn len(&self) -> usize { self.bits.len() } pub fn is_set(&self, i: usize) -> bool { assert!(i < (self.bits.len() << 3)); unsafe { bit_util::get_bit_raw(self.bits.raw_data(), i) } } pub fn buffer_ref(&self) -> &Buffer { &self.bits } pub fn to_buffer(self) -> Buffer { self.bits } } impl<'a, 'b> BitAnd<&'b Bitmap> for &'a Bitmap { type Output = Result<Bitmap>; fn bitand(self, rhs: &'b Bitmap) -> Result<Bitmap> { Ok(Bitmap::from((&self.bits & &rhs.bits)?)) } } impl<'a, 'b> BitOr<&'b Bitmap> for &'a Bitmap { type Output = 
Result<Bitmap>; fn bitor(self, rhs: &'b Bitmap) -> Result<Bitmap> { Ok(Bitmap::from((&self.bits | &rhs.bits)?)) } } impl From<Buffer> for Bitmap { fn from(buf: Buffer) -> Self { Self { bits: buf } } } impl PartialEq for Bitmap { fn eq(&self, other: &Self) -> bool { // buffer equality considers capacity, but here we want to only compare // actual data contents let self_len = self.bits.len(); let other_len = other.bits.len(); if self_len != other_len { return false; } &self.bits.data()[..self_len] == &other.bits.data()[..self_len] } } #[cfg(test)] mod tests { use super::*; #[test] fn test_bitmap_length() { assert_eq!(64, Bitmap::new(63 * 8).len()); assert_eq!(64, Bitmap::new(64 * 8).len()); assert_eq!(128, Bitmap::new(65 * 8).len()); } #[test] fn test_bitwise_and() { let bitmap1 = Bitmap::from(Buffer::from([0b01101010])); let bitmap2 = Bitmap::from(Buffer::from([0b01001110])); assert_eq!( Bitmap::from(Buffer::from([0b01001010])), (&bitmap1 & &bitmap2).unwrap() ); } #[test] fn test_bitwise_or() { let bitmap1 = Bitmap::from(Buffer::from([0b01101010])); let bitmap2 = Bitmap::from(Buffer::from([0b01001110])); assert_eq!( Bitmap::from(Buffer::from([0b01101110])), (&bitmap1 | &bitmap2).unwrap() ); } #[test] fn test_bitmap_is_set() { let bitmap = Bitmap::from(Buffer::from([0b01001010])); assert_eq!(false, bitmap.is_set(0)); assert_eq!(true, bitmap.is_set(1)); assert_eq!(false, bitmap.is_set(2)); assert_eq!(true, bitmap.is_set(3)); assert_eq!(false, bitmap.is_set(4)); assert_eq!(false, bitmap.is_set(5)); assert_eq!(true, bitmap.is_set(6)); assert_eq!(false, bitmap.is_set(7)); } }
28.707483
85
0.582938
feef894d6dd0855269f4d2056354b365c1c16c3b
4,222
#[cfg(feature = "cross-version")] pub mod compatibility; #[cfg(all(feature = "network"))] pub mod network; /* Explorer quick test. Run node for ~15 minutes and verify explorer is in sync with node rest */ pub mod explorer; /* Sanity performance tests. Quick tests to check overall node performance. Run some transaction for ~15 minutes or specified no of transactions (100) */ pub mod transaction; /* Long running test for self node (48 h) */ #[cfg(feature = "soak")] pub mod soak; /* Quick load test for rest api */ pub mod rest; /* Long running test for dumping rewards each epoch */ pub mod bootstrap; pub mod fragment; pub mod persistent_log; pub mod rewards; pub mod voting; use jormungandr_automation::{ jcli::{self, JCli}, jormungandr::{ExplorerError, JormungandrError, JormungandrProcess}, }; use jormungandr_lib::{crypto::hash::Hash, interfaces::Value}; use thiserror::Error; use thor::Wallet; #[derive(Error, Debug)] pub enum NodeStuckError { #[error("node tip is not moving up. Stuck at {tip_hash} ")] TipIsNotMoving { tip_hash: String, logs: String }, #[error("node block counter is not moving up. Stuck at {block_counter}")] BlockCounterIsNoIncreased { block_counter: u64, logs: String }, #[error("accounts funds were not trasfered (actual: {actual} vs expected: {expected}). Logs: {logs}")] FundsNotTransfered { actual: Value, expected: Value, logs: String, }, #[error("explorer is out of sync with rest node (actual: {actual} vs expected: {expected}). 
Logs: {logs}")] ExplorerTipIsOutOfSync { actual: Hash, expected: Hash, logs: String, }, #[error("error in logs found")] InternalJormungandrError(#[from] JormungandrError), #[error("jcli error")] InternalJcliError(#[from] jcli::Error), #[error("exploer error")] InternalExplorerError(#[from] ExplorerError), } pub fn send_transaction_and_ensure_block_was_produced( transation_messages: &[String], jormungandr: &JormungandrProcess, ) -> Result<(), NodeStuckError> { let jcli: JCli = Default::default(); let block_tip_before_transaction = jcli.rest().v0().tip(&jormungandr.rest_uri()); let block_counter_before_transaction = jormungandr.logger.get_created_blocks_counter(); jcli.fragment_sender(jormungandr) .send_many(transation_messages) .wait_until_all_processed(&Default::default()) .map_err(NodeStuckError::InternalJcliError)?; let block_tip_after_transaction = jcli.rest().v0().tip(jormungandr.rest_uri()); let block_counter_after_transaction = jormungandr.logger.get_created_blocks_counter(); if block_tip_before_transaction == block_tip_after_transaction { return Err(NodeStuckError::TipIsNotMoving { tip_hash: block_tip_after_transaction, logs: jormungandr.logger.get_log_content(), }); } if block_counter_before_transaction == block_counter_after_transaction { return Err(NodeStuckError::BlockCounterIsNoIncreased { block_counter: block_counter_before_transaction as u64, logs: jormungandr.logger.get_log_content(), }); } Ok(()) } pub fn check_transaction_was_processed( transaction: String, receiver: &Wallet, value: u64, jormungandr: &JormungandrProcess, ) -> Result<(), NodeStuckError> { send_transaction_and_ensure_block_was_produced(&[transaction], jormungandr)?; check_funds_transferred_to(&receiver.address().to_string(), value.into(), jormungandr)?; jormungandr .check_no_errors_in_log() .map_err(NodeStuckError::InternalJormungandrError) } pub fn check_funds_transferred_to( address: &str, value: Value, jormungandr: &JormungandrProcess, ) -> Result<(), NodeStuckError> { let jcli: 
JCli = Default::default(); let account_state = jcli .rest() .v0() .account_stats(address, &jormungandr.rest_uri()); if *account_state.value() != value { return Err(NodeStuckError::FundsNotTransfered { actual: *account_state.value(), expected: value, logs: jormungandr.logger.get_log_content(), }); } Ok(()) }
31.044118
111
0.68901
ebc454b9d44ab1bcc9d3dc3fd54b45bb7badcb57
29,614
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0. use std::sync::mpsc::channel; use std::sync::Arc; use std::time::Duration; use std::{fs, thread}; use kvproto::metapb; use kvproto::pdpb; use kvproto::raft_cmdpb::*; use kvproto::raft_serverpb::RaftMessage; use raft::eraftpb::MessageType; use engine_rocks::Compat; use engine_traits::{Iterable, Peekable, CF_WRITE}; use keys::data_key; use pd_client::PdClient; use raftstore::store::{Callback, WriteResponse}; use raftstore::Result; use test_raftstore::*; use tikv_util::config::*; pub const REGION_MAX_SIZE: u64 = 50000; pub const REGION_SPLIT_SIZE: u64 = 30000; fn test_base_split_region<T, F>(cluster: &mut Cluster<T>, split: F, right_derive: bool) where T: Simulator, F: Fn(&mut Cluster<T>, &metapb::Region, &[u8]), { cluster.cfg.raft_store.right_derive_when_split = right_derive; cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let tbls = vec![ (b"k22", b"k11", b"k33"), (b"k11", b"k00", b"k11"), (b"k33", b"k22", b"k33"), ]; for (split_key, left_key, right_key) in tbls { cluster.must_put(left_key, b"v1"); cluster.must_put(right_key, b"v3"); // Left and right key must be in same region before split. let region = pd_client.get_region(left_key).unwrap(); let region2 = pd_client.get_region(right_key).unwrap(); assert_eq!(region.get_id(), region2.get_id()); // Split with split_key, so left_key must in left, and right_key in right. 
split(cluster, &region, split_key); let left = pd_client.get_region(left_key).unwrap(); let right = pd_client.get_region(right_key).unwrap(); assert_eq!( region.get_id(), if right_derive { right.get_id() } else { left.get_id() } ); assert_eq!(region.get_start_key(), left.get_start_key()); assert_eq!(left.get_end_key(), right.get_start_key()); assert_eq!(region.get_end_key(), right.get_end_key()); cluster.must_put(left_key, b"vv1"); assert_eq!(cluster.get(left_key).unwrap(), b"vv1".to_vec()); cluster.must_put(right_key, b"vv3"); assert_eq!(cluster.get(right_key).unwrap(), b"vv3".to_vec()); let epoch = left.get_region_epoch().clone(); let get = new_request(left.get_id(), epoch, vec![new_get_cmd(right_key)], false); debug!("requesting {:?}", get); let resp = cluster .call_command_on_leader(get, Duration::from_secs(5)) .unwrap(); assert!(resp.get_header().has_error(), "{:?}", resp); assert!( resp.get_header().get_error().has_key_not_in_region(), "{:?}", resp ); } } #[test] fn test_server_base_split_region_left_derive() { let count = 5; let mut cluster = new_server_cluster(0, count); test_base_split_region(&mut cluster, Cluster::must_split, false); } #[test] fn test_server_base_split_region_right_derive() { let count = 5; let mut cluster = new_server_cluster(0, count); test_base_split_region(&mut cluster, Cluster::must_split, true); } #[test] fn test_server_split_region_twice() { let count = 5; let mut cluster = new_server_cluster(0, count); cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let (split_key, left_key, right_key) = (b"k22", b"k11", b"k33"); cluster.must_put(left_key, b"v1"); cluster.must_put(right_key, b"v3"); // Left and right key must be in same region before split. 
let region = pd_client.get_region(left_key).unwrap(); let region2 = pd_client.get_region(right_key).unwrap(); assert_eq!(region.get_id(), region2.get_id()); let (tx, rx) = channel(); let key = split_key.to_vec(); let c = Box::new(move |write_resp: WriteResponse| { let mut resp = write_resp.response; let admin_resp = resp.mut_admin_response(); let split_resp = admin_resp.mut_splits(); let mut regions: Vec<_> = split_resp.take_regions().into(); let mut d = regions.drain(..); let (left, right) = (d.next().unwrap(), d.next().unwrap()); assert_eq!(left.get_end_key(), key.as_slice()); assert_eq!(region2.get_start_key(), left.get_start_key()); assert_eq!(left.get_end_key(), right.get_start_key()); assert_eq!(region2.get_end_key(), right.get_end_key()); tx.send(right).unwrap(); }); cluster.split_region(&region, split_key, Callback::Write(c)); let region3 = rx.recv_timeout(Duration::from_secs(5)).unwrap(); cluster.must_put(split_key, b"v2"); let (tx1, rx1) = channel(); let c = Box::new(move |write_resp: WriteResponse| { assert!(write_resp.response.has_header()); assert!(write_resp.response.get_header().has_error()); assert!(!write_resp.response.has_admin_response()); tx1.send(()).unwrap(); }); cluster.split_region(&region3, split_key, Callback::Write(c)); rx1.recv_timeout(Duration::from_secs(5)).unwrap(); } fn test_auto_split_region<T: Simulator>(cluster: &mut Cluster<T>) { cluster.cfg.raft_store.split_region_check_tick_interval = ReadableDuration::millis(100); cluster.cfg.coprocessor.region_max_size = ReadableSize(REGION_MAX_SIZE); cluster.cfg.coprocessor.region_split_size = ReadableSize(REGION_SPLIT_SIZE); let check_size_diff = cluster.cfg.raft_store.region_split_check_diff.0; let mut range = 1..; cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"").unwrap(); let last_key = put_till_size(cluster, REGION_SPLIT_SIZE, &mut range); // it should be finished in millis if split. 
thread::sleep(Duration::from_millis(300)); let target = pd_client.get_region(&last_key).unwrap(); assert_eq!(region, target); let max_key = put_cf_till_size( cluster, CF_WRITE, REGION_MAX_SIZE - REGION_SPLIT_SIZE + check_size_diff, &mut range, ); let left = pd_client.get_region(b"").unwrap(); let right = pd_client.get_region(&max_key).unwrap(); if left == right { cluster.wait_region_split(&region); } let left = pd_client.get_region(b"").unwrap(); let right = pd_client.get_region(&max_key).unwrap(); assert_ne!(left, right); assert_eq!(region.get_start_key(), left.get_start_key()); assert_eq!(right.get_start_key(), left.get_end_key()); assert_eq!(region.get_end_key(), right.get_end_key()); assert_eq!(pd_client.get_region(&max_key).unwrap(), right); assert_eq!(pd_client.get_region(left.get_end_key()).unwrap(), right); let middle_key = left.get_end_key(); let leader = cluster.leader_of_region(left.get_id()).unwrap(); let store_id = leader.get_store_id(); let mut size = 0; cluster.engines[&store_id] .kv .c() .scan(&data_key(b""), &data_key(middle_key), false, |k, v| { size += k.len() as u64; size += v.len() as u64; Ok(true) }) .expect(""); assert!(size <= REGION_SPLIT_SIZE); // although size may be smaller than REGION_SPLIT_SIZE, but the diff should // be small. 
assert!(size > REGION_SPLIT_SIZE - 1000); let epoch = left.get_region_epoch().clone(); let get = new_request(left.get_id(), epoch, vec![new_get_cmd(&max_key)], false); let resp = cluster .call_command_on_leader(get, Duration::from_secs(5)) .unwrap(); assert!(resp.get_header().has_error()); assert!(resp.get_header().get_error().has_key_not_in_region()); } #[test] fn test_node_auto_split_region() { let count = 5; let mut cluster = new_node_cluster(0, count); test_auto_split_region(&mut cluster); } #[test] fn test_incompatible_node_auto_split_region() { let count = 5; let mut cluster = new_incompatible_node_cluster(0, count); test_auto_split_region(&mut cluster); } #[test] fn test_server_auto_split_region() { let count = 5; let mut cluster = new_server_cluster(0, count); test_auto_split_region(&mut cluster); } #[test] fn test_incompatible_server_auto_split_region() { let count = 5; let mut cluster = new_incompatible_server_cluster(0, count); test_auto_split_region(&mut cluster); } // A filter that disable commitment by heartbeat. #[derive(Clone)] struct EraseHeartbeatCommit; impl Filter for EraseHeartbeatCommit { fn before(&self, msgs: &mut Vec<RaftMessage>) -> Result<()> { for msg in msgs { if msg.get_message().get_msg_type() == MessageType::MsgHeartbeat { msg.mut_message().set_commit(0); } } Ok(()) } } fn check_cluster(cluster: &mut Cluster<impl Simulator>, k: &[u8], v: &[u8], all_committed: bool) { let region = cluster.pd_client.get_region(k).unwrap(); let mut tried_cnt = 0; let leader = loop { match cluster.leader_of_region(region.get_id()) { None => { tried_cnt += 1; if tried_cnt >= 3 { panic!("leader should be elected"); } continue; } Some(l) => break l, } }; let mut missing_count = 0; for i in 1..=region.get_peers().len() as u64 { let engine = cluster.get_engine(i); if all_committed || i == leader.get_store_id() { must_get_equal(&engine, k, v); } else { // Note that a follower can still commit the log by an empty MsgAppend // when bcast commit is disabled. 
A heartbeat response comes to leader // before MsgAppendResponse will trigger MsgAppend. match engine.c().get_value(&keys::data_key(k)).unwrap() { Some(res) => assert_eq!(v, &res[..]), None => missing_count += 1, } } } assert!(all_committed || missing_count > 0); } /// TiKV enables lazy broadcast commit optimization, which can delay split /// on follower node. So election of new region will delay. We need to make /// sure broadcast commit is disabled when split. #[test] fn test_delay_split_region() { let mut cluster = new_server_cluster(0, 3); cluster.cfg.raft_store.raft_log_gc_count_limit = 500; cluster.cfg.raft_store.merge_max_log_gap = 100; cluster.cfg.raft_store.raft_log_gc_threshold = 500; // To stable the test, we use a large hearbeat timeout 200ms(100ms * 2). // And to elect leader quickly, set election timeout to 1s(100ms * 10). configure_for_lease_read(&mut cluster, Some(100), Some(10)); // We use three nodes for this test. cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"").unwrap(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); // Although skip bcast is enabled, but heartbeat will commit the log in period. check_cluster(&mut cluster, b"k1", b"v1", true); check_cluster(&mut cluster, b"k3", b"v3", true); cluster.must_transfer_leader(region.get_id(), new_peer(1, 1)); cluster.add_send_filter(CloneFilterFactory(EraseHeartbeatCommit)); cluster.must_put(b"k4", b"v4"); sleep_ms(100); // skip bcast is enabled by default, so all followers should not commit // the log. check_cluster(&mut cluster, b"k4", b"v4", false); cluster.must_transfer_leader(region.get_id(), new_peer(3, 3)); // New leader should flush old committed entries eagerly. check_cluster(&mut cluster, b"k4", b"v4", true); cluster.must_put(b"k5", b"v5"); // New committed entries should be broadcast lazily. 
check_cluster(&mut cluster, b"k5", b"v5", false); cluster.add_send_filter(CloneFilterFactory(EraseHeartbeatCommit)); let k2 = b"k2"; // Split should be bcast eagerly, otherwise following must_put will fail // as no leader is available. cluster.must_split(&region, k2); cluster.must_put(b"k6", b"v6"); sleep_ms(100); // After split, skip bcast is enabled again, so all followers should not // commit the log. check_cluster(&mut cluster, b"k6", b"v6", false); } fn test_split_overlap_snapshot<T: Simulator>(cluster: &mut Cluster<T>) { // We use three nodes([1, 2, 3]) for this test. cluster.run(); // guarantee node 1 is leader cluster.must_transfer_leader(1, new_peer(1, 1)); cluster.must_put(b"k0", b"v0"); assert_eq!(cluster.leader_of_region(1), Some(new_peer(1, 1))); let pd_client = Arc::clone(&cluster.pd_client); // isolate node 3 for region 1. cluster.add_send_filter(CloneFilterFactory(RegionPacketFilter::new(1, 3))); cluster.must_put(b"k1", b"v1"); let region = pd_client.get_region(b"").unwrap(); // split (-inf, +inf) -> (-inf, k2), [k2, +inf] cluster.must_split(&region, b"k2"); cluster.must_put(b"k2", b"v2"); // node 1 and node 2 must have k2, but node 3 must not. for i in 1..3 { let engine = cluster.get_engine(i); must_get_equal(&engine, b"k2", b"v2"); } let engine3 = cluster.get_engine(3); must_get_none(&engine3, b"k2"); thread::sleep(Duration::from_secs(1)); let snap_dir = cluster.get_snap_dir(3); // no snaps should be sent. let snapfiles: Vec<_> = fs::read_dir(snap_dir) .unwrap() .map(|p| p.unwrap().path()) .collect(); assert!(snapfiles.is_empty()); cluster.clear_send_filters(); cluster.must_put(b"k3", b"v3"); sleep_ms(3000); // node 3 must have k3. 
must_get_equal(&engine3, b"k3", b"v3"); } #[test] fn test_node_split_overlap_snapshot() { let mut cluster = new_node_cluster(0, 3); test_split_overlap_snapshot(&mut cluster); } #[test] fn test_server_split_overlap_snapshot() { let mut cluster = new_server_cluster(0, 3); test_split_overlap_snapshot(&mut cluster); } fn test_apply_new_version_snapshot<T: Simulator>(cluster: &mut Cluster<T>) { // truncate the log quickly so that we can force sending snapshot. cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20); cluster.cfg.raft_store.raft_log_gc_count_limit = 5; cluster.cfg.raft_store.merge_max_log_gap = 1; cluster.cfg.raft_store.raft_log_gc_threshold = 5; // We use three nodes([1, 2, 3]) for this test. cluster.run(); // guarantee node 1 is leader cluster.must_transfer_leader(1, new_peer(1, 1)); cluster.must_put(b"k0", b"v0"); assert_eq!(cluster.leader_of_region(1), Some(new_peer(1, 1))); let pd_client = Arc::clone(&cluster.pd_client); // isolate node 3 for region 1. cluster.add_send_filter(CloneFilterFactory(RegionPacketFilter::new(1, 3))); cluster.must_put(b"k1", b"v1"); let region = pd_client.get_region(b"").unwrap(); // split (-inf, +inf) -> (-inf, k2), [k2, +inf] cluster.must_split(&region, b"k2"); cluster.must_put(b"k2", b"v2"); // node 1 and node 2 must have k2, but node 3 must not. for i in 1..3 { let engine = cluster.get_engine(i); must_get_equal(&engine, b"k2", b"v2"); } let engine3 = cluster.get_engine(3); must_get_none(&engine3, b"k2"); // transfer leader to ease the preasure of store 1. cluster.must_transfer_leader(1, new_peer(2, 2)); for _ in 0..100 { // write many logs to force log GC for region 1 and region 2. cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k2", b"v2"); } cluster.clear_send_filters(); sleep_ms(3000); // node 3 must have k1, k2. 
must_get_equal(&engine3, b"k1", b"v1"); must_get_equal(&engine3, b"k2", b"v2"); } #[test] fn test_node_apply_new_version_snapshot() { let mut cluster = new_node_cluster(0, 3); test_apply_new_version_snapshot(&mut cluster); } #[test] fn test_server_apply_new_version_snapshot() { let mut cluster = new_server_cluster(0, 3); test_apply_new_version_snapshot(&mut cluster); } fn test_split_with_stale_peer<T: Simulator>(cluster: &mut Cluster<T>) { // disable raft log gc. cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::secs(60); cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration::millis(500); let pd_client = Arc::clone(&cluster.pd_client); // Disable default max peer count check. pd_client.disable_default_operator(); let r1 = cluster.run_conf_change(); // add peer (2,2) to region 1. pd_client.must_add_peer(r1, new_peer(2, 2)); // add peer (3,3) to region 1. pd_client.must_add_peer(r1, new_peer(3, 3)); cluster.must_put(b"k0", b"v0"); // check node 3 has k0. let engine3 = cluster.get_engine(3); must_get_equal(&engine3, b"k0", b"v0"); // guarantee node 1 is leader. cluster.must_transfer_leader(r1, new_peer(1, 1)); // isolate node 3 for region 1. // only filter MsgAppend to avoid election when recover. cluster.add_send_filter(CloneFilterFactory( RegionPacketFilter::new(1, 3).msg_type(MessageType::MsgAppend), )); let region = pd_client.get_region(b"").unwrap(); // split (-inf, +inf) -> (-inf, k2), [k2, +inf] cluster.must_split(&region, b"k2"); cluster.must_put(b"k2", b"v2"); let region2 = pd_client.get_region(b"k2").unwrap(); // remove peer3 in region 2. let peer3 = find_peer(&region2, 3).unwrap(); pd_client.must_remove_peer(region2.get_id(), peer3.clone()); // clear isolation so node 3 can split region 1. // now node 3 has a stale peer for region 2, but // it will be removed soon. 
cluster.clear_send_filters(); cluster.must_put(b"k1", b"v1"); // check node 3 has k1 must_get_equal(&engine3, b"k1", b"v1"); // split [k2, +inf) -> [k2, k3), [k3, +inf] cluster.must_split(&region2, b"k3"); let region3 = pd_client.get_region(b"k3").unwrap(); // region 3 can't contain node 3. assert_eq!(region3.get_peers().len(), 2); assert!(find_peer(&region3, 3).is_none()); let new_peer_id = pd_client.alloc_id().unwrap(); // add peer (3, new_peer_id) to region 3 pd_client.must_add_peer(region3.get_id(), new_peer(3, new_peer_id)); cluster.must_put(b"k3", b"v3"); // node 3 must have k3. must_get_equal(&engine3, b"k3", b"v3"); } #[test] fn test_node_split_with_stale_peer() { let mut cluster = new_node_cluster(0, 3); test_split_with_stale_peer(&mut cluster); } #[test] fn test_server_split_with_stale_peer() { let mut cluster = new_server_cluster(0, 3); test_split_with_stale_peer(&mut cluster); } fn test_split_region_diff_check<T: Simulator>(cluster: &mut Cluster<T>) { let region_max_size = 2000; let region_split_size = 1000; cluster.cfg.raft_store.split_region_check_tick_interval = ReadableDuration::millis(100); cluster.cfg.raft_store.region_split_check_diff = ReadableSize(10); cluster.cfg.raft_store.raft_log_gc_tick_interval = ReadableDuration::secs(20); cluster.cfg.coprocessor.region_max_size = ReadableSize(region_max_size); cluster.cfg.coprocessor.region_split_size = ReadableSize(region_split_size); let mut range = 1..; cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); // The default size index distance is too large for small data, // we flush multiple times to generate more size index handles. for _ in 0..10 { put_till_size(cluster, region_max_size, &mut range); } // Peer will split when size of region meet region_max_size, // so assume the last region_max_size of data is not involved in split, // there will be at least (region_max_size * 10 - region_max_size) / region_split_size regions. 
// But region_max_size of data should be split too, so there will be at least 2 more regions. let min_region_cnt = (region_max_size * 10 - region_max_size) / region_split_size + 2; let mut try_cnt = 0; loop { sleep_ms(20); let region_cnt = pd_client.get_split_count() + 1; if region_cnt >= min_region_cnt as usize { return; } try_cnt += 1; if try_cnt == 500 { panic!( "expect split cnt {}, but got {}", min_region_cnt, region_cnt ); } } } #[test] fn test_server_split_region_diff_check() { let count = 1; let mut cluster = new_server_cluster(0, count); test_split_region_diff_check(&mut cluster); } #[test] fn test_node_split_region_diff_check() { let count = 1; let mut cluster = new_node_cluster(0, count); test_split_region_diff_check(&mut cluster); } fn test_split_epoch_not_match<T: Simulator>(cluster: &mut Cluster<T>, right_derive: bool) { cluster.cfg.raft_store.right_derive_when_split = right_derive; cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let old = pd_client.get_region(b"k1").unwrap(); // Construct a get command using old region meta. let get_old = new_request( old.get_id(), old.get_region_epoch().clone(), vec![new_get_cmd(b"k1")], false, ); cluster.must_split(&old, b"k2"); let left = pd_client.get_region(b"k1").unwrap(); let right = pd_client.get_region(b"k3").unwrap(); let new = if right_derive { right.clone() } else { left.clone() }; // Newer epoch also triggers the EpochNotMatch error. 
let mut latest_epoch = new.get_region_epoch().clone(); let latest_version = latest_epoch.get_version() + 1; latest_epoch.set_version(latest_version); let get_new = new_request(new.get_id(), latest_epoch, vec![new_get_cmd(b"k1")], false); for get in &[get_old, get_new] { let resp = cluster .call_command_on_leader(get.clone(), Duration::from_secs(5)) .unwrap(); assert!(resp.get_header().has_error(), "{:?}", get); assert!( resp.get_header().get_error().has_epoch_not_match(), "{:?}", get ); if right_derive { assert_eq!( resp.get_header() .get_error() .get_epoch_not_match() .get_current_regions(), &[right.clone(), left.clone()] ); } else { assert_eq!( resp.get_header() .get_error() .get_epoch_not_match() .get_current_regions(), &[left.clone(), right.clone()] ); } } } #[test] fn test_server_split_epoch_not_match_left_derive() { let mut cluster = new_server_cluster(0, 3); test_split_epoch_not_match(&mut cluster, false); } #[test] fn test_server_split_epoch_not_match_right_derive() { let mut cluster = new_server_cluster(0, 3); test_split_epoch_not_match(&mut cluster, true); } #[test] fn test_node_split_epoch_not_match_left_derive() { let mut cluster = new_node_cluster(0, 3); test_split_epoch_not_match(&mut cluster, false); } #[test] fn test_node_split_epoch_not_match_right_derive() { let mut cluster = new_node_cluster(0, 3); test_split_epoch_not_match(&mut cluster, true); } // For the peer which is the leader of the region before split, // it should campaigns immediately. and then this peer may take the leadership earlier. // `test_quick_election_after_split` is a helper function for testing this feature. fn test_quick_election_after_split<T: Simulator>(cluster: &mut Cluster<T>) { // Calculate the reserved time before a new campaign after split. 
let reserved_time = Duration::from_millis(cluster.cfg.raft_store.raft_base_tick_interval.as_millis() * 2); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let region = cluster.get_region(b"k1"); let old_leader = cluster.leader_of_region(region.get_id()).unwrap(); cluster.must_split(&region, b"k2"); // Wait for the peer of new region to start campaign. thread::sleep(reserved_time); // The campaign should always succeeds in the ideal test environment. let new_region = cluster.get_region(b"k3"); // Ensure the new leader is established for the newly split region, and it shares the // same store with the leader of old region. let new_leader = cluster.query_leader( old_leader.get_store_id(), new_region.get_id(), Duration::from_secs(5), ); assert!(new_leader.is_some()); } #[test] fn test_node_quick_election_after_split() { let mut cluster = new_node_cluster(0, 3); test_quick_election_after_split(&mut cluster); } #[test] fn test_server_quick_election_after_split() { let mut cluster = new_server_cluster(0, 3); test_quick_election_after_split(&mut cluster); } #[test] fn test_node_split_region() { let count = 5; let mut cluster = new_node_cluster(0, count); test_split_region(&mut cluster); } #[test] fn test_server_split_region() { let count = 5; let mut cluster = new_server_cluster(0, count); test_split_region(&mut cluster); } fn test_split_region<T: Simulator>(cluster: &mut Cluster<T>) { // length of each key+value let item_len = 74; // make bucket's size to item_len, which means one row one bucket cluster.cfg.coprocessor.region_max_size = ReadableSize(item_len) * 1024; let mut range = 1..; cluster.run(); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"").unwrap(); let mid_key = put_till_size(cluster, 11 * item_len, &mut range); let max_key = put_till_size(cluster, 9 * item_len, &mut range); let target = pd_client.get_region(&max_key).unwrap(); assert_eq!(region, target); pd_client.must_split_region(target, 
pdpb::CheckPolicy::Scan, vec![]); let left = pd_client.get_region(b"").unwrap(); let right = pd_client.get_region(&max_key).unwrap(); assert_eq!(region.get_start_key(), left.get_start_key()); assert_eq!(mid_key.as_slice(), right.get_start_key()); assert_eq!(right.get_start_key(), left.get_end_key()); assert_eq!(region.get_end_key(), right.get_end_key()); let region = pd_client.get_region(b"x").unwrap(); pd_client.must_split_region( region, pdpb::CheckPolicy::Usekey, vec![b"x1".to_vec(), b"y2".to_vec()], ); let x1 = pd_client.get_region(b"x1").unwrap(); assert_eq!(x1.get_start_key(), b"x1"); assert_eq!(x1.get_end_key(), b"y2"); let y2 = pd_client.get_region(b"y2").unwrap(); assert_eq!(y2.get_start_key(), b"y2"); assert_eq!(y2.get_end_key(), b""); } #[test] fn test_node_split_update_region_right_derive() { let mut cluster = new_node_cluster(0, 3); // Election timeout and max leader lease is 1s. configure_for_lease_read(&mut cluster, Some(100), Some(10)); cluster.run(); cluster.must_put(b"k1", b"v1"); cluster.must_put(b"k3", b"v3"); let pd_client = Arc::clone(&cluster.pd_client); let region = pd_client.get_region(b"k1").unwrap(); cluster.must_split(&region, b"k2"); let right = pd_client.get_region(b"k2").unwrap(); let origin_leader = cluster.leader_of_region(right.get_id()).unwrap(); let new_leader = right .get_peers() .iter() .cloned() .find(|p| p.get_id() != origin_leader.get_id()) .unwrap(); // Make sure split is done in the new_leader. // "k4" belongs to the right. cluster.must_put(b"k4", b"v4"); must_get_equal(&cluster.get_engine(new_leader.get_store_id()), b"k4", b"v4"); // Transfer leadership to another peer. cluster.must_transfer_leader(right.get_id(), new_leader); // Make sure the new_leader is in lease. cluster.must_put(b"k4", b"v5"); // "k1" is not in the range of right. 
let get = new_request( right.get_id(), right.get_region_epoch().clone(), vec![new_get_cmd(b"k1")], false, ); debug!("requesting {:?}", get); let resp = cluster .call_command_on_leader(get, Duration::from_secs(5)) .unwrap(); assert!(resp.get_header().has_error(), "{:?}", resp); assert!( resp.get_header().get_error().has_key_not_in_region(), "{:?}", resp ); } #[test] fn test_split_with_epoch_not_match() { let mut cluster = new_node_cluster(0, 3); cluster.run(); cluster.must_transfer_leader(1, new_peer(1, 1)); let pd_client = Arc::clone(&cluster.pd_client); pd_client.disable_default_operator(); // Remove a peer to make conf version become 2. pd_client.must_remove_peer(1, new_peer(2, 2)); let region = cluster.get_region(b""); let mut admin_req = AdminRequest::default(); admin_req.set_cmd_type(AdminCmdType::BatchSplit); let mut batch_split_req = BatchSplitRequest::default(); batch_split_req.mut_requests().push(SplitRequest::default()); batch_split_req.mut_requests()[0].set_split_key(b"s".to_vec()); batch_split_req.mut_requests()[0].set_new_region_id(1000); batch_split_req.mut_requests()[0].set_new_peer_ids(vec![1001, 1002]); batch_split_req.mut_requests()[0].set_right_derive(true); admin_req.set_splits(batch_split_req); let mut epoch = region.get_region_epoch().clone(); epoch.conf_ver -= 1; let req = new_admin_request(1, &epoch, admin_req); let resp = cluster .call_command_on_leader(req, Duration::from_secs(3)) .unwrap(); assert!(resp.get_header().get_error().has_epoch_not_match()); }
33.690557
99
0.651955
d72382bd41d8117bdeeaa19e8836ab4e0706607c
8,344
use super::prelude::*; use sink::{Sink, SinkState}; use stream::{BufferAttr, StreamFlags, Stream}; use std::u32; use std::ffi::CStr; const INVALID_INDEX: u32 = u32::MAX; /// Specifies a sink to connect a playback stream to. /// /// Said sink might not exist (in which case a `NoEntity` error should be returned). #[derive(Debug)] pub enum SinkSpec<'a> { /// Sink index. /// /// PA specifies `u32::MAX` as an invalid index, which does not occur here. Index(u32), /// Named sink. Name(&'a CStr), } #[derive(Debug)] struct CreatePlaybackStreamParams<'a> { /// Stream properties to set (such as the media name). stream_props: PropList, sample_spec: SampleSpec, channel_map: ChannelMap, stream_flags: StreamFlags, sink_spec: Option<SinkSpec<'a>>, /// Whether to start the stream in muted or unmuted state. /// /// `None` means no preference and the server should decide. muted: Option<bool>, /// Set the channel volumes. volume: Option<CVolume>, syncid: u32, } /// Parameters for `CreatePlaybackStream` command. #[derive(Debug)] pub struct CreatePlaybackStream<'a> { inner: Box<CreatePlaybackStreamParams<'a>>, } impl<'a> CreatePlaybackStream<'a> { pub fn stream_props(&self) -> &PropList { &self.inner.stream_props } pub fn stream_flags(&self) -> StreamFlags { self.inner.stream_flags } pub fn sample_spec(&self) -> &SampleSpec { &self.inner.sample_spec } pub fn channel_map(&self) -> &ChannelMap { &self.inner.channel_map } /// Get the sink specification. /// /// This tells the server which sink to connect the stream to. If `None`, the stream will be /// connected to the default sink. pub fn sink_spec(&self) -> Option<&SinkSpec> { self.inner.sink_spec.as_ref() } /// Get the stream mute preference. /// /// * `None`: No preference, let the server decide. /// * `Some(true)`: Create the stream in muted state. /// * `Some(false)`: Create the stream in unmuted state. 
pub fn muted(&self) -> Option<bool> { self.inner.muted } pub fn volume(&self) -> Option<&CVolume> { self.inner.volume.as_ref() } /// Get the specified sync ID group. /// /// All streams with the same sync ID are synced with each other. They all have the same sink. pub fn sync_id(&self) -> u32 { self.inner.syncid } } impl<'a> FromTagStruct<'a> for CreatePlaybackStream<'a> { fn from_tag_struct(ts: &mut TagStructReader<'a>, protocol_version: u16) -> Result<Self, Error> { // This one contains a *lot* of stuff, and many flags that were added over time. let (sample_spec, channel_map, sink_index, sink_name, syncid, cvolume, muted); let mut muted_set = false; // whether to set muted state (since proto 15) let mut volume_set = true; let mut stream_flags = StreamFlags::empty(); let mut buf_attr = BufferAttr::default(); let mut formats = Vec::new(); // only valid for protocol>=13 sample_spec = ts.read_sample_spec()?; channel_map = ts.read_channel_map()?; sink_index = ts.read_u32()?; sink_name = ts.read_string()?; buf_attr.maxlength = ts.read_u32()?; stream_flags.set(StreamFlags::START_CORKED, ts.read_bool()?); buf_attr.tlength = ts.read_u32()?; buf_attr.prebuf = ts.read_u32()?; buf_attr.minreq = ts.read_u32()?; syncid = ts.read_u32()?; cvolume = ts.read_cvolume()?; // (cvolume must contain at least 1 volume even if set_volume is false) // only valid for proto>=12 stream_flags.set(StreamFlags::NO_REMAP_CHANNELS, ts.read_bool()?); stream_flags.set(StreamFlags::NO_REMIX_CHANNELS, ts.read_bool()?); stream_flags.set(StreamFlags::FIX_FORMAT, ts.read_bool()?); stream_flags.set(StreamFlags::FIX_RATE, ts.read_bool()?); stream_flags.set(StreamFlags::FIX_CHANNELS, ts.read_bool()?); stream_flags.set(StreamFlags::DONT_MOVE, ts.read_bool()?); stream_flags.set(StreamFlags::VARIABLE_RATE, ts.read_bool()?); // proto>=13 muted = ts.read_bool()?; stream_flags.set(StreamFlags::ADJUST_LATENCY, ts.read_bool()?); let stream_props = ts.read_proplist()?; if protocol_version >= 14 { volume_set = 
ts.read_bool()?; stream_flags.set(StreamFlags::EARLY_REQUESTS, ts.read_bool()?); } if protocol_version >= 15 { muted_set = ts.read_bool()?; stream_flags.set(StreamFlags::DONT_INHIBIT_AUTO_SUSPEND, ts.read_bool()?); stream_flags.set(StreamFlags::FAIL_ON_SUSPEND, ts.read_bool()?); } if protocol_version >= 17 { stream_flags.set(StreamFlags::RELATIVE_VOLUME, ts.read_bool()?); } if protocol_version >= 18 { stream_flags.set(StreamFlags::PASSTHROUGH, ts.read_bool()?); } if protocol_version >= 21 { for _ in 0..ts.read_u8()? { formats.push(ts.read_format_info()?); } // Client have a choice here: Either send no format infos, but a sample spec and channel // map, or send at least 1 format info and any kind of sample spec/channel map // TODO: (including invalid ones). } let sink_spec = match (sink_index, sink_name) { (INVALID_INDEX, None) => None, // default sink (INVALID_INDEX, Some(name)) => Some(SinkSpec::Name(name)), (index, None) => Some(SinkSpec::Index(index)), (_index, Some(_name)) => { // PA rejects this as well return Err(Error::string(format!("cannot specify both sink index and name"))); } }; // `muted_set` tells us whether to actually change the muted flag, but it only // exists since proto 15, so set it to `true` when `muted` is `true`. if muted { muted_set = true; } // Build the muting preference // (None = no pref, Some(true) = plz mute, Some(false) = plz unmute) let muted = match (muted_set, muted) { (true, muted) => Some(muted), (false, _) => None, }; let volume = if volume_set { Some(cvolume) } else { None }; Ok(Self { inner: Box::new(CreatePlaybackStreamParams { stream_props, sample_spec, channel_map, stream_flags, sink_spec, muted, volume, syncid, }), }) } } impl<'a> ToTagStruct for CreatePlaybackStream<'a> { fn to_tag_struct(&self, _w: &mut TagStructWriter, _protocol_version: u16) -> Result<(), Error> { unimplemented!() // TODO } } #[derive(Debug)] pub struct CreatePlaybackStreamReply<'a> { /// Server-internal stream index. 
pub stream_index: u32, pub sink_input_index: u32, /// Number of bytes that can be written to the playback buffer. pub missing: u32, /// Attributes of the created buffer. pub buffer_metrics: &'a BufferAttr, /// Actually chosen sample specs. pub sample_spec: &'a SampleSpec, /// Actually chosen channel map. pub channel_map: &'a ChannelMap, pub stream: &'a Stream, /// The sink the created stream has been connected to. pub sink: &'a Sink, } impl<'a> ToTagStruct for CreatePlaybackStreamReply<'a> { fn to_tag_struct(&self, w: &mut TagStructWriter, protocol_version: u16) -> Result<(), Error> { w.write(self.stream_index); w.write(self.sink_input_index); w.write(self.missing); // proto>=9 w.write(self.buffer_metrics.maxlength); w.write(self.buffer_metrics.tlength); w.write(self.buffer_metrics.prebuf); w.write(self.buffer_metrics.minreq); // proto>=12 w.write(self.sample_spec); w.write(self.channel_map); w.write(self.sink.index()); w.write(self.sink.name()); w.write(self.sink.state() == SinkState::Suspended); // proto>=13 w.write(self.stream.latency()); if protocol_version >= 21 { // Send back the sample format of the sink } unimplemented!() } }
34.479339
100
0.606663
010210eae60db99ff5647ac24751325cbc53f8d4
12,835
//! //! methods to interact with the rebuild process use crate::{ context::{Context, OutputFormat}, Error, GrpcStatus, }; use ::rpc::mayastor as rpc; use clap::{App, AppSettings, Arg, ArgMatches, SubCommand}; use colored_json::ToColoredJson; use snafu::ResultExt; use tonic::Status; pub async fn handler( ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { match matches.subcommand() { ("start", Some(args)) => start(ctx, &args).await, ("stop", Some(args)) => stop(ctx, &args).await, ("pause", Some(args)) => pause(ctx, &args).await, ("resume", Some(args)) => resume(ctx, &args).await, ("state", Some(args)) => state(ctx, &args).await, ("stats", Some(args)) => stats(ctx, &args).await, ("progress", Some(args)) => progress(ctx, &args).await, (cmd, _) => { Err(Status::not_found(format!("command {} does not exist", cmd))) .context(GrpcStatus) } } } pub fn subcommands<'a, 'b>() -> App<'a, 'b> { let start = SubCommand::with_name("start") .about("starts a rebuild") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to start rebuilding"), ); let stop = SubCommand::with_name("stop") .about("stops a rebuild") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to stop rebuilding"), ); let pause = SubCommand::with_name("pause") .about("pauses a rebuild") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to pause rebuilding"), ); let resume = SubCommand::with_name("resume") .about("resumes a rebuild") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to resume rebuilding"), ); let state = SubCommand::with_name("state") .about("gets the rebuild 
state of the child") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to get the rebuild state from"), ); let stats = SubCommand::with_name("stats") .about("gets the rebuild stats of the child") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to get the rebuild stats from"), ); let progress = SubCommand::with_name("progress") .about("shows the progress of a rebuild") .arg( Arg::with_name("uuid") .required(true) .index(1) .help("uuid of the nexus"), ) .arg( Arg::with_name("uri") .required(true) .index(2) .help("uri of child to get the rebuild progress from"), ); SubCommand::with_name("rebuild") .settings(&[ AppSettings::SubcommandRequiredElseHelp, AppSettings::ColoredHelp, AppSettings::ColorAlways, ]) .about("Rebuild management") .subcommand(start) .subcommand(stop) .subcommand(pause) .subcommand(resume) .subcommand(state) .subcommand(stats) .subcommand(progress) } async fn start( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? .to_string(); let response = ctx .client .start_rebuild(rpc::StartRebuildRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { println!("{}", &uri); } }; Ok(()) } async fn stop(mut ctx: Context, matches: &ArgMatches<'_>) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? 
.to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? .to_string(); let response = ctx .client .stop_rebuild(rpc::StopRebuildRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { println!("{}", &uri); } }; Ok(()) } async fn pause( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? .to_string(); let response = ctx .client .pause_rebuild(rpc::PauseRebuildRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { println!("{}", &uri); } }; Ok(()) } async fn resume( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? 
.to_string(); let response = ctx .client .resume_rebuild(rpc::ResumeRebuildRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { println!("{}", &uri); } }; Ok(()) } async fn state( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? .to_string(); let response = ctx .client .get_rebuild_state(rpc::RebuildStateRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { ctx.print_list( vec!["state"], vec![vec![response.get_ref().state.clone()]], ); } }; Ok(()) } async fn stats( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? 
.to_string(); ctx.v2(&format!( "Getting the rebuild stats of child {} on nexus {}", uri, uuid )); let response = ctx .client .get_rebuild_stats(rpc::RebuildStatsRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { let response = &response.get_ref(); ctx.print_list( vec![ "blocks_total", "blocks_recovered", "progress (%)", "segment_size_blks", "block_size", "tasks_total", "tasks_active", ], vec![vec![ response.blocks_total, response.blocks_recovered, response.progress, response.segment_size_blks, response.block_size, response.tasks_total, response.tasks_active, ] .iter() .map(|s| s.to_string()) .collect()], ); } }; Ok(()) } async fn progress( mut ctx: Context, matches: &ArgMatches<'_>, ) -> crate::Result<()> { let uuid = matches .value_of("uuid") .ok_or_else(|| Error::MissingValue { field: "uuid".to_string(), })? .to_string(); let uri = matches .value_of("uri") .ok_or_else(|| Error::MissingValue { field: "uri".to_string(), })? .to_string(); let response = ctx .client .get_rebuild_progress(rpc::RebuildProgressRequest { uuid: uuid.clone(), uri: uri.clone(), }) .await .context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { println!( "{}", serde_json::to_string_pretty(&response.get_ref()) .unwrap() .to_colored_json_auto() .unwrap() ); } OutputFormat::Default => { ctx.print_list( vec!["progress (%)"], vec![vec![response.get_ref().progress.to_string()]], ); } }; Ok(()) }
26.087398
80
0.44815
614224410d13eb1565c01a2b230d6e18b1e9baac
28,438
/* This tool is part of the WhiteboxTools geospatial analysis library. Authors: Dr. John Lindsay Created: 10/07/2017 Last Modified: 10/05/2019 License: MIT NOTES: 1. This tool is designed to work either by specifying a single input and output file or a working directory containing multiple input LAS files. 2. Need to add the ability to exclude points based on max scan angle divation. */ use crate::lidar::*; use crate::raster::*; use crate::structures::{BoundingBox, DistanceMetric, FixedRadiusSearch2D}; use crate::tools::*; use num_cpus; use std::env; use std::f64; use std::fs; use std::io::{Error, ErrorKind}; use std::path; use std::sync::mpsc; use std::sync::{Arc, Mutex}; use std::thread; pub struct LidarPointDensity { name: String, description: String, toolbox: String, parameters: Vec<ToolParameter>, example_usage: String, } impl LidarPointDensity { pub fn new() -> LidarPointDensity { // public constructor let name = "LidarPointDensity".to_string(); let toolbox = "LiDAR Tools".to_string(); let description = "Calculates the spatial pattern of point density for a LiDAR data set. When the input/output parameters are not specified, the tool grids all LAS files contained within the working directory." 
.to_string(); let mut parameters = vec![]; parameters.push(ToolParameter { name: "Input File".to_owned(), flags: vec!["-i".to_owned(), "--input".to_owned()], description: "Input LiDAR file (including extension).".to_owned(), parameter_type: ParameterType::ExistingFile(ParameterFileType::Lidar), default_value: None, optional: true, }); parameters.push(ToolParameter { name: "Output File".to_owned(), flags: vec!["-o".to_owned(), "--output".to_owned()], description: "Output raster file (including extension).".to_owned(), parameter_type: ParameterType::NewFile(ParameterFileType::Raster), default_value: None, optional: true, }); parameters.push(ToolParameter { name: "Point Returns Included".to_owned(), flags: vec!["--returns".to_owned()], description: "Point return types to include; options are 'all' (default), 'last', 'first'." .to_owned(), parameter_type: ParameterType::OptionList(vec![ "all".to_owned(), "last".to_owned(), "first".to_owned(), ]), default_value: Some("all".to_owned()), optional: true, }); parameters.push(ToolParameter { name: "Grid Resolution".to_owned(), flags: vec!["--resolution".to_owned()], description: "Output raster's grid resolution.".to_owned(), parameter_type: ParameterType::Float, default_value: Some("1.0".to_owned()), optional: true, }); parameters.push(ToolParameter { name: "Search Radius".to_owned(), flags: vec!["--radius".to_owned()], description: "Search radius.".to_owned(), parameter_type: ParameterType::Float, default_value: Some("2.5".to_owned()), optional: true, }); parameters.push(ToolParameter{ name: "Exclusion Classes (0-18, based on LAS spec; e.g. 3,4,5,6,7)".to_owned(), flags: vec!["--exclude_cls".to_owned()], description: "Optional exclude classes from interpolation; Valid class values range from 0 to 18, based on LAS specifications. 
Example, --exclude_cls='3,4,5,6,7,18'.".to_owned(), parameter_type: ParameterType::String, default_value: None, optional: true }); // parameters.push(ToolParameter{ // name: "Palette Name (Whitebox raster outputs only)".to_owned(), // flags: vec!["--palette".to_owned()], // description: "Optional palette name (for use with Whitebox raster files).".to_owned(), // parameter_type: ParameterType::String, // default_value: None, // optional: true // }); parameters.push(ToolParameter { name: "Minimum Elevation Value (optional)".to_owned(), flags: vec!["--minz".to_owned()], description: "Optional minimum elevation for inclusion in interpolation.".to_owned(), parameter_type: ParameterType::Float, default_value: None, optional: true, }); parameters.push(ToolParameter { name: "Maximum Elevation Value (optional)".to_owned(), flags: vec!["--maxz".to_owned()], description: "Optional maximum elevation for inclusion in interpolation.".to_owned(), parameter_type: ParameterType::Float, default_value: None, optional: true, }); let sep: String = path::MAIN_SEPARATOR.to_string(); let p = format!("{}", env::current_dir().unwrap().display()); let e = format!("{}", env::current_exe().unwrap().display()); let mut short_exe = e .replace(&p, "") .replace(".exe", "") .replace(".", "") .replace(&sep, ""); if e.contains(".exe") { short_exe += ".exe"; } let usage = format!(">>.*{0} -r={1} -v --wd=\"*path*to*data*\" -i=file.las -o=outfile.tif --resolution=2.0 --radius=5.0\" .*{0} -r={1} -v --wd=\"*path*to*data*\" -i=file.las -o=outfile.tif --resolution=5.0 --radius=2.0 --exclude_cls='3,4,5,6,7,18' --palette=light_quant.plt", short_exe, name).replace("*", &sep); LidarPointDensity { name: name, description: description, toolbox: toolbox, parameters: parameters, example_usage: usage, } } } impl WhiteboxTool for LidarPointDensity { fn get_source_file(&self) -> String { String::from(file!()) } fn get_tool_name(&self) -> String { self.name.clone() } fn get_tool_description(&self) -> String { 
self.description.clone() } fn get_tool_parameters(&self) -> String { let mut s = String::from("{\"parameters\": ["); for i in 0..self.parameters.len() { if i < self.parameters.len() - 1 { s.push_str(&(self.parameters[i].to_string())); s.push_str(","); } else { s.push_str(&(self.parameters[i].to_string())); } } s.push_str("]}"); s } fn get_example_usage(&self) -> String { self.example_usage.clone() } fn get_toolbox(&self) -> String { self.toolbox.clone() } fn run<'a>( &self, args: Vec<String>, working_directory: &'a str, verbose: bool, ) -> Result<(), Error> { let mut input_file: String = "".to_string(); let mut output_file: String = "".to_string(); let mut return_type = "all".to_string(); let mut grid_res: f64 = 1.0; let mut search_radius = 2.5f64; let mut include_class_vals = vec![true; 256]; let mut palette = "default".to_string(); let mut exclude_cls_str = String::new(); let mut max_z = f64::INFINITY; let mut min_z = f64::NEG_INFINITY; // read the arguments if args.len() == 0 { return Err(Error::new( ErrorKind::InvalidInput, "Tool run with no parameters.", )); } for i in 0..args.len() { let mut arg = args[i].replace("\"", ""); arg = arg.replace("\'", ""); let cmd = arg.split("="); // in case an equals sign was used let vec = cmd.collect::<Vec<&str>>(); let mut keyval = false; if vec.len() > 1 { keyval = true; } let flag_val = vec[0].to_lowercase().replace("--", "-"); if flag_val == "-i" || flag_val == "-input" { if keyval { input_file = vec[1].to_string(); } else { input_file = args[i + 1].to_string(); } } else if flag_val == "-o" || flag_val == "-output" { if keyval { output_file = vec[1].to_string(); } else { output_file = args[i + 1].to_string(); } } else if flag_val == "-returns" { if keyval { return_type = vec[1].to_string(); } else { return_type = args[i + 1].to_string(); } } else if flag_val == "-resolution" { if keyval { grid_res = vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } else { grid_res = args[i + 1] 
.to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } } else if flag_val == "-radius" { if keyval { search_radius = vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } else { search_radius = args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } } else if flag_val == "-palette" { if keyval { palette = vec[1].to_string(); } else { palette = args[i + 1].to_string(); } } else if flag_val == "-exclude_cls" { if keyval { exclude_cls_str = vec[1].to_string(); } else { exclude_cls_str = args[i + 1].to_string(); } let mut cmd = exclude_cls_str.split(","); let mut vec = cmd.collect::<Vec<&str>>(); if vec.len() == 1 { cmd = exclude_cls_str.split(";"); vec = cmd.collect::<Vec<&str>>(); } for value in vec { if !value.trim().is_empty() { let c = value.trim().parse::<usize>().unwrap(); include_class_vals[c] = false; } } } else if flag_val == "-minz" { if keyval { min_z = vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } else { min_z = args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } } else if flag_val == "-maxz" { if keyval { max_z = vec[1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } else { max_z = args[i + 1] .to_string() .parse::<f64>() .expect(&format!("Error parsing {}", flag_val)); } } } if verbose { println!("***************{}", "*".repeat(self.get_tool_name().len())); println!("* Welcome to {} *", self.get_tool_name()); println!("***************{}", "*".repeat(self.get_tool_name().len())); } let start = Instant::now(); let (all_returns, late_returns, early_returns): (bool, bool, bool); if return_type.contains("last") { all_returns = false; late_returns = true; early_returns = false; } else if return_type.contains("first") { all_returns = false; late_returns = false; early_returns = true; } else { // all all_returns = true; late_returns = false; early_returns = 
false; } let mut inputs = vec![]; let mut outputs = vec![]; if input_file.is_empty() { if working_directory.is_empty() { return Err(Error::new(ErrorKind::InvalidInput, "This tool must be run by specifying either an individual input file or a working directory.")); } // match fs::read_dir(working_directory) { // Err(why) => println!("! {:?}", why.kind()), // Ok(paths) => { // for path in paths { // let s = format!("{:?}", path.unwrap().path()); // if s.replace("\"", "").to_lowercase().ends_with(".las") { // inputs.push(format!("{:?}", s.replace("\"", ""))); // outputs.push( // inputs[inputs.len() - 1] // .replace(".las", ".tif") // .replace(".LAS", ".tif"), // ) // } else if s.replace("\"", "").to_lowercase().ends_with(".zip") { // inputs.push(format!("{:?}", s.replace("\"", ""))); // outputs.push( // inputs[inputs.len() - 1] // .replace(".zip", ".tif") // .replace(".ZIP", ".tif"), // ) // } // } // } // } if std::path::Path::new(&working_directory).is_dir() { for entry in fs::read_dir(working_directory.clone())? { let s = entry? 
.path() .into_os_string() .to_str() .expect("Error reading path string") .to_string(); if s.to_lowercase().ends_with(".las") { inputs.push(s); outputs.push( inputs[inputs.len() - 1] .replace(".las", ".tif") .replace(".LAS", ".tif"), ) } else if s.to_lowercase().ends_with(".zip") { inputs.push(s); outputs.push( inputs[inputs.len() - 1] .replace(".zip", ".tif") .replace(".ZIP", ".tif"), ) } } } else { return Err(Error::new( ErrorKind::InvalidInput, format!("The input directory ({}) is incorrect.", working_directory), )); } } else { if !input_file.contains(path::MAIN_SEPARATOR) && !input_file.contains("/") { input_file = format!("{}{}", working_directory, input_file); } inputs.push(input_file.clone()); if output_file.is_empty() { output_file = input_file .clone() .replace(".las", ".tif") .replace(".LAS", ".tif"); } if !output_file.contains(path::MAIN_SEPARATOR) && !output_file.contains("/") { output_file = format!("{}{}", working_directory, output_file); } outputs.push(output_file); } /* If multiple files are being interpolated, we will need to know their bounding boxes, in order to retrieve points from adjacent tiles. This is so that there are no edge effects. 
*/ let mut bounding_boxes = vec![]; for in_file in &inputs { let header = LasHeader::read_las_header(&in_file.replace("\"", ""))?; bounding_boxes.push(BoundingBox { min_x: header.min_x, max_x: header.max_x, min_y: header.min_y, max_y: header.max_y, }); } if verbose { println!("Performing analysis..."); } let num_tiles = inputs.len(); let tile_list = Arc::new(Mutex::new(0..num_tiles)); let inputs = Arc::new(inputs); let outputs = Arc::new(outputs); let bounding_boxes = Arc::new(bounding_boxes); let num_procs2 = num_cpus::get() as isize; let (tx2, rx2) = mpsc::channel(); for _ in 0..num_procs2 { let inputs = inputs.clone(); let outputs = outputs.clone(); let bounding_boxes = bounding_boxes.clone(); let tile_list = tile_list.clone(); // copy over the string parameters let palette = palette.clone(); let return_type = return_type.clone(); let tool_name = self.get_tool_name(); let exclude_cls_str = exclude_cls_str.clone(); let include_class_vals = include_class_vals.clone(); let tx2 = tx2.clone(); thread::spawn(move || { let mut tile = 0; while tile < num_tiles { // Get the next tile up for interpolation { tile = match tile_list.lock().unwrap().next() { Some(val) => val, None => break, // There are no more tiles to interpolate }; } let start_run = Instant::now(); let input_file = inputs[tile].replace("\"", "").clone(); let output_file = outputs[tile].replace("\"", "").clone(); // Expand the bounding box to include the areas of overlap let bb = BoundingBox { min_x: bounding_boxes[tile].min_x - search_radius as f64, max_x: bounding_boxes[tile].max_x + search_radius as f64, min_y: bounding_boxes[tile].min_y - search_radius as f64, max_y: bounding_boxes[tile].max_y + search_radius as f64, }; let mut frs: FixedRadiusSearch2D<u8> = FixedRadiusSearch2D::new(search_radius, DistanceMetric::SquaredEuclidean); if verbose && inputs.len() == 1 { println!("Reading input LAS file..."); } let mut progress: i32; let mut old_progress: i32 = -1; for m in 0..inputs.len() { if 
bounding_boxes[m].overlaps(bb) { let input = match LasFile::new(&inputs[m].replace("\"", "").clone(), "r") { Ok(lf) => lf, Err(err) => panic!( "Error reading file {}: {}", inputs[m].replace("\"", ""), err ), }; let n_points = input.header.number_of_points as usize; let num_points: f64 = (input.header.number_of_points - 1) as f64; // used for progress calculation only for i in 0..n_points { let p: PointData = input[i]; if !p.withheld() { if all_returns || (p.is_late_return() & late_returns) || (p.is_early_return() & early_returns) { if include_class_vals[p.classification() as usize] { if bb.is_point_in_box(p.x, p.y) && p.z >= min_z && p.z <= max_z { frs.insert(p.x, p.y, 1u8); } } } } if verbose && inputs.len() == 1 { progress = (100.0_f64 * i as f64 / num_points) as i32; if progress != old_progress { println!("Binning points: {}%", progress); old_progress = progress; } } } } } let west: f64 = bounding_boxes[tile].min_x; let north: f64 = bounding_boxes[tile].max_y; let rows: isize = (((north - bounding_boxes[tile].min_y) / grid_res).ceil()) as isize; let columns: isize = (((bounding_boxes[tile].max_x - west) / grid_res).ceil()) as isize; let south: f64 = north - rows as f64 * grid_res; let east = west + columns as f64 * grid_res; let nodata = -32768.0f64; let mut configs = RasterConfigs { ..Default::default() }; configs.rows = rows as usize; configs.columns = columns as usize; configs.north = north; configs.south = south; configs.east = east; configs.west = west; configs.resolution_x = grid_res; configs.resolution_y = grid_res; configs.nodata = nodata; configs.data_type = DataType::F64; configs.photometric_interp = PhotometricInterpretation::Continuous; configs.palette = palette.clone(); let mut output = Raster::initialize_using_config(&output_file, &configs); let search_area = f64::consts::PI * (search_radius * search_radius) as f64; if num_tiles > 1 { let (mut x, mut y): (f64, f64); for row in 0..rows { for col in 0..columns { x = west + col as f64 * grid_res + 
0.5; y = north - row as f64 * grid_res - 0.5; let ret = frs.search(x, y); output.set_value(row, col, ret.len() as f64 / search_area); } if verbose && inputs.len() == 1 { progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as i32; if progress != old_progress { println!("Progress: {}%", progress); old_progress = progress; } } } } else { // there's only one tile, so use all cores to interpolate this one tile. let frs = Arc::new(frs); // wrap FRS in an Arc let num_procs = num_cpus::get() as isize; let (tx, rx) = mpsc::channel(); for tid in 0..num_procs { let frs = frs.clone(); let tx1 = tx.clone(); thread::spawn(move || { let (mut x, mut y): (f64, f64); for row in (0..rows).filter(|r| r % num_procs == tid) { let mut data = vec![nodata; columns as usize]; for col in 0..columns { x = west + col as f64 * grid_res + 0.5; y = north - row as f64 * grid_res - 0.5; let ret = frs.search(x, y); data[col as usize] = ret.len() as f64 / search_area; } tx1.send((row, data)).unwrap(); } }); } for row in 0..rows { let data = rx.recv().expect("Error receiving data from thread."); output.set_row_data(data.0, data.1); if verbose { progress = (100.0_f64 * row as f64 / (rows - 1) as f64) as i32; if progress != old_progress { println!("Progress: {}%", progress); old_progress = progress; } } } } let elapsed_time_run = get_formatted_elapsed_time(start_run); output.add_metadata_entry(format!( "Created by whitebox_tools\' {} tool", tool_name )); output.add_metadata_entry(format!("Input file: {}", input_file)); output.add_metadata_entry(format!("Grid resolution: {}", grid_res)); output.add_metadata_entry(format!("Search radius: {}", search_radius)); output.add_metadata_entry(format!("Returns: {}", return_type)); output.add_metadata_entry(format!("Excluded classes: {}", exclude_cls_str)); output.add_metadata_entry(format!( "Elapsed Time (including I/O): {}", elapsed_time_run )); if verbose && inputs.len() == 1 { println!("Saving data...") }; let _ = output.write().unwrap(); 
tx2.send(tile).unwrap(); } }); } let mut progress: i32; let mut old_progress: i32 = -1; for tile in 0..inputs.len() { let tile_completed = rx2.recv().unwrap(); if verbose { println!( "Finished interpolating {} ({} of {})", inputs[tile_completed] .replace("\"", "") .replace(working_directory, "") .replace(".las", ""), tile + 1, inputs.len() ); } if verbose { progress = (100.0_f64 * tile as f64 / (inputs.len() - 1) as f64) as i32; if progress != old_progress { println!("Progress: {}%", progress); old_progress = progress; } } } let elapsed_time = get_formatted_elapsed_time(start); if verbose { println!( "{}", &format!("Elapsed Time (including I/O): {}", elapsed_time) ); } Ok(()) } }
41.820588
218
0.427597
117a96d1174041c3079ed9b01d1d2352ec79d840
2,897
// Copyright 2020 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use common_base::tokio; use common_exception::Result; use common_management::UserInfo; use common_meta_types::AuthType; use common_planners::*; use pretty_assertions::assert_eq; use crate::interpreters::*; use crate::sql::*; #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_drop_user_interpreter() -> Result<()> { common_tracing::init_default_ut_tracing(); let ctx = crate::tests::try_create_context()?; { static TEST_QUERY: &str = "DROP USER 'test'@'localhost'"; if let PlanNode::DropUser(plan) = PlanParser::parse(TEST_QUERY, ctx.clone()).await? { let executor = DropUserInterpreter::try_create(ctx.clone(), plan.clone())?; assert_eq!(executor.name(), "DropUserInterpreter"); let ret = executor.execute(None).await; assert!(ret.is_err()) } else { panic!() } } { static TEST_QUERY: &str = "DROP USER IF EXISTS 'test'@'localhost'"; if let PlanNode::DropUser(plan) = PlanParser::parse(TEST_QUERY, ctx.clone()).await? 
{ let executor = DropUserInterpreter::try_create(ctx.clone(), plan.clone())?; assert_eq!(executor.name(), "DropUserInterpreter"); let ret = executor.execute(None).await; assert!(ret.is_ok()) } else { panic!() } } { let name = "test"; let hostname = "localhost"; let password = "test"; let user_info = UserInfo::new( name.to_string(), hostname.to_string(), Vec::from(password), AuthType::PlainText, ); let user_mgr = ctx.get_sessions_manager().get_user_manager(); user_mgr.add_user(user_info).await?; let old_user = user_mgr.get_user(name, hostname).await?; assert_eq!(old_user.password, Vec::from(password)); static TEST_QUERY: &str = "DROP USER 'test'@'localhost'"; if let PlanNode::DropUser(plan) = PlanParser::parse(TEST_QUERY, ctx.clone()).await? { let executor = DropUserInterpreter::try_create(ctx, plan.clone())?; assert_eq!(executor.name(), "DropUserInterpreter"); executor.execute(None).await?; } else { panic!() } } Ok(()) }
34.903614
93
0.625129
1cb1dede5519d2c9e11f7cc02b4ec29710ba11f8
726
//! This repo contains a simple framework for building Optics agents. //! It has common utils and tools for configuring the app, interacting with the //! smart contracts, etc. //! //! Implementations of the `Home` and `Replica` traits on different chains //! ought to live here. #![forbid(unsafe_code)] #![warn(missing_docs)] #![warn(unused_extern_crates)] mod settings; pub use settings::*; /// Base trait for an agent mod agent; pub use agent::*; #[doc(hidden)] #[cfg_attr(tarpaulin, skip)] #[macro_use] mod macros; pub use macros::*; /// Home type mod home; pub use home::*; /// Replica type mod replica; pub use replica::*; /// XAppConnectionManager type mod xapp; pub use xapp::*; mod metrics; pub use metrics::*;
19.105263
79
0.702479
abde43b7db78b86119fc4616cf573bd21b80ee0c
1,961
use specs::prelude::*; use super::{Map, Position, BlocksTile, TileSize}; pub struct MapIndexingSystem {} impl<'a> System<'a> for MapIndexingSystem { type SystemData = ( WriteExpect<'a, Map>, ReadStorage<'a, Position>, ReadStorage<'a, BlocksTile>, ReadStorage<'a, TileSize>, Entities<'a>,); fn run(&mut self, data : Self::SystemData) { let (mut map, position, blockers, sizes, entities) = data; map.populate_blocked(); map.clear_content_index(); for (entity, position) in (&entities, &position).join() { let idx = map.xy_idx(position.x, position.y); if let Some(size) = sizes.get(entity) { // Multi-tile for y in position.y .. position.y + size.y { for x in position.x .. position.x + size.x { if x > 0 && x < map.width-1 && y > 0 && y < map.height-1 { let idx = map.xy_idx(x, y); if blockers.get(entity).is_some() { map.blocked[idx] = true; } // Push the entity to the appropriate index slot. It's a Copy // type, so we don't need to clone it (we want to avoid moving it out of the ECS!) map.tile_content[idx].push(entity); } } } } else { // Single Tile if blockers.get(entity).is_some() { map.blocked[idx] = true; } // Push the entity to the appropriate index slot. It's a Copy // type, so we don't need to clone it (we want to avoid moving it out of the ECS!) map.tile_content[idx].push(entity); } } } }
39.22
110
0.4564
03dc6f36f9a3e607ed63ce3f8d25380c7bb97726
721
// Copyright 2016 Mozilla // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use // this file except in compliance with the License. You may obtain a copy of the // License at http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // Verify that our public API can be imported. #[allow(unused_imports)] use mentat::{conn, new_connection, Conn, QueryResults, TypedValue, ValueType};
48.066667
82
0.765603
f997762e768c12d149c938b8314937ce2cb28e65
2,846
// Copyright (c) The Libra Core Contributors // SPDX-License-Identifier: Apache-2.0 use crate::unit_tests::testutils::compile_module_string; #[test] fn compile_script_with_functions() { let code = String::from( " module Foobar { resource FooCoin { value: u64 } public value(this: &R#Self.FooCoin): u64 { let value_ref: &u64; value_ref = &move(this).value; return *move(value_ref); } public deposit(this: &mut R#Self.FooCoin, check: R#Self.FooCoin) { let value_ref: &mut u64; let value: u64; let check_ref: &R#Self.FooCoin; let check_value: u64; let new_value: u64; let i: u64; value_ref = &mut move(this).value; value = *copy(value_ref); check_ref = &check; check_value = Self.value(move(check_ref)); new_value = copy(value) + copy(check_value); *move(value_ref) = move(new_value); FooCoin { value: i } = move(check); return; } } ", ); let compiled_module_res = compile_module_string(&code); assert!(compiled_module_res.is_ok()); } fn generate_function(name: &str, num_formals: usize, num_locals: usize) -> String { let mut code = format!("public {}(", name); code.reserve(30 * (num_formals + num_locals)); for i in 0..num_formals { code.push_str(&format!("formal_{}: u64", i)); if i < num_formals - 1 { code.push_str(", "); } } code.push_str(") {\n"); for i in 0..num_locals { code.push_str(&format!("let x_{}: u64;\n", i)); } for i in 0..num_locals { code.push_str(&format!("x_{} = {};\n", i, i)); } code.push_str("return;"); code.push_str("}"); code } #[test] fn compile_script_with_large_frame() { let mut code = String::from( " module Foobar { resource FooCoin { value: u64 } ", ); // Max number of locals (formals + local variables) is u8::max_value(). 
code.push_str(&generate_function("foo_func", 128, 127)); code.push_str("}"); let compiled_module_res = compile_module_string(&code); assert!(compiled_module_res.is_ok()); } #[test] fn compile_script_with_invalid_large_frame() { let mut code = String::from( " module Foobar { resource FooCoin { value: u64 } ", ); // Max number of locals (formals + local variables) is u8::max_value(). code.push_str(&generate_function("foo_func", 128, 128)); code.push_str("}"); let compiled_module_res = compile_module_string(&code); assert!(compiled_module_res.is_err()); }
27.104762
83
0.550597
cc303bfcba3a5c36f75a369db095ecc51c1afcca
3,374
// SPDX-License-Identifier: MIT OR Apache-2.0
//
// Copyright (c) 2018-2020 Andre Richter <andre.o.richter@gmail.com>

//! Printing facilities.

use crate::{bsp, console};
use core::fmt;

//--------------------------------------------------------------------------------------------------
// Private Code
//--------------------------------------------------------------------------------------------------

// Funnel for all the macros below: forwards pre-built format arguments to the
// board-specific console. Panics if the console write fails (`unwrap`).
#[doc(hidden)]
pub fn _print(args: fmt::Arguments) {
    use console::interface::Write;

    bsp::console::console().write_fmt(args).unwrap();
}

//--------------------------------------------------------------------------------------------------
// Public Code
//--------------------------------------------------------------------------------------------------

/// Prints without a newline.
///
/// Carbon copy from https://doc.rust-lang.org/src/std/macros.rs.html
#[macro_export]
macro_rules! print {
    ($($arg:tt)*) => ($crate::print::_print(format_args!($($arg)*)));
}

/// Prints with a newline.
///
/// Carbon copy from https://doc.rust-lang.org/src/std/macros.rs.html
// NOTE(review): `format_args_nl!` is an unstable compiler-internal macro, so
// this file presumably builds on nightly only — confirm against the crate's
// feature gates.
#[macro_export]
macro_rules! println {
    () => ($crate::print!("\n"));
    ($($arg:tt)*) => ({
        $crate::print::_print(format_args_nl!($($arg)*));
    })
}

/// Prints an info, with a newline.
///
/// The message is prefixed with the uptime timestamp formatted as
/// `[ SSS.mmmuuu] ` (seconds, then milliseconds and microseconds derived from
/// the sub-second microsecond count).
#[macro_export]
macro_rules! info {
    ($string:expr) => ({
        // Imported only so `uptime()` from the TimeManager trait resolves.
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[ {:>3}.{:03}{:03}] ", $string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000
        ));
    });
    ($format_string:expr, $($arg:tt)*) => ({
        // Imported only so `uptime()` from the TimeManager trait resolves.
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[ {:>3}.{:03}{:03}] ", $format_string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000,
            $($arg)*
        ));
    })
}

/// Prints a warning, with a newline.
///
/// Identical to `info!` except the timestamp prefix carries a `W` marker:
/// `[W SSS.mmmuuu] `.
#[macro_export]
macro_rules! warn {
    ($string:expr) => ({
        // Imported only so `uptime()` from the TimeManager trait resolves.
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[W {:>3}.{:03}{:03}] ", $string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000
        ));
    });
    ($format_string:expr, $($arg:tt)*) => ({
        // Imported only so `uptime()` from the TimeManager trait resolves.
        #[allow(unused_imports)]
        use crate::time::interface::TimeManager;

        let timestamp = $crate::time::time_manager().uptime();
        let timestamp_subsec_us = timestamp.subsec_micros();

        $crate::print::_print(format_args_nl!(
            concat!("[W {:>3}.{:03}{:03}] ", $format_string),
            timestamp.as_secs(),
            timestamp_subsec_us / 1_000,
            timestamp_subsec_us % 1_000,
            $($arg)*
        ));
    })
}
30.396396
100
0.504149
56a0392e611fb1e7a45481dc18600f128adb20fb
9,905
// Read-only accessor types for the MAC_DEBUG register.
// NOTE(review): this looks like svd2rust-generated code — if so, prefer
// regenerating from the SVD description over hand-editing; confirm with the
// crate's build setup.

#[doc = r" Value read from the register"]
pub struct R {
    bits: u32,
}
impl super::MAC_DEBUG {
    #[doc = r" Reads the contents of the register"]
    #[inline]
    pub fn read(&self) -> R {
        R {
            bits: self.register.get(),
        }
    }
}

// One wrapper type per register field; each holds the already-shifted and
// masked raw value read from the register.

#[doc = r" Value of the field"]
pub struct RXIDLESTATR {
    bits: bool,
}
impl RXIDLESTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct FIFOSTAT0R {
    bits: u8,
}
impl FIFOSTAT0R {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct RXFIFOSTAT1R {
    bits: bool,
}
impl RXFIFOSTAT1R {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct RXFIFOSTATR {
    bits: u8,
}
impl RXFIFOSTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct RXFIFOLVLR {
    bits: u8,
}
impl RXFIFOLVLR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct TXIDLESTATR {
    bits: bool,
}
impl TXIDLESTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct TXSTATR {
    bits: u8,
}
impl TXSTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct PAUSER {
    bits: bool,
}
impl PAUSER {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct TXFIFOSTATR {
    bits: u8,
}
impl TXFIFOSTATR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bits(&self) -> u8 {
        self.bits
    }
}
#[doc = r" Value of the field"]
pub struct TXFIFOSTAT1R {
    bits: bool,
}
impl TXFIFOSTAT1R {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct TXFIFOLVLR {
    bits: bool,
}
impl TXFIFOLVLR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}
#[doc = r" Value of the field"]
pub struct TXFIFOFULLR {
    bits: bool,
}
impl TXFIFOFULLR {
    #[doc = r" Value of the field as raw bits"]
    #[inline]
    pub fn bit(&self) -> bool {
        self.bits
    }
    #[doc = r" Returns `true` if the bit is clear (0)"]
    #[inline]
    pub fn bit_is_clear(&self) -> bool {
        !self.bit()
    }
    #[doc = r" Returns `true` if the bit is set (1)"]
    #[inline]
    pub fn bit_is_set(&self) -> bool {
        self.bit()
    }
}

// Field extractors: each shifts the raw 32-bit value right by the field's
// OFFSET, masks it, and wraps the result in the matching field type.
impl R {
    #[doc = r" Value of the register as raw bits"]
    #[inline]
    pub fn bits(&self) -> u32 {
        self.bits
    }
    #[doc = "Bit 0 - When high, it indicates that the MAC MII receive protocol engine is actively receiving data and not in IDLE state."]
    #[inline]
    pub fn rxidlestat(&self) -> RXIDLESTATR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 0;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        RXIDLESTATR { bits }
    }
    #[doc = "Bits 1:2 - When high, it indicates the active state of the small FIFO Read and Write controllers respectively of the MAC receive Frame Controller module."]
    #[inline]
    pub fn fifostat0(&self) -> FIFOSTAT0R {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 1;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        FIFOSTAT0R { bits }
    }
    #[doc = "Bit 4 - When high, it indicates that the MTL RxFIFO Write Controller is active and transferring a received frame to the FIFO."]
    #[inline]
    pub fn rxfifostat1(&self) -> RXFIFOSTAT1R {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 4;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        RXFIFOSTAT1R { bits }
    }
    #[doc = "Bits 5:6 - State of the RxFIFO read Controller: 00 = idle state 01 = reading frame data 10 = reading frame status (or Time stamp) 11 = flushing the frame data and status"]
    #[inline]
    pub fn rxfifostat(&self) -> RXFIFOSTATR {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 5;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        RXFIFOSTATR { bits }
    }
    #[doc = "Bits 8:9 - Status of the RxFIFO Fill-level 00 = RxFIFO Empty 01 = RxFIFO fill-level below flow-control de-activate threshold 10 = RxFIFO fill-level above flow-control activate threshold 11 = RxFIFO Full"]
    #[inline]
    pub fn rxfifolvl(&self) -> RXFIFOLVLR {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 8;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        RXFIFOLVLR { bits }
    }
    #[doc = "Bit 16 - When high, it indicates that the MAC MII transmit protocol engine is actively transmitting data and not in IDLE state."]
    #[inline]
    pub fn txidlestat(&self) -> TXIDLESTATR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 16;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        TXIDLESTATR { bits }
    }
    #[doc = "Bits 17:18 - State of the MAC Transmit Frame Controller module: 00 = idle 01 = Waiting for Status of previous frame or IFG/backoff period to be over 10 = Generating and transmitting a PAUSE control frame (in full duplex mode) 11 = Transferring input frame for transmission"]
    #[inline]
    pub fn txstat(&self) -> TXSTATR {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 17;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        TXSTATR { bits }
    }
    #[doc = "Bit 19 - When high, it indicates that the MAC transmitter is in PAUSE condition (in full-duplex only) and hence will not schedule any frame for transmission."]
    #[inline]
    pub fn pause(&self) -> PAUSER {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 19;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        PAUSER { bits }
    }
    #[doc = "Bits 20:21 - State of the TxFIFO read Controller 00 = idle state 01 = READ state (transferring data to MAC transmitter) 10 = Waiting for TxStatus from MAC transmitter 11 = Writing the received TxStatus or flushing the TxFIFO"]
    #[inline]
    pub fn txfifostat(&self) -> TXFIFOSTATR {
        let bits = {
            const MASK: u8 = 3;
            const OFFSET: u8 = 20;
            ((self.bits >> OFFSET) & MASK as u32) as u8
        };
        TXFIFOSTATR { bits }
    }
    #[doc = "Bit 22 - When high, it indicates that the TxFIFO Write Controller is active and transferring data to the TxFIFO."]
    #[inline]
    pub fn txfifostat1(&self) -> TXFIFOSTAT1R {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 22;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        TXFIFOSTAT1R { bits }
    }
    #[doc = "Bit 24 - When high, it indicates that the TxFIFO is not empty and has some data left for transmission."]
    #[inline]
    pub fn txfifolvl(&self) -> TXFIFOLVLR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 24;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        TXFIFOLVLR { bits }
    }
    #[doc = "Bit 25 - When high, it indicates that the TxStatus FIFO is full and hence the controller will not be accepting any more frames for transmission."]
    #[inline]
    pub fn txfifofull(&self) -> TXFIFOFULLR {
        let bits = {
            const MASK: bool = true;
            const OFFSET: u8 = 25;
            ((self.bits >> OFFSET) & MASK as u32) != 0
        };
        TXFIFOFULLR { bits }
    }
}
28.877551
287
0.546795
1c8dc82d88f9699fcb257adaf1cf3ea27d75b158
98
//! Parent module that groups the automaton submodules and flattens their
//! public items into a single namespace.

pub mod pda;
pub mod state;
pub mod transition;

// Re-export everything so callers can use `this_module::Item` directly
// instead of reaching into the individual submodules.
pub use self::{pda::*, state::*, transition::*};
16.333333
48
0.653061
e608a7d22dcf5e8a8fe33bf3fbf969974157ae93
83,287
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! A growable list type, written `Vec<T>` but pronounced 'vector.' //! //! Vectors have `O(1)` indexing, push (to the end) and pop (from the end). use core::prelude::*; use alloc::boxed::Box; use alloc::heap::{EMPTY, allocate, reallocate, deallocate}; use core::cmp::max; use core::default::Default; use core::fmt; use core::kinds::marker::{ContravariantLifetime, InvariantType}; use core::mem; use core::num; use core::ops; use core::ptr; use core::raw::Slice as RawSlice; use core::uint; use {Mutable, MutableSeq}; use slice::{MutableOrdSlice, MutableSliceAllocating, CloneableVector}; use slice::{Items, MutItems}; /// An owned, growable vector. 
/// /// # Examples /// /// ``` /// let mut vec = Vec::new(); /// vec.push(1i); /// vec.push(2i); /// /// assert_eq!(vec.len(), 2); /// assert_eq!(vec[0], 1); /// /// assert_eq!(vec.pop(), Some(2)); /// assert_eq!(vec.len(), 1); /// /// *vec.get_mut(0) = 7i; /// assert_eq!(vec[0], 7); /// /// vec.push_all([1, 2, 3]); /// /// for x in vec.iter() { /// println!("{}", x); /// } /// assert_eq!(vec, vec![7i, 1, 2, 3]); /// ``` /// /// The `vec!` macro is provided to make initialization more convenient: /// /// ``` /// let mut vec = vec![1i, 2i, 3i]; /// vec.push(4); /// assert_eq!(vec, vec![1, 2, 3, 4]); /// ``` /// /// Use a `Vec` as an efficient stack: /// /// ``` /// let mut stack = Vec::new(); /// /// stack.push(1i); /// stack.push(2i); /// stack.push(3i); /// /// loop { /// let top = match stack.pop() { /// None => break, // empty /// Some(x) => x, /// }; /// // Prints 3, 2, 1 /// println!("{}", top); /// } /// ``` /// /// # Capacity and reallocation /// /// The capacity of a vector is the amount of space allocated for any future /// elements that will be added onto the vector. This is not to be confused /// with the *length* of a vector, which specifies the number of actual /// elements within the vector. If a vector's length exceeds its capacity, /// its capacity will automatically be increased, but its elements will /// have to be reallocated. /// /// For example, a vector with capacity 10 and length 0 would be an empty /// vector with space for 10 more elements. Pushing 10 or fewer elements onto /// the vector will not change its capacity or cause reallocation to occur. /// However, if the vector's length is increased to 11, it will have to /// reallocate, which can be slow. For this reason, it is recommended /// to use `Vec::with_capacity` whenever possible to specify how big the vector /// is expected to get. #[unsafe_no_drop_flag] #[stable] pub struct Vec<T> { len: uint, cap: uint, ptr: *mut T } impl<T> Vec<T> { /// Constructs a new, empty `Vec`. 
/// /// The vector will not allocate until elements are pushed onto it. /// /// # Example /// /// ``` /// let mut vec: Vec<int> = Vec::new(); /// ``` #[inline] #[stable] pub fn new() -> Vec<T> { // We want ptr to never be NULL so instead we set it to some arbitrary // non-null value which is fine since we never call deallocate on the ptr // if cap is 0. The reason for this is because the pointer of a slice // being NULL would break the null pointer optimization for enums. Vec { len: 0, cap: 0, ptr: EMPTY as *mut T } } /// Constructs a new, empty `Vec` with the specified capacity. /// /// The vector will be able to hold exactly `capacity` elements without /// reallocating. If `capacity` is 0, the vector will not allocate. /// /// It is important to note that this function does not specify the /// *length* of the returned vector, but only the *capacity*. (For an /// explanation of the difference between length and capacity, see /// the main `Vec` docs above, 'Capacity and reallocation'.) To create /// a vector of a given length, use `Vec::from_elem` or `Vec::from_fn`. /// /// # Example /// /// ``` /// let mut vec: Vec<int> = Vec::with_capacity(10); /// /// // The vector contains no items, even though it has capacity for more /// assert_eq!(vec.len(), 0); /// /// // These are all done without reallocating... /// for i in range(0i, 10) { /// vec.push(i); /// } /// /// // ...but this may make the vector reallocate /// vec.push(11); /// ``` #[inline] #[stable] pub fn with_capacity(capacity: uint) -> Vec<T> { if mem::size_of::<T>() == 0 { Vec { len: 0, cap: uint::MAX, ptr: EMPTY as *mut T } } else if capacity == 0 { Vec::new() } else { let size = capacity.checked_mul(&mem::size_of::<T>()) .expect("capacity overflow"); let ptr = unsafe { allocate(size, mem::min_align_of::<T>()) }; Vec { len: 0, cap: capacity, ptr: ptr as *mut T } } } /// Creates and initializes a `Vec`. 
/// /// Creates a `Vec` of size `length` and initializes the elements to the /// value returned by the closure `op`. /// /// # Example /// /// ``` /// let vec = Vec::from_fn(3, |idx| idx * 2); /// assert_eq!(vec, vec![0, 2, 4]); /// ``` #[inline] #[unstable = "the naming is uncertain as well as this migrating to unboxed \ closures in the future"] pub fn from_fn(length: uint, op: |uint| -> T) -> Vec<T> { unsafe { let mut xs = Vec::with_capacity(length); while xs.len < length { let len = xs.len; ptr::write(xs.as_mut_slice().unsafe_mut(len), op(len)); xs.len += 1; } xs } } /// Creates a `Vec<T>` directly from the raw constituents. /// /// This is highly unsafe: /// /// - if `ptr` is null, then `length` and `capacity` should be 0 /// - `ptr` must point to an allocation of size `capacity` /// - there must be `length` valid instances of type `T` at the /// beginning of that allocation /// - `ptr` must be allocated by the default `Vec` allocator /// /// # Example /// /// ``` /// use std::ptr; /// use std::mem; /// /// fn main() { /// let mut v = vec![1i, 2, 3]; /// /// // Pull out the various important pieces of information about `v` /// let p = v.as_mut_ptr(); /// let len = v.len(); /// let cap = v.capacity(); /// /// unsafe { /// // Cast `v` into the void: no destructor run, so we are in /// // complete control of the allocation to which `p` points. /// mem::forget(v); /// /// // Overwrite memory with 4, 5, 6 /// for i in range(0, len as int) { /// ptr::write(p.offset(i), 4 + i); /// } /// /// // Put everything back together into a Vec /// let rebuilt = Vec::from_raw_parts(len, cap, p); /// assert_eq!(rebuilt, vec![4i, 5i, 6i]); /// } /// } /// ``` #[experimental] pub unsafe fn from_raw_parts(length: uint, capacity: uint, ptr: *mut T) -> Vec<T> { Vec { len: length, cap: capacity, ptr: ptr } } /// Consumes the `Vec`, partitioning it based on a predicate. 
/// /// Partitions the `Vec` into two `Vec`s `(A,B)`, where all elements of `A` /// satisfy `f` and all elements of `B` do not. The order of elements is /// preserved. /// /// # Example /// /// ``` /// let vec = vec![1i, 2i, 3i, 4i]; /// let (even, odd) = vec.partition(|&n| n % 2 == 0); /// assert_eq!(even, vec![2, 4]); /// assert_eq!(odd, vec![1, 3]); /// ``` #[inline] #[experimental] pub fn partition(self, f: |&T| -> bool) -> (Vec<T>, Vec<T>) { let mut lefts = Vec::new(); let mut rights = Vec::new(); for elt in self.into_iter() { if f(&elt) { lefts.push(elt); } else { rights.push(elt); } } (lefts, rights) } } impl<T: Clone> Vec<T> { /// Constructs a `Vec` with copies of a value. /// /// Creates a `Vec` with `length` copies of `value`. /// /// # Example /// ``` /// let vec = Vec::from_elem(3, "hi"); /// println!("{}", vec); // prints [hi, hi, hi] /// ``` #[inline] #[unstable = "this functionality may become more generic over all collections"] pub fn from_elem(length: uint, value: T) -> Vec<T> { unsafe { let mut xs = Vec::with_capacity(length); while xs.len < length { let len = xs.len; ptr::write(xs.as_mut_slice().unsafe_mut(len), value.clone()); xs.len += 1; } xs } } /// Appends all elements in a slice to the `Vec`. /// /// Iterates over the slice `other`, clones each element, and then appends /// it to this `Vec`. The `other` vector is traversed in-order. /// /// # Example /// /// ``` /// let mut vec = vec![1i]; /// vec.push_all([2i, 3, 4]); /// assert_eq!(vec, vec![1, 2, 3, 4]); /// ``` #[inline] #[experimental] pub fn push_all(&mut self, other: &[T]) { self.reserve_additional(other.len()); for i in range(0, other.len()) { let len = self.len(); // Unsafe code so this can be optimised to a memcpy (or something similarly // fast) when T is Copy. LLVM is easily confused, so any extra operations // during the loop can prevent this optimisation. 
unsafe { ptr::write( self.as_mut_slice().unsafe_mut(len), other.unsafe_get(i).clone()); self.set_len(len + 1); } } } /// Grows the `Vec` in-place. /// /// Adds `n` copies of `value` to the `Vec`. /// /// # Example /// /// ``` /// let mut vec = vec!["hello"]; /// vec.grow(2, "world"); /// assert_eq!(vec, vec!["hello", "world", "world"]); /// ``` #[stable] pub fn grow(&mut self, n: uint, value: T) { self.reserve_additional(n); let mut i: uint = 0u; while i < n { self.push(value.clone()); i += 1u; } } /// Partitions a vector based on a predicate. /// /// Clones the elements of the vector, partitioning them into two `Vec`s /// `(a, b)`, where all elements of `a` satisfy `f` and all elements of `b` /// do not. The order of elements is preserved. /// /// # Example /// /// ``` /// let vec = vec![1i, 2, 3, 4]; /// let (even, odd) = vec.partitioned(|&n| n % 2 == 0); /// assert_eq!(even, vec![2i, 4]); /// assert_eq!(odd, vec![1i, 3]); /// ``` #[experimental] pub fn partitioned(&self, f: |&T| -> bool) -> (Vec<T>, Vec<T>) { let mut lefts = Vec::new(); let mut rights = Vec::new(); for elt in self.iter() { if f(elt) { lefts.push(elt.clone()); } else { rights.push(elt.clone()); } } (lefts, rights) } } #[unstable] impl<T:Clone> Clone for Vec<T> { fn clone(&self) -> Vec<T> { self.as_slice().to_vec() } fn clone_from(&mut self, other: &Vec<T>) { // drop anything in self that will not be overwritten if self.len() > other.len() { self.truncate(other.len()) } // reuse the contained values' allocations/resources. for (place, thing) in self.iter_mut().zip(other.iter()) { place.clone_from(thing) } // self.len <= other.len due to the truncate above, so the // slice here is always in-bounds. 
let slice = other[self.len()..]; self.push_all(slice); } } #[experimental = "waiting on Index stability"] impl<T> Index<uint,T> for Vec<T> { #[inline] fn index<'a>(&'a self, index: &uint) -> &'a T { &self.as_slice()[*index] } } #[cfg(not(stage0))] impl<T> IndexMut<uint,T> for Vec<T> { #[inline] fn index_mut<'a>(&'a mut self, index: &uint) -> &'a mut T { self.get_mut(*index) } } impl<T> ops::Slice<uint, [T]> for Vec<T> { #[inline] fn as_slice_<'a>(&'a self) -> &'a [T] { self.as_slice() } #[inline] fn slice_from_or_fail<'a>(&'a self, start: &uint) -> &'a [T] { self.as_slice().slice_from_or_fail(start) } #[inline] fn slice_to_or_fail<'a>(&'a self, end: &uint) -> &'a [T] { self.as_slice().slice_to_or_fail(end) } #[inline] fn slice_or_fail<'a>(&'a self, start: &uint, end: &uint) -> &'a [T] { self.as_slice().slice_or_fail(start, end) } } impl<T> ops::SliceMut<uint, [T]> for Vec<T> { #[inline] fn as_mut_slice_<'a>(&'a mut self) -> &'a mut [T] { self.as_mut_slice() } #[inline] fn slice_from_or_fail_mut<'a>(&'a mut self, start: &uint) -> &'a mut [T] { self.as_mut_slice().slice_from_or_fail_mut(start) } #[inline] fn slice_to_or_fail_mut<'a>(&'a mut self, end: &uint) -> &'a mut [T] { self.as_mut_slice().slice_to_or_fail_mut(end) } #[inline] fn slice_or_fail_mut<'a>(&'a mut self, start: &uint, end: &uint) -> &'a mut [T] { self.as_mut_slice().slice_or_fail_mut(start, end) } } #[experimental = "waiting on FromIterator stability"] impl<T> FromIterator<T> for Vec<T> { #[inline] fn from_iter<I:Iterator<T>>(mut iterator: I) -> Vec<T> { let (lower, _) = iterator.size_hint(); let mut vector = Vec::with_capacity(lower); for element in iterator { vector.push(element) } vector } } #[experimental = "waiting on Extendable stability"] impl<T> Extendable<T> for Vec<T> { #[inline] fn extend<I: Iterator<T>>(&mut self, mut iterator: I) { let (lower, _) = iterator.size_hint(); self.reserve_additional(lower); for element in iterator { self.push(element) } } } #[unstable = "waiting on PartialEq 
stability"] impl<T: PartialEq> PartialEq for Vec<T> { #[inline] fn eq(&self, other: &Vec<T>) -> bool { self.as_slice() == other.as_slice() } } #[unstable = "waiting on PartialOrd stability"] impl<T: PartialOrd> PartialOrd for Vec<T> { #[inline] fn partial_cmp(&self, other: &Vec<T>) -> Option<Ordering> { self.as_slice().partial_cmp(&other.as_slice()) } } #[unstable = "waiting on Eq stability"] impl<T: Eq> Eq for Vec<T> {} #[experimental] impl<T: PartialEq, V: AsSlice<T>> Equiv<V> for Vec<T> { #[inline] fn equiv(&self, other: &V) -> bool { self.as_slice() == other.as_slice() } } #[unstable = "waiting on Ord stability"] impl<T: Ord> Ord for Vec<T> { #[inline] fn cmp(&self, other: &Vec<T>) -> Ordering { self.as_slice().cmp(&other.as_slice()) } } #[experimental = "waiting on Collection stability"] impl<T> Collection for Vec<T> { #[inline] #[stable] fn len(&self) -> uint { self.len } } // FIXME: #13996: need a way to mark the return value as `noalias` #[inline(never)] unsafe fn alloc_or_realloc<T>(ptr: *mut T, old_size: uint, size: uint) -> *mut T { if old_size == 0 { allocate(size, mem::min_align_of::<T>()) as *mut T } else { reallocate(ptr as *mut u8, old_size, size, mem::min_align_of::<T>()) as *mut T } } #[inline] unsafe fn dealloc<T>(ptr: *mut T, len: uint) { if mem::size_of::<T>() != 0 { deallocate(ptr as *mut u8, len * mem::size_of::<T>(), mem::min_align_of::<T>()) } } impl<T> Vec<T> { /// Returns the number of elements the vector can hold without /// reallocating. /// /// # Example /// /// ``` /// let vec: Vec<int> = Vec::with_capacity(10); /// assert_eq!(vec.capacity(), 10); /// ``` #[inline] #[stable] pub fn capacity(&self) -> uint { self.cap } /// Reserves capacity for at least `n` additional elements in the given /// vector. /// /// # Failure /// /// Fails if the new capacity overflows `uint`. 
/// /// # Example /// /// ``` /// let mut vec: Vec<int> = vec![1i]; /// vec.reserve_additional(10); /// assert!(vec.capacity() >= 11); /// ``` pub fn reserve_additional(&mut self, extra: uint) { if self.cap - self.len < extra { match self.len.checked_add(&extra) { None => fail!("Vec::reserve_additional: `uint` overflow"), Some(new_cap) => self.reserve(new_cap) } } } /// Reserves capacity for at least `n` elements in the given vector. /// /// This function will over-allocate in order to amortize the allocation /// costs in scenarios where the caller may need to repeatedly reserve /// additional space. /// /// If the capacity for `self` is already equal to or greater than the /// requested capacity, then no action is taken. /// /// # Example /// /// ``` /// let mut vec = vec![1i, 2, 3]; /// vec.reserve(10); /// assert!(vec.capacity() >= 10); /// ``` pub fn reserve(&mut self, capacity: uint) { if capacity > self.cap { self.reserve_exact(num::next_power_of_two(capacity)) } } /// Reserves capacity for exactly `capacity` elements in the given vector. /// /// If the capacity for `self` is already equal to or greater than the /// requested capacity, then no action is taken. /// /// # Example /// /// ``` /// let mut vec: Vec<int> = Vec::with_capacity(10); /// vec.reserve_exact(11); /// assert_eq!(vec.capacity(), 11); /// ``` pub fn reserve_exact(&mut self, capacity: uint) { if mem::size_of::<T>() == 0 { return } if capacity > self.cap { let size = capacity.checked_mul(&mem::size_of::<T>()) .expect("capacity overflow"); unsafe { self.ptr = alloc_or_realloc(self.ptr, self.cap * mem::size_of::<T>(), size); } self.cap = capacity; } } /// Shrinks the capacity of the vector as much as possible. 
/// /// # Example /// /// ``` /// let mut vec = vec![1i, 2, 3]; /// vec.shrink_to_fit(); /// ``` #[stable] pub fn shrink_to_fit(&mut self) { if mem::size_of::<T>() == 0 { return } if self.len == 0 { if self.cap != 0 { unsafe { dealloc(self.ptr, self.cap) } self.cap = 0; } } else { unsafe { // Overflow check is unnecessary as the vector is already at // least this large. self.ptr = reallocate(self.ptr as *mut u8, self.cap * mem::size_of::<T>(), self.len * mem::size_of::<T>(), mem::min_align_of::<T>()) as *mut T; } self.cap = self.len; } } /// Convert the vector into Box<[T]>. /// /// Note that this will drop any excess capacity. Calling this and converting back to a vector /// with `into_vec()` is equivalent to calling `shrink_to_fit()`. #[experimental] pub fn into_boxed_slice(mut self) -> Box<[T]> { self.shrink_to_fit(); unsafe { let xs: Box<[T]> = mem::transmute(self.as_mut_slice()); mem::forget(self); xs } } /// Shorten a vector, dropping excess elements. /// /// If `len` is greater than the vector's current length, this has no /// effect. /// /// # Example /// /// ``` /// let mut vec = vec![1i, 2, 3, 4]; /// vec.truncate(2); /// assert_eq!(vec, vec![1, 2]); /// ``` #[unstable = "waiting on failure semantics"] pub fn truncate(&mut self, len: uint) { unsafe { // drop any extra elements while len < self.len { // decrement len before the read(), so a failure on Drop doesn't // re-drop the just-failed value. self.len -= 1; ptr::read(self.as_slice().unsafe_get(self.len)); } } } /// Deprecated, use `.extend(other.into_iter())` #[inline] #[deprecated = "use .extend(other.into_iter())"] #[cfg(stage0)] pub fn push_all_move(&mut self, other: Vec<T>) { self.extend(other.into_iter()); } /// Returns a mutable slice of the elements of `self`. 
/// /// # Example /// /// ``` /// fn foo(slice: &mut [int]) {} /// /// let mut vec = vec![1i, 2]; /// foo(vec.as_mut_slice()); /// ``` #[inline] #[stable] pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [T] { unsafe { mem::transmute(RawSlice { data: self.as_mut_ptr() as *const T, len: self.len, }) } } /// Creates a consuming iterator, that is, one that moves each /// value out of the vector (from start to end). The vector cannot /// be used after calling this. /// /// # Example /// /// ``` /// let v = vec!["a".to_string(), "b".to_string()]; /// for s in v.into_iter() { /// // s has type String, not &String /// println!("{}", s); /// } /// ``` #[inline] pub fn into_iter(self) -> MoveItems<T> { unsafe { let ptr = self.ptr; let cap = self.cap; let begin = self.ptr as *const T; let end = if mem::size_of::<T>() == 0 { (ptr as uint + self.len()) as *const T } else { ptr.offset(self.len() as int) as *const T }; mem::forget(self); MoveItems { allocation: ptr, cap: cap, ptr: begin, end: end } } } /// Sets the length of a vector. /// /// This will explicitly set the size of the vector, without actually /// modifying its buffers, so it is up to the caller to ensure that the /// vector is actually the specified size. /// /// # Example /// /// ``` /// let mut v = vec![1u, 2, 3, 4]; /// unsafe { /// v.set_len(1); /// } /// ``` #[inline] #[stable] pub unsafe fn set_len(&mut self, len: uint) { self.len = len; } /// Returns a mutable reference to the value at index `index`. /// /// # Failure /// /// Fails if `index` is out of bounds /// /// # Example /// /// ``` /// let mut vec = vec![1i, 2, 3]; /// *vec.get_mut(1) = 4; /// assert_eq!(vec, vec![1i, 4, 3]); /// ``` #[inline] #[unstable = "this is likely to be moved to actual indexing"] pub fn get_mut<'a>(&'a mut self, index: uint) -> &'a mut T { &mut self.as_mut_slice()[index] } /// Returns an iterator over references to the elements of the vector in /// order. 
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3];
/// for num in vec.iter() {
///     println!("{}", *num);
/// }
/// ```
#[inline]
pub fn iter<'a>(&'a self) -> Items<'a,T> {
    // Delegate to the slice iterator.
    self.as_slice().iter()
}

/// Returns an iterator over mutable references to the elements of the
/// vector in order.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3];
/// for num in vec.iter_mut() {
///     *num = 0;
/// }
/// ```
#[inline]
pub fn iter_mut<'a>(&'a mut self) -> MutItems<'a,T> {
    self.as_mut_slice().iter_mut()
}

/// Sorts the vector, in place, using `compare` to compare elements.
///
/// This sort is `O(n log n)` worst-case and stable, but allocates
/// approximately `2 * n`, where `n` is the length of `self`.
///
/// # Example
///
/// ```
/// let mut v = vec![5i, 4, 1, 3, 2];
/// v.sort_by(|a, b| a.cmp(b));
/// assert_eq!(v, vec![1i, 2, 3, 4, 5]);
///
/// // reverse sorting
/// v.sort_by(|a, b| b.cmp(a));
/// assert_eq!(v, vec![5i, 4, 3, 2, 1]);
/// ```
#[inline]
pub fn sort_by(&mut self, compare: |&T, &T| -> Ordering) {
    self.as_mut_slice().sort_by(compare)
}

/// Returns a slice of self spanning the interval [`start`, `end`).
///
/// # Failure
///
/// Fails when the slice (or part of it) is outside the bounds of self, or when
/// `start` > `end`.
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3, 4];
/// assert!(vec[0..2] == [1, 2]);
/// ```
#[inline]
pub fn slice<'a>(&'a self, start: uint, end: uint) -> &'a [T] {
    self[start..end]
}

/// Returns a slice containing all but the first element of the vector.
///
/// # Failure
///
/// Fails when the vector is empty.
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3];
/// assert!(vec.tail() == [2, 3]);
/// ```
#[inline]
pub fn tail<'a>(&'a self) -> &'a [T] {
    // `self[]` is the old full-range slicing syntax; delegate to the
    // slice's `tail`.
    self[].tail()
}

/// Returns a reference to the last element of a vector, or `None` if it is
/// empty.
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3];
/// assert!(vec.last() == Some(&3));
/// ```
#[inline]
pub fn last<'a>(&'a self) -> Option<&'a T> {
    self[].last()
}

/// Returns a mutable reference to the last element of a vector, or `None`
/// if it is empty.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3];
/// *vec.last_mut().unwrap() = 4;
/// assert_eq!(vec, vec![1i, 2, 4]);
/// ```
#[inline]
pub fn last_mut<'a>(&'a mut self) -> Option<&'a mut T> {
    self.as_mut_slice().last_mut()
}

/// Removes an element from anywhere in the vector and return it, replacing
/// it with the last element. This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// # Example
/// ```
/// let mut v = vec!["foo".to_string(), "bar".to_string(),
///                  "baz".to_string(), "qux".to_string()];
///
/// assert_eq!(v.swap_remove(1), Some("bar".to_string()));
/// assert_eq!(v, vec!["foo".to_string(), "qux".to_string(), "baz".to_string()]);
///
/// assert_eq!(v.swap_remove(0), Some("foo".to_string()));
/// assert_eq!(v, vec!["baz".to_string(), "qux".to_string()]);
///
/// assert_eq!(v.swap_remove(2), None);
/// ```
#[inline]
#[unstable = "the naming of this function may be altered"]
pub fn swap_remove(&mut self, index: uint) -> Option<T> {
    let length = self.len();
    if length > 0 && index < length - 1 {
        // Move the target element to the back so `pop` can remove it in O(1).
        self.as_mut_slice().swap(index, length - 1);
    } else if index >= length {
        // Out of bounds: nothing to remove. (When `index == length - 1`
        // no swap is needed; `pop` already removes the right element.)
        return None
    }
    self.pop()
}

/// Inserts an element at position `index` within the vector, shifting all
/// elements after position `i` one position to the right.
///
/// # Failure
///
/// Fails if `index` is not between `0` and the vector's length (both
/// bounds inclusive).
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3];
/// vec.insert(1, 4);
/// assert_eq!(vec, vec![1, 4, 2, 3]);
/// vec.insert(4, 5);
/// assert_eq!(vec, vec![1, 4, 2, 3, 5]);
/// ```
#[unstable = "failure semantics need settling"]
pub fn insert(&mut self, index: uint, element: T) {
    let len = self.len();
    assert!(index <= len);
    // space for the new element
    self.reserve(len + 1);

    unsafe { // infallible
        // The spot to put the new value
        {
            let p = self.as_mut_ptr().offset(index as int);
            // Shift everything over to make space. (Duplicating the
            // `index`th element into two consecutive places.)
            ptr::copy_memory(p.offset(1), &*p, len - index);
            // Write it in, overwriting the first copy of the `index`th
            // element.
            ptr::write(&mut *p, element);
        }
        self.set_len(len + 1);
    }
}

/// Removes and returns the element at position `index` within the vector,
/// shifting all elements after position `index` one position to the left.
/// Returns `None` if `i` is out of bounds.
///
/// # Example
///
/// ```
/// let mut v = vec![1i, 2, 3];
/// assert_eq!(v.remove(1), Some(2));
/// assert_eq!(v, vec![1, 3]);
///
/// assert_eq!(v.remove(4), None);
/// // v is unchanged:
/// assert_eq!(v, vec![1, 3]);
/// ```
#[unstable = "failure semantics need settling"]
pub fn remove(&mut self, index: uint) -> Option<T> {
    let len = self.len();
    if index < len {
        unsafe { // infallible
            let ret;
            {
                // the place we are taking from.
                let ptr = self.as_mut_ptr().offset(index as int);
                // copy it out, unsafely having a copy of the value on
                // the stack and in the vector at the same time.
                ret = Some(ptr::read(ptr as *const T));

                // Shift everything down to fill in that spot.
                ptr::copy_memory(ptr, &*ptr.offset(1), len - index - 1);
            }
            self.set_len(len - 1);
            ret
        }
    } else {
        // Out of bounds: leave the vector untouched.
        None
    }
}

/// Returns a mutable slice of `self` between `start` and `end`.
///
/// # Failure
///
/// Fails when `start` or `end` point outside the bounds of `self`, or when
/// `start` > `end`.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
/// assert!(vec[mut 0..2] == [1, 2]);
/// ```
#[inline]
pub fn slice_mut<'a>(&'a mut self, start: uint, end: uint) -> &'a mut [T] {
    // `self[mut a..b]` is the old mutable-slicing syntax.
    self[mut start..end]
}

/// Returns a mutable slice of `self` from `start` to the end of the `Vec`.
///
/// # Failure
///
/// Fails when `start` points outside the bounds of self.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
/// assert!(vec[mut 2..] == [3, 4]);
/// ```
#[inline]
pub fn slice_from_mut<'a>(&'a mut self, start: uint) -> &'a mut [T] {
    self[mut start..]
}

/// Returns a mutable slice of `self` from the start of the `Vec` to `end`.
///
/// # Failure
///
/// Fails when `end` points outside the bounds of self.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
/// assert!(vec[mut ..2] == [1, 2]);
/// ```
#[inline]
pub fn slice_to_mut<'a>(&'a mut self, end: uint) -> &'a mut [T] {
    self[mut ..end]
}

/// Returns a pair of mutable slices that divides the `Vec` at an index.
///
/// The first will contain all indices from `[0, mid)` (excluding
/// the index `mid` itself) and the second will contain all
/// indices from `[mid, len)` (excluding the index `len` itself).
///
/// # Failure
///
/// Fails if `mid > len`.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4, 5, 6];
///
/// // scoped to restrict the lifetime of the borrows
/// {
///     let (left, right) = vec.split_at_mut(0);
///     assert!(left == &mut []);
///     assert!(right == &mut [1, 2, 3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = vec.split_at_mut(2);
///     assert!(left == &mut [1, 2]);
///     assert!(right == &mut [3, 4, 5, 6]);
/// }
///
/// {
///     let (left, right) = vec.split_at_mut(6);
///     assert!(left == &mut [1, 2, 3, 4, 5, 6]);
///     assert!(right == &mut []);
/// }
/// ```
#[inline]
pub fn split_at_mut<'a>(&'a mut self, mid: uint) -> (&'a mut [T], &'a mut [T]) {
    self[mut].split_at_mut(mid)
}

/// Reverses the order of elements in a vector, in place.
///
/// # Example
///
/// ```
/// let mut v = vec![1i, 2, 3];
/// v.reverse();
/// assert_eq!(v, vec![3i, 2, 1]);
/// ```
#[inline]
pub fn reverse(&mut self) {
    self[mut].reverse()
}

/// Returns a slice of `self` from `start` to the end of the vec.
///
/// # Failure
///
/// Fails when `start` points outside the bounds of self.
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3];
/// assert!(vec[1..] == [2, 3]);
/// ```
#[inline]
pub fn slice_from<'a>(&'a self, start: uint) -> &'a [T] {
    self[start..]
}

/// Returns a slice of self from the start of the vec to `end`.
///
/// # Failure
///
/// Fails when `end` points outside the bounds of self.
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3, 4];
/// assert!(vec[..2] == [1, 2]);
/// ```
#[inline]
pub fn slice_to<'a>(&'a self, end: uint) -> &'a [T] {
    self[..end]
}

/// Returns a slice containing all but the last element of the vector.
///
/// # Failure
///
/// Fails if the vector is empty
///
/// # Example
///
/// ```
/// let vec = vec![1i, 2, 3];
/// assert!(vec.init() == [1, 2]);
/// ```
#[inline]
pub fn init<'a>(&'a self) -> &'a [T] {
    // Fails via the slicing bounds check when `self.len() == 0`
    // (`0 - 1` underflows `uint`).
    self[0..self.len() - 1]
}

/// Returns an unsafe pointer to the vector's buffer.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the vector may cause its buffer to be reallocated, which
/// would also make any pointers to it invalid.
///
/// # Example
///
/// ```
/// let v = vec![1i, 2, 3];
/// let p = v.as_ptr();
/// unsafe {
///     // Examine each element manually
///     assert_eq!(*p, 1i);
///     assert_eq!(*p.offset(1), 2i);
///     assert_eq!(*p.offset(2), 3i);
/// }
/// ```
#[inline]
pub fn as_ptr(&self) -> *const T {
    self.ptr as *const T
}

/// Returns a mutable unsafe pointer to the vector's buffer.
///
/// The caller must ensure that the vector outlives the pointer this
/// function returns, or else it will end up pointing to garbage.
///
/// Modifying the vector may cause its buffer to be reallocated, which
/// would also make any pointers to it invalid.
///
/// # Example
///
/// ```
/// use std::ptr;
///
/// let mut v = vec![1i, 2, 3];
/// let p = v.as_mut_ptr();
/// unsafe {
///     ptr::write(p, 9i);
///     ptr::write(p.offset(2), 5i);
/// }
/// assert_eq!(v, vec![9i, 2, 5]);
/// ```
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
    self.ptr
}

/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` such that `f(&e)` returns false.
/// This method operates in place and preserves the order of the retained elements.
///
/// # Example
///
/// ```
/// let mut vec = vec![1i, 2, 3, 4];
/// vec.retain(|x| x%2 == 0);
/// assert_eq!(vec, vec![2, 4]);
/// ```
#[unstable = "the closure argument may become an unboxed closure"]
pub fn retain(&mut self, f: |&T| -> bool) {
    let len = self.len();
    // Number of elements deleted so far; survivors are shifted left by
    // this amount, keeping the operation O(n) and order-preserving.
    let mut del = 0u;
    {
        let v = self.as_mut_slice();

        for i in range(0u, len) {
            if !f(&v[i]) {
                del += 1;
            } else if del > 0 {
                v.swap(i-del, i);
            }
        }
    }
    if del > 0 {
        // Rejected elements were swapped to the tail; drop them.
        self.truncate(len - del);
    }
}

/// Expands a vector in place, initializing the new elements to the result of a function.
///
/// The vector is grown by `n` elements. The i-th new element are initialized to the value
/// returned by `f(i)` where `i` is in the range [0, n).
///
/// # Example
///
/// ```
/// let mut vec = vec![0u, 1];
/// vec.grow_fn(3, |i| i);
/// assert_eq!(vec, vec![0, 1, 0, 1, 2]);
/// ```
#[unstable = "this function may be renamed or change to unboxed closures"]
pub fn grow_fn(&mut self, n: uint, f: |uint| -> T) {
    // Reserve up front so the pushes below cannot reallocate repeatedly.
    self.reserve_additional(n);
    for i in range(0u, n) {
        self.push(f(i));
    }
}
} // end of impl<T> Vec<T>

impl<T:Ord> Vec<T> {
    /// Sorts the vector in place.
    ///
    /// This sort is `O(n log n)` worst-case and stable, but allocates
    /// approximately `2 * n`, where `n` is the length of `self`.
    ///
    /// # Example
    ///
    /// ```
    /// let mut vec = vec![3i, 1, 2];
    /// vec.sort();
    /// assert_eq!(vec, vec![1, 2, 3]);
    /// ```
    pub fn sort(&mut self) {
        self.as_mut_slice().sort()
    }
}

#[experimental = "waiting on Mutable stability"]
impl<T> Mutable for Vec<T> {
    #[inline]
    #[stable]
    fn clear(&mut self) {
        // Dropping every element via truncate; capacity is kept.
        self.truncate(0)
    }
}

impl<T: PartialEq> Vec<T> {
    /// Returns true if a vector contains an element equal to the given value.
    ///
    /// # Example
    ///
    /// ```
    /// let vec = vec![1i, 2, 3];
    /// assert!(vec.contains(&1));
    /// ```
    #[inline]
    pub fn contains(&self, x: &T) -> bool {
        self.as_slice().contains(x)
    }

    /// Removes consecutive repeated elements in the vector.
    ///
    /// If the vector is sorted, this removes all duplicates.
    ///
    /// # Example
    ///
    /// ```
    /// let mut vec = vec![1i, 2, 2, 3, 2];
    /// vec.dedup();
    /// assert_eq!(vec, vec![1i, 2, 3, 2]);
    /// ```
    #[unstable = "this function may be renamed"]
    pub fn dedup(&mut self) {
        unsafe {
            // Although we have a mutable reference to `self`, we cannot make
            // *arbitrary* changes. The `PartialEq` comparisons could fail, so we
            // must ensure that the vector is in a valid state at all time.
            //
            // The way that we handle this is by using swaps; we iterate
            // over all the elements, swapping as we go so that at the end
            // the elements we wish to keep are in the front, and those we
            // wish to reject are at the back. We can then truncate the
            // vector. This operation is still O(n).
            //
            // Example: We start in this state, where `r` represents "next
            // read" and `w` represents "next_write`.
            //
            //           r
            //     +---+---+---+---+---+---+
            //     | 0 | 1 | 1 | 2 | 3 | 3 |
            //     +---+---+---+---+---+---+
            //           w
            //
            // Comparing self[r] against self[w-1], this is not a duplicate, so
            // we swap self[r] and self[w] (no effect as r==w) and then increment both
            // r and w, leaving us with:
            //
            //               r
            //     +---+---+---+---+---+---+
            //     | 0 | 1 | 1 | 2 | 3 | 3 |
            //     +---+---+---+---+---+---+
            //               w
            //
            // Comparing self[r] against self[w-1], this value is a duplicate,
            // so we increment `r` but leave everything else unchanged:
            //
            //                   r
            //     +---+---+---+---+---+---+
            //     | 0 | 1 | 1 | 2 | 3 | 3 |
            //     +---+---+---+---+---+---+
            //               w
            //
            // Comparing self[r] against self[w-1], this is not a duplicate,
            // so swap self[r] and self[w] and advance r and w:
            //
            //                       r
            //     +---+---+---+---+---+---+
            //     | 0 | 1 | 2 | 1 | 3 | 3 |
            //     +---+---+---+---+---+---+
            //                   w
            //
            // Not a duplicate, repeat:
            //
            //                           r
            //     +---+---+---+---+---+---+
            //     | 0 | 1 | 2 | 3 | 1 | 3 |
            //     +---+---+---+---+---+---+
            //                       w
            //
            // Duplicate, advance r. End of vec. Truncate to w.

            let ln = self.len();
            if ln < 1 { return; }

            // Avoid bounds checks by using unsafe pointers.
            let p = self.as_mut_slice().as_mut_ptr();
            let mut r = 1;
            let mut w = 1;

            while r < ln {
                let p_r = p.offset(r as int);
                let p_wm1 = p.offset((w - 1) as int);
                if *p_r != *p_wm1 {
                    if r != w {
                        let p_w = p_wm1.offset(1);
                        // Swap (rather than overwrite) so the vector always
                        // contains `ln` valid elements even if a comparison
                        // or destructor fails mid-way.
                        mem::swap(&mut *p_r, &mut *p_w);
                    }
                    w += 1;
                }
                r += 1;
            }

            self.truncate(w);
        }
    }
}

impl<T> AsSlice<T> for Vec<T> {
    /// Returns a slice into `self`.
    ///
    /// # Example
    ///
    /// ```
    /// fn foo(slice: &[int]) {}
    ///
    /// let vec = vec![1i, 2];
    /// foo(vec.as_slice());
    /// ```
    #[inline]
    #[stable]
    fn as_slice<'a>(&'a self) -> &'a [T] {
        // Build the slice representation directly from the raw parts.
        unsafe { mem::transmute(RawSlice { data: self.as_ptr(), len: self.len }) }
    }
}

impl<T: Clone, V: AsSlice<T>> Add<V, Vec<T>> for Vec<T> {
    // Concatenation: clones the elements of both operands into a new Vec.
    #[inline]
    fn add(&self, rhs: &V) -> Vec<T> {
        let mut res = Vec::with_capacity(self.len() + rhs.as_slice().len());
        res.push_all(self.as_slice());
        res.push_all(rhs.as_slice());
        res
    }
}

#[unsafe_destructor]
impl<T> Drop for Vec<T> {
    fn drop(&mut self) {
        // This is (and should always remain) a no-op if the fields are
        // zeroed (when moving out, because of #[unsafe_no_drop_flag]).
        if self.cap != 0 {
            unsafe {
                // Run each element's destructor by moving it out, then
                // free the backing buffer.
                for x in self.as_mut_slice().iter() {
                    ptr::read(x);
                }
                dealloc(self.ptr, self.cap)
            }
        }
    }
}

#[stable]
impl<T> Default for Vec<T> {
    fn default() -> Vec<T> {
        Vec::new()
    }
}

#[experimental = "waiting on Show stability"]
impl<T:fmt::Show> fmt::Show for Vec<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

#[experimental = "waiting on MutableSeq stability"]
impl<T> MutableSeq<T> for Vec<T> {
    /// Appends an element to the back of a collection.
    ///
    /// # Failure
    ///
    /// Fails if the number of elements in the vector overflows a `uint`.
    ///
    /// # Example
    ///
    /// ```rust
    /// let mut vec = vec!(1i, 2);
    /// vec.push(3);
    /// assert_eq!(vec, vec!(1, 2, 3));
    /// ```
    #[inline]
    #[stable]
    fn push(&mut self, value: T) {
        if mem::size_of::<T>() == 0 {
            // zero-size types consume no memory, so we can't rely on the address space running out
            self.len = self.len.checked_add(&1).expect("length overflow");
            unsafe { mem::forget(value); }
            return
        }
        if self.len == self.cap {
            // Double the capacity (starting from at least 2 elements).
            let old_size = self.cap * mem::size_of::<T>();
            let size = max(old_size, 2 * mem::size_of::<T>()) * 2;
            if old_size > size { fail!("capacity overflow") }
            unsafe {
                self.ptr = alloc_or_realloc(self.ptr, old_size, size);
            }
            self.cap = max(self.cap, 2) * 2;
        }

        unsafe {
            // Write the new value one past the current end.
            let end = (self.ptr as *const T).offset(self.len as int) as *mut T;
            ptr::write(&mut *end, value);
            self.len += 1;
        }
    }

    #[inline]
    #[stable]
    fn pop(&mut self) -> Option<T> {
        if self.len == 0 {
            None
        } else {
            unsafe {
                // Decrement first, then move the former last element out.
                self.len -= 1;
                Some(ptr::read(self.as_slice().unsafe_get(self.len())))
            }
        }
    }
}

/// An iterator that moves out of a vector.
pub struct MoveItems<T> {
    allocation: *mut T, // the block of memory allocated for the vector
    cap: uint, // the capacity of the vector
    ptr: *const T,
    end: *const T
}

impl<T> MoveItems<T> {
    #[inline]
    /// Drops all items that have not yet been moved and returns the empty vector.
    pub fn unwrap(mut self) -> Vec<T> {
        unsafe {
            // Exhaust the iterator so remaining elements are dropped.
            for _x in self { }
            let MoveItems { allocation, cap, ptr: _ptr, end: _end } = self;
            // Suppress our own destructor; the returned Vec now owns the buffer.
            mem::forget(self);
            Vec { ptr: allocation, cap: cap, len: 0 }
        }
    }
}

impl<T> Iterator<T> for MoveItems<T> {
    #[inline]
    fn next<'a>(&'a mut self) -> Option<T> {
        unsafe {
            if self.ptr == self.end {
                None
            } else {
                if mem::size_of::<T>() == 0 {
                    // purposefully don't use 'ptr.offset' because for
                    // vectors with 0-size elements this would return the
                    // same pointer.
                    self.ptr = mem::transmute(self.ptr as uint + 1);

                    // Use a non-null pointer value
                    Some(ptr::read(mem::transmute(1u)))
                } else {
                    let old = self.ptr;
                    self.ptr = self.ptr.offset(1);

                    Some(ptr::read(old))
                }
            }
        }
    }

    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        // Pointer difference in bytes divided by element size gives the
        // exact remaining count (ZSTs encode the count in the pointers).
        let diff = (self.end as uint) - (self.ptr as uint);
        let size = mem::size_of::<T>();
        let exact = diff / (if size == 0 {1} else {size});
        (exact, Some(exact))
    }
}

impl<T> DoubleEndedIterator<T> for MoveItems<T> {
    #[inline]
    fn next_back<'a>(&'a mut self) -> Option<T> {
        unsafe {
            if self.end == self.ptr {
                None
            } else {
                if mem::size_of::<T>() == 0 {
                    // See above for why 'ptr.offset' isn't used
                    self.end = mem::transmute(self.end as uint - 1);

                    // Use a non-null pointer value
                    Some(ptr::read(mem::transmute(1u)))
                } else {
                    self.end = self.end.offset(-1);

                    Some(ptr::read(mem::transmute(self.end)))
                }
            }
        }
    }
}

impl<T> ExactSize<T> for MoveItems<T> {}

#[unsafe_destructor]
impl<T> Drop for MoveItems<T> {
    fn drop(&mut self) {
        // destroy the remaining elements
        if self.cap != 0 {
            for _x in *self {}
            unsafe {
                dealloc(self.allocation, self.cap);
            }
        }
    }
}

/// Converts an iterator of pairs into a pair of vectors.
///
/// Returns a tuple containing two vectors where the i-th element of the first
/// vector contains the first element of the i-th tuple of the input iterator,
/// and the i-th element of the second vector contains the second element
/// of the i-th tuple of the input iterator.
#[unstable = "this functionality may become more generic over time"]
pub fn unzip<T, U, V: Iterator<(T, U)>>(mut iter: V) -> (Vec<T>, Vec<U>) {
    // Pre-size both outputs from the iterator's lower size bound.
    let (lo, _) = iter.size_hint();
    let mut ts = Vec::with_capacity(lo);
    let mut us = Vec::with_capacity(lo);
    for (t, u) in iter {
        ts.push(t);
        us.push(u);
    }
    (ts, us)
}

/// Wrapper type providing a `&Vec<T>` reference via `Deref`.
#[experimental]
pub struct DerefVec<'a, T> {
    // A Vec built over borrowed storage; never allowed to deallocate.
    x: Vec<T>,
    l: ContravariantLifetime<'a>
}

impl<'a, T> Deref<Vec<T>> for DerefVec<'a, T> {
    fn deref<'b>(&'b self) -> &'b Vec<T> {
        &self.x
    }
}

// Prevent the inner `Vec<T>` from attempting to deallocate memory.
#[unsafe_destructor]
impl<'a, T> Drop for DerefVec<'a, T> {
    fn drop(&mut self) {
        // Zeroing len and cap makes the Vec's own destructor a no-op,
        // so the borrowed slice storage is never freed or dropped here.
        self.x.len = 0;
        self.x.cap = 0;
    }
}

/// Convert a slice to a wrapper type providing a `&Vec<T>` reference.
#[experimental]
pub fn as_vec<'a, T>(x: &'a [T]) -> DerefVec<'a, T> {
    unsafe {
        DerefVec {
            x: Vec::from_raw_parts(x.len(), x.len(), x.as_ptr() as *mut T),
            l: ContravariantLifetime::<'a>
        }
    }
}

/// Unsafe vector operations.
#[unstable]
pub mod raw {
    use super::Vec;
    use core::ptr;

    /// Constructs a vector from an unsafe pointer to a buffer.
    ///
    /// The elements of the buffer are copied into the vector without cloning,
    /// as if `ptr::read()` were called on them.
    #[inline]
    #[unstable]
    pub unsafe fn from_buf<T>(ptr: *const T, elts: uint) -> Vec<T> {
        let mut dst = Vec::with_capacity(elts);
        dst.set_len(elts);
        ptr::copy_nonoverlapping_memory(dst.as_mut_ptr(), ptr, elts);
        dst
    }
}

/// An owned, partially type-converted vector of elements with non-zero size.
///
/// `T` and `U` must have the same, non-zero size. They must also have the same
/// alignment.
///
/// When the destructor of this struct runs, all `U`s from `start_u` (incl.) to
/// `end_u` (excl.) and all `T`s from `start_t` (incl.) to `end_t` (excl.) are
/// destructed. Additionally the underlying storage of `vec` will be freed.
struct PartialVecNonZeroSized<T,U> {
    vec: Vec<T>,

    start_u: *mut U,
    end_u: *mut U,
    start_t: *mut T,
    end_t: *mut T,
}

/// An owned, partially type-converted vector of zero-sized elements.
///
/// When the destructor of this struct runs, all `num_t` `T`s and `num_u` `U`s
/// are destructed.
struct PartialVecZeroSized<T,U> {
    num_t: uint,
    num_u: uint,
    // Invariance markers: this struct logically owns `T`s and `U`s even
    // though it stores none.
    marker_t: InvariantType<T>,
    marker_u: InvariantType<U>,
}

#[unsafe_destructor]
impl<T,U> Drop for PartialVecNonZeroSized<T,U> {
    fn drop(&mut self) {
        unsafe {
            // `vec` hasn't been modified until now. As it has a length
            // currently, this would run destructors of `T`s which might not be
            // there. So at first, set `vec`s length to `0`. This must be done
            // at first to remain memory-safe as the destructors of `U` or `T`
            // might cause unwinding where `vec`s destructor would be executed.
            self.vec.set_len(0);

            // We have instances of `U`s and `T`s in `vec`. Destruct them.
            while self.start_u != self.end_u {
                let _ = ptr::read(self.start_u as *const U); // Run a `U` destructor.
                self.start_u = self.start_u.offset(1);
            }
            while self.start_t != self.end_t {
                let _ = ptr::read(self.start_t as *const T); // Run a `T` destructor.
                self.start_t = self.start_t.offset(1);
            }
            // After this destructor ran, the destructor of `vec` will run,
            // deallocating the underlying memory.
        }
    }
}

#[unsafe_destructor]
impl<T,U> Drop for PartialVecZeroSized<T,U> {
    fn drop(&mut self) {
        unsafe {
            // Destruct the instances of `T` and `U` this struct owns.
            // Zero-sized values carry no data, so conjuring one with
            // `uninitialized()` and letting it drop runs the destructor.
            while self.num_t != 0 {
                let _: T = mem::uninitialized(); // Run a `T` destructor.
                self.num_t -= 1;
            }
            while self.num_u != 0 {
                let _: U = mem::uninitialized(); // Run a `U` destructor.
                self.num_u -= 1;
            }
        }
    }
}

impl<T> Vec<T> {
    /// Converts a `Vec<T>` to a `Vec<U>` where `T` and `U` have the same
    /// size and in case they are not zero-sized the same minimal alignment.
    ///
    /// # Failure
    ///
    /// Fails if `T` and `U` have differing sizes or are not zero-sized and
    /// have differing minimal alignments.
/// /// # Example /// /// ``` /// let v = vec![0u, 1, 2]; /// let w = v.map_in_place(|i| i + 3); /// assert_eq!(w.as_slice(), [3, 4, 5].as_slice()); /// /// #[deriving(PartialEq, Show)] /// struct Newtype(u8); /// let bytes = vec![0x11, 0x22]; /// let newtyped_bytes = bytes.map_in_place(|x| Newtype(x)); /// assert_eq!(newtyped_bytes.as_slice(), [Newtype(0x11), Newtype(0x22)].as_slice()); /// ``` pub fn map_in_place<U>(self, f: |T| -> U) -> Vec<U> { // FIXME: Assert statically that the types `T` and `U` have the same // size. assert!(mem::size_of::<T>() == mem::size_of::<U>()); let mut vec = self; if mem::size_of::<T>() != 0 { // FIXME: Assert statically that the types `T` and `U` have the // same minimal alignment in case they are not zero-sized. // These asserts are necessary because the `min_align_of` of the // types are passed to the allocator by `Vec`. assert!(mem::min_align_of::<T>() == mem::min_align_of::<U>()); // This `as int` cast is safe, because the size of the elements of the // vector is not 0, and: // // 1) If the size of the elements in the vector is 1, the `int` may // overflow, but it has the correct bit pattern so that the // `.offset()` function will work. // // Example: // Address space 0x0-0xF. // `u8` array at: 0x1. // Size of `u8` array: 0x8. // Calculated `offset`: -0x8. // After `array.offset(offset)`: 0x9. // (0x1 + 0x8 = 0x1 - 0x8) // // 2) If the size of the elements in the vector is >1, the `uint` -> // `int` conversion can't overflow. let offset = vec.len() as int; let start = vec.as_mut_ptr(); let mut pv = PartialVecNonZeroSized { vec: vec, start_t: start, // This points inside the vector, as the vector has length // `offset`. 
end_t: unsafe { start.offset(offset) }, start_u: start as *mut U, end_u: start as *mut U, }; // start_t // start_u // | // +-+-+-+-+-+-+ // |T|T|T|...|T| // +-+-+-+-+-+-+ // | | // end_u end_t while pv.end_u as *mut T != pv.end_t { unsafe { // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|T|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t let t = ptr::read(pv.start_t as *const T); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|X|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We must not fail here, one cell is marked as `T` // although it is not `T`. pv.start_t = pv.start_t.offset(1); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|X|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We may fail again. // The function given by the user might fail. let u = f(t); ptr::write(pv.end_u, u); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|U|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We should not fail here, because that would leak the `U` // pointed to by `end_u`. pv.end_u = pv.end_u.offset(1); // start_u start_t // | | // +-+-+-+-+-+-+-+-+-+ // |U|...|U|U|T|...|T| // +-+-+-+-+-+-+-+-+-+ // | | // end_u end_t // We may fail again. } } // start_u start_t // | | // +-+-+-+-+-+-+ // |U|...|U|U|U| // +-+-+-+-+-+-+ // | // end_t // end_u // Extract `vec` and prevent the destructor of // `PartialVecNonZeroSized` from running. Note that none of the // function calls can fail, thus no resources can be leaked (as the // `vec` member of `PartialVec` is the only one which holds // allocations -- and it is returned from this function. None of // this can fail. unsafe { let vec_len = pv.vec.len(); let vec_cap = pv.vec.capacity(); let vec_ptr = pv.vec.as_mut_ptr() as *mut U; mem::forget(pv); Vec::from_raw_parts(vec_len, vec_cap, vec_ptr) } } else { // Put the `Vec` into the `PartialVecZeroSized` structure and // prevent the destructor of the `Vec` from running. 
Since the // `Vec` contained zero-sized objects, it did not allocate, so we // are not leaking memory here. let mut pv = PartialVecZeroSized::<T,U> { num_t: vec.len(), num_u: 0, marker_t: InvariantType, marker_u: InvariantType, }; unsafe { mem::forget(vec); } while pv.num_t != 0 { unsafe { // Create a `T` out of thin air and decrement `num_t`. This // must not fail between these steps, as otherwise a // destructor of `T` which doesn't exist runs. let t = mem::uninitialized(); pv.num_t -= 1; // The function given by the user might fail. let u = f(t); // Forget the `U` and increment `num_u`. This increment // cannot overflow the `uint` as we only do this for a // number of times that fits into a `uint` (and start with // `0`). Again, we should not fail between these steps. mem::forget(u); pv.num_u += 1; } } // Create a `Vec` from our `PartialVecZeroSized` and make sure the // destructor of the latter will not run. None of this can fail. let mut result = Vec::new(); unsafe { result.set_len(pv.num_u); } result } } } #[cfg(test)] mod tests { extern crate test; use std::prelude::*; use std::mem::size_of; use test::Bencher; use super::{as_vec, unzip, raw, Vec}; use MutableSeq; struct DropCounter<'a> { count: &'a mut int } #[unsafe_destructor] impl<'a> Drop for DropCounter<'a> { fn drop(&mut self) { *self.count += 1; } } #[test] fn test_as_vec() { let xs = [1u8, 2u8, 3u8]; assert_eq!(as_vec(xs).as_slice(), xs.as_slice()); } #[test] fn test_as_vec_dtor() { let (mut count_x, mut count_y) = (0, 0); { let xs = &[DropCounter { count: &mut count_x }, DropCounter { count: &mut count_y }]; assert_eq!(as_vec(xs).len(), 2); } assert_eq!(count_x, 1); assert_eq!(count_y, 1); } #[test] fn test_small_vec_struct() { assert!(size_of::<Vec<u8>>() == size_of::<uint>() * 3); } #[test] fn test_double_drop() { struct TwoVec<T> { x: Vec<T>, y: Vec<T> } let (mut count_x, mut count_y) = (0, 0); { let mut tv = TwoVec { x: Vec::new(), y: Vec::new() }; tv.x.push(DropCounter {count: &mut count_x}); 
tv.y.push(DropCounter {count: &mut count_y}); // If Vec had a drop flag, here is where it would be zeroed. // Instead, it should rely on its internal state to prevent // doing anything significant when dropped multiple times. drop(tv.x); // Here tv goes out of scope, tv.y should be dropped, but not tv.x. } assert_eq!(count_x, 1); assert_eq!(count_y, 1); } #[test] fn test_reserve_additional() { let mut v = Vec::new(); assert_eq!(v.capacity(), 0); v.reserve_additional(2); assert!(v.capacity() >= 2); for i in range(0i, 16) { v.push(i); } assert!(v.capacity() >= 16); v.reserve_additional(16); assert!(v.capacity() >= 32); v.push(16); v.reserve_additional(16); assert!(v.capacity() >= 33) } #[test] fn test_extend() { let mut v = Vec::new(); let mut w = Vec::new(); v.extend(range(0i, 3)); for i in range(0i, 3) { w.push(i) } assert_eq!(v, w); v.extend(range(3i, 10)); for i in range(3i, 10) { w.push(i) } assert_eq!(v, w); } #[test] fn test_slice_from_mut() { let mut values = vec![1u8,2,3,4,5]; { let slice = values.slice_from_mut(2); assert!(slice == [3, 4, 5]); for p in slice.iter_mut() { *p += 2; } } assert!(values.as_slice() == [1, 2, 5, 6, 7]); } #[test] fn test_slice_to_mut() { let mut values = vec![1u8,2,3,4,5]; { let slice = values.slice_to_mut(2); assert!(slice == [1, 2]); for p in slice.iter_mut() { *p += 1; } } assert!(values.as_slice() == [2, 3, 3, 4, 5]); } #[test] fn test_split_at_mut() { let mut values = vec![1u8,2,3,4,5]; { let (left, right) = values.split_at_mut(2); { let left: &[_] = left; assert!(left[0..left.len()] == [1, 2]); } for p in left.iter_mut() { *p += 1; } { let right: &[_] = right; assert!(right[0..right.len()] == [3, 4, 5]); } for p in right.iter_mut() { *p += 2; } } assert!(values == vec![2u8, 3, 5, 6, 7]); } #[test] fn test_clone() { let v: Vec<int> = vec!(); let w = vec!(1i, 2, 3); assert_eq!(v, v.clone()); let z = w.clone(); assert_eq!(w, z); // they should be disjoint in memory. 
assert!(w.as_ptr() != z.as_ptr()) } #[test] fn test_clone_from() { let mut v = vec!(); let three = vec!(box 1i, box 2, box 3); let two = vec!(box 4i, box 5); // zero, long v.clone_from(&three); assert_eq!(v, three); // equal v.clone_from(&three); assert_eq!(v, three); // long, short v.clone_from(&two); assert_eq!(v, two); // short, long v.clone_from(&three); assert_eq!(v, three) } #[test] fn test_grow_fn() { let mut v = vec![0u, 1]; v.grow_fn(3, |i| i); assert!(v == vec![0u, 1, 0, 1, 2]); } #[test] fn test_retain() { let mut vec = vec![1u, 2, 3, 4]; vec.retain(|x| x%2 == 0); assert!(vec == vec![2u, 4]); } #[test] fn zero_sized_values() { let mut v = Vec::new(); assert_eq!(v.len(), 0); v.push(()); assert_eq!(v.len(), 1); v.push(()); assert_eq!(v.len(), 2); assert_eq!(v.pop(), Some(())); assert_eq!(v.pop(), Some(())); assert_eq!(v.pop(), None); assert_eq!(v.iter().count(), 0); v.push(()); assert_eq!(v.iter().count(), 1); v.push(()); assert_eq!(v.iter().count(), 2); for &() in v.iter() {} assert_eq!(v.iter_mut().count(), 2); v.push(()); assert_eq!(v.iter_mut().count(), 3); v.push(()); assert_eq!(v.iter_mut().count(), 4); for &() in v.iter_mut() {} unsafe { v.set_len(0); } assert_eq!(v.iter_mut().count(), 0); } #[test] fn test_partition() { assert_eq!(vec![].partition(|x: &int| *x < 3), (vec![], vec![])); assert_eq!(vec![1i, 2, 3].partition(|x: &int| *x < 4), (vec![1, 2, 3], vec![])); assert_eq!(vec![1i, 2, 3].partition(|x: &int| *x < 2), (vec![1], vec![2, 3])); assert_eq!(vec![1i, 2, 3].partition(|x: &int| *x < 0), (vec![], vec![1, 2, 3])); } #[test] fn test_partitioned() { assert_eq!(vec![].partitioned(|x: &int| *x < 3), (vec![], vec![])) assert_eq!(vec![1i, 2, 3].partitioned(|x: &int| *x < 4), (vec![1, 2, 3], vec![])); assert_eq!(vec![1i, 2, 3].partitioned(|x: &int| *x < 2), (vec![1], vec![2, 3])); assert_eq!(vec![1i, 2, 3].partitioned(|x: &int| *x < 0), (vec![], vec![1, 2, 3])); } #[test] fn test_zip_unzip() { let z1 = vec![(1i, 4i), (2, 5), (3, 6)]; let (left, 
right) = unzip(z1.iter().map(|&x| x)); let (left, right) = (left.as_slice(), right.as_slice()); assert_eq!((1, 4), (left[0], right[0])); assert_eq!((2, 5), (left[1], right[1])); assert_eq!((3, 6), (left[2], right[2])); } #[test] fn test_unsafe_ptrs() { unsafe { // Test on-stack copy-from-buf. let a = [1i, 2, 3]; let ptr = a.as_ptr(); let b = raw::from_buf(ptr, 3u); assert_eq!(b, vec![1, 2, 3]); // Test on-heap copy-from-buf. let c = vec![1i, 2, 3, 4, 5]; let ptr = c.as_ptr(); let d = raw::from_buf(ptr, 5u); assert_eq!(d, vec![1, 2, 3, 4, 5]); } } #[test] fn test_vec_truncate_drop() { static mut drops: uint = 0; struct Elem(int); impl Drop for Elem { fn drop(&mut self) { unsafe { drops += 1; } } } let mut v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)]; assert_eq!(unsafe { drops }, 0); v.truncate(3); assert_eq!(unsafe { drops }, 2); v.truncate(0); assert_eq!(unsafe { drops }, 5); } #[test] #[should_fail] fn test_vec_truncate_fail() { struct BadElem(int); impl Drop for BadElem { fn drop(&mut self) { let BadElem(ref mut x) = *self; if *x == 0xbadbeef { fail!("BadElem failure: 0xbadbeef") } } } let mut v = vec![BadElem(1), BadElem(2), BadElem(0xbadbeef), BadElem(4)]; v.truncate(0); } #[test] fn test_index() { let vec = vec!(1i, 2, 3); assert!(vec[1] == 2); } #[test] #[should_fail] fn test_index_out_of_bounds() { let vec = vec!(1i, 2, 3); let _ = vec[3]; } #[test] #[should_fail] fn test_slice_out_of_bounds_1() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; x[-1..]; } #[test] #[should_fail] fn test_slice_out_of_bounds_2() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; x[..6]; } #[test] #[should_fail] fn test_slice_out_of_bounds_3() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; x[-1..4]; } #[test] #[should_fail] fn test_slice_out_of_bounds_4() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; x[1..6]; } #[test] #[should_fail] fn test_slice_out_of_bounds_5() { let x: Vec<int> = vec![1, 2, 3, 4, 5]; x[3..2]; } #[test] fn test_swap_remove_empty() { let mut vec: Vec<uint> = vec!(); 
assert_eq!(vec.swap_remove(0), None); } #[test] fn test_move_iter_unwrap() { let mut vec: Vec<uint> = Vec::with_capacity(7); vec.push(1); vec.push(2); let ptr = vec.as_ptr(); vec = vec.into_iter().unwrap(); assert_eq!(vec.as_ptr(), ptr); assert_eq!(vec.capacity(), 7); assert_eq!(vec.len(), 0); } #[test] #[should_fail] fn test_map_in_place_incompatible_types_fail() { let v = vec![0u, 1, 2]; v.map_in_place(|_| ()); } #[test] fn test_map_in_place() { let v = vec![0u, 1, 2]; assert_eq!(v.map_in_place(|i: uint| i as int - 1).as_slice(), [-1i, 0, 1].as_slice()); } #[test] fn test_map_in_place_zero_sized() { let v = vec![(), ()]; #[deriving(PartialEq, Show)] struct ZeroSized; assert_eq!(v.map_in_place(|_| ZeroSized).as_slice(), [ZeroSized, ZeroSized].as_slice()); } #[test] fn test_move_items() { let vec = vec![1, 2, 3]; let mut vec2 : Vec<i32> = vec![]; for i in vec.into_iter() { vec2.push(i); } assert!(vec2 == vec![1, 2, 3]); } #[test] fn test_move_items_reverse() { let vec = vec![1, 2, 3]; let mut vec2 : Vec<i32> = vec![]; for i in vec.into_iter().rev() { vec2.push(i); } assert!(vec2 == vec![3, 2, 1]); } #[test] fn test_move_items_zero_sized() { let vec = vec![(), (), ()]; let mut vec2 : Vec<()> = vec![]; for i in vec.into_iter() { vec2.push(i); } assert!(vec2 == vec![(), (), ()]); } #[test] fn test_into_boxed_slice() { let xs = vec![1u, 2, 3]; let ys = xs.into_boxed_slice(); assert_eq!(ys.as_slice(), [1u, 2, 3].as_slice()); } #[bench] fn bench_new(b: &mut Bencher) { b.iter(|| { let v: Vec<uint> = Vec::new(); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), 0); }) } fn do_bench_with_capacity(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let v: Vec<uint> = Vec::with_capacity(src_len); assert_eq!(v.len(), 0); assert_eq!(v.capacity(), src_len); }) } #[bench] fn bench_with_capacity_0000(b: &mut Bencher) { do_bench_with_capacity(b, 0) } #[bench] fn bench_with_capacity_0010(b: &mut Bencher) { do_bench_with_capacity(b, 10) } #[bench] fn 
bench_with_capacity_0100(b: &mut Bencher) { do_bench_with_capacity(b, 100) } #[bench] fn bench_with_capacity_1000(b: &mut Bencher) { do_bench_with_capacity(b, 1000) } fn do_bench_from_fn(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let dst = Vec::from_fn(src_len, |i| i); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }) } #[bench] fn bench_from_fn_0000(b: &mut Bencher) { do_bench_from_fn(b, 0) } #[bench] fn bench_from_fn_0010(b: &mut Bencher) { do_bench_from_fn(b, 10) } #[bench] fn bench_from_fn_0100(b: &mut Bencher) { do_bench_from_fn(b, 100) } #[bench] fn bench_from_fn_1000(b: &mut Bencher) { do_bench_from_fn(b, 1000) } fn do_bench_from_elem(b: &mut Bencher, src_len: uint) { b.bytes = src_len as u64; b.iter(|| { let dst: Vec<uint> = Vec::from_elem(src_len, 5); assert_eq!(dst.len(), src_len); assert!(dst.iter().all(|x| *x == 5)); }) } #[bench] fn bench_from_elem_0000(b: &mut Bencher) { do_bench_from_elem(b, 0) } #[bench] fn bench_from_elem_0010(b: &mut Bencher) { do_bench_from_elem(b, 10) } #[bench] fn bench_from_elem_0100(b: &mut Bencher) { do_bench_from_elem(b, 100) } #[bench] fn bench_from_elem_1000(b: &mut Bencher) { do_bench_from_elem(b, 1000) } fn do_bench_from_slice(b: &mut Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = src_len as u64; b.iter(|| { let dst = src.clone().as_slice().to_vec(); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_from_slice_0000(b: &mut Bencher) { do_bench_from_slice(b, 0) } #[bench] fn bench_from_slice_0010(b: &mut Bencher) { do_bench_from_slice(b, 10) } #[bench] fn bench_from_slice_0100(b: &mut Bencher) { do_bench_from_slice(b, 100) } #[bench] fn bench_from_slice_1000(b: &mut Bencher) { do_bench_from_slice(b, 1000) } fn do_bench_from_iter(b: &mut Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = 
src_len as u64; b.iter(|| { let dst: Vec<uint> = FromIterator::from_iter(src.clone().into_iter()); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_from_iter_0000(b: &mut Bencher) { do_bench_from_iter(b, 0) } #[bench] fn bench_from_iter_0010(b: &mut Bencher) { do_bench_from_iter(b, 10) } #[bench] fn bench_from_iter_0100(b: &mut Bencher) { do_bench_from_iter(b, 100) } #[bench] fn bench_from_iter_1000(b: &mut Bencher) { do_bench_from_iter(b, 1000) } fn do_bench_extend(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.extend(src.clone().into_iter()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_extend_0000_0000(b: &mut Bencher) { do_bench_extend(b, 0, 0) } #[bench] fn bench_extend_0000_0010(b: &mut Bencher) { do_bench_extend(b, 0, 10) } #[bench] fn bench_extend_0000_0100(b: &mut Bencher) { do_bench_extend(b, 0, 100) } #[bench] fn bench_extend_0000_1000(b: &mut Bencher) { do_bench_extend(b, 0, 1000) } #[bench] fn bench_extend_0010_0010(b: &mut Bencher) { do_bench_extend(b, 10, 10) } #[bench] fn bench_extend_0100_0100(b: &mut Bencher) { do_bench_extend(b, 100, 100) } #[bench] fn bench_extend_1000_1000(b: &mut Bencher) { do_bench_extend(b, 1000, 1000) } fn do_bench_push_all(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.push_all(src.as_slice()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_push_all_0000_0000(b: &mut Bencher) { 
do_bench_push_all(b, 0, 0) } #[bench] fn bench_push_all_0000_0010(b: &mut Bencher) { do_bench_push_all(b, 0, 10) } #[bench] fn bench_push_all_0000_0100(b: &mut Bencher) { do_bench_push_all(b, 0, 100) } #[bench] fn bench_push_all_0000_1000(b: &mut Bencher) { do_bench_push_all(b, 0, 1000) } #[bench] fn bench_push_all_0010_0010(b: &mut Bencher) { do_bench_push_all(b, 10, 10) } #[bench] fn bench_push_all_0100_0100(b: &mut Bencher) { do_bench_push_all(b, 100, 100) } #[bench] fn bench_push_all_1000_1000(b: &mut Bencher) { do_bench_push_all(b, 1000, 1000) } fn do_bench_push_all_move(b: &mut Bencher, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0u, dst_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = src_len as u64; b.iter(|| { let mut dst = dst.clone(); dst.extend(src.clone().into_iter()); assert_eq!(dst.len(), dst_len + src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_push_all_move_0000_0000(b: &mut Bencher) { do_bench_push_all_move(b, 0, 0) } #[bench] fn bench_push_all_move_0000_0010(b: &mut Bencher) { do_bench_push_all_move(b, 0, 10) } #[bench] fn bench_push_all_move_0000_0100(b: &mut Bencher) { do_bench_push_all_move(b, 0, 100) } #[bench] fn bench_push_all_move_0000_1000(b: &mut Bencher) { do_bench_push_all_move(b, 0, 1000) } #[bench] fn bench_push_all_move_0010_0010(b: &mut Bencher) { do_bench_push_all_move(b, 10, 10) } #[bench] fn bench_push_all_move_0100_0100(b: &mut Bencher) { do_bench_push_all_move(b, 100, 100) } #[bench] fn bench_push_all_move_1000_1000(b: &mut Bencher) { do_bench_push_all_move(b, 1000, 1000) } fn do_bench_clone(b: &mut Bencher, src_len: uint) { let src: Vec<uint> = FromIterator::from_iter(range(0, src_len)); b.bytes = src_len as u64; b.iter(|| { let dst = src.clone(); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| i == *x)); }); } #[bench] fn bench_clone_0000(b: &mut Bencher) { 
do_bench_clone(b, 0) } #[bench] fn bench_clone_0010(b: &mut Bencher) { do_bench_clone(b, 10) } #[bench] fn bench_clone_0100(b: &mut Bencher) { do_bench_clone(b, 100) } #[bench] fn bench_clone_1000(b: &mut Bencher) { do_bench_clone(b, 1000) } fn do_bench_clone_from(b: &mut Bencher, times: uint, dst_len: uint, src_len: uint) { let dst: Vec<uint> = FromIterator::from_iter(range(0, src_len)); let src: Vec<uint> = FromIterator::from_iter(range(dst_len, dst_len + src_len)); b.bytes = (times * src_len) as u64; b.iter(|| { let mut dst = dst.clone(); for _ in range(0, times) { dst.clone_from(&src); assert_eq!(dst.len(), src_len); assert!(dst.iter().enumerate().all(|(i, x)| dst_len + i == *x)); } }); } #[bench] fn bench_clone_from_01_0000_0000(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 0) } #[bench] fn bench_clone_from_01_0000_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 10) } #[bench] fn bench_clone_from_01_0000_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 100) } #[bench] fn bench_clone_from_01_0000_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 0, 1000) } #[bench] fn bench_clone_from_01_0010_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 10) } #[bench] fn bench_clone_from_01_0100_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 100) } #[bench] fn bench_clone_from_01_1000_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 1000, 1000) } #[bench] fn bench_clone_from_01_0010_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 100) } #[bench] fn bench_clone_from_01_0100_1000(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 1000) } #[bench] fn bench_clone_from_01_0010_0000(b: &mut Bencher) { do_bench_clone_from(b, 1, 10, 0) } #[bench] fn bench_clone_from_01_0100_0010(b: &mut Bencher) { do_bench_clone_from(b, 1, 100, 10) } #[bench] fn bench_clone_from_01_1000_0100(b: &mut Bencher) { do_bench_clone_from(b, 1, 1000, 100) } #[bench] fn bench_clone_from_10_0000_0000(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 0) } #[bench] fn 
bench_clone_from_10_0000_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 10) } #[bench] fn bench_clone_from_10_0000_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 100) } #[bench] fn bench_clone_from_10_0000_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 0, 1000) } #[bench] fn bench_clone_from_10_0010_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 10) } #[bench] fn bench_clone_from_10_0100_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 100) } #[bench] fn bench_clone_from_10_1000_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 1000, 1000) } #[bench] fn bench_clone_from_10_0010_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 100) } #[bench] fn bench_clone_from_10_0100_1000(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 1000) } #[bench] fn bench_clone_from_10_0010_0000(b: &mut Bencher) { do_bench_clone_from(b, 10, 10, 0) } #[bench] fn bench_clone_from_10_0100_0010(b: &mut Bencher) { do_bench_clone_from(b, 10, 100, 10) } #[bench] fn bench_clone_from_10_1000_0100(b: &mut Bencher) { do_bench_clone_from(b, 10, 1000, 100) } }
28.699862
99
0.488516
56a29ea3958bda0b468f92caa60130a2ca284e2d
81,857
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use super::{models, API_VERSION}; #[derive(Clone)] pub struct Client { endpoint: String, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>, pipeline: azure_core::pipeline::Pipeline, } #[derive(Clone)] pub struct ClientBuilder { credential: std::sync::Arc<dyn azure_core::TokenCredential>, endpoint: Option<String>, scopes: Option<Vec<String>>, } pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD; impl ClientBuilder { pub fn new(credential: std::sync::Arc<dyn azure_core::TokenCredential>) -> Self { Self { credential, endpoint: None, scopes: None, } } pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self { self.endpoint = Some(endpoint.into()); self } pub fn scopes(mut self, scopes: &[&str]) -> Self { self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect()); self } pub fn build(self) -> Client { let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned()); let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]); Client::new(endpoint, self.credential, scopes) } } impl Client { pub(crate) fn endpoint(&self) -> &str { self.endpoint.as_str() } pub(crate) fn token_credential(&self) -> &dyn azure_core::TokenCredential { self.credential.as_ref() } pub(crate) fn scopes(&self) -> Vec<&str> { self.scopes.iter().map(String::as_str).collect() } pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> { let mut context = azure_core::Context::default(); let mut request = request.into(); self.pipeline.send(&mut context, &mut request).await } pub fn new(endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>) -> Self { let endpoint = endpoint.into(); let pipeline = azure_core::pipeline::Pipeline::new( 
option_env!("CARGO_PKG_NAME"), option_env!("CARGO_PKG_VERSION"), azure_core::ClientOptions::default(), Vec::new(), Vec::new(), ); Self { endpoint, credential, scopes, pipeline, } } pub fn my_workbooks(&self) -> my_workbooks::Client { my_workbooks::Client(self.clone()) } pub fn operations(&self) -> operations::Client { operations::Client(self.clone()) } pub fn workbooks(&self) -> workbooks::Client { workbooks::Client(self.clone()) } } #[non_exhaustive] #[derive(Debug, thiserror :: Error)] #[allow(non_camel_case_types)] pub enum Error { #[error(transparent)] MyWorkbooks_ListByResourceGroup(#[from] my_workbooks::list_by_resource_group::Error), #[error(transparent)] MyWorkbooks_ListBySubscription(#[from] my_workbooks::list_by_subscription::Error), #[error(transparent)] MyWorkbooks_Get(#[from] my_workbooks::get::Error), #[error(transparent)] MyWorkbooks_CreateOrUpdate(#[from] my_workbooks::create_or_update::Error), #[error(transparent)] MyWorkbooks_Update(#[from] my_workbooks::update::Error), #[error(transparent)] MyWorkbooks_Delete(#[from] my_workbooks::delete::Error), #[error(transparent)] Workbooks_ListBySubscription(#[from] workbooks::list_by_subscription::Error), #[error(transparent)] Workbooks_ListByResourceGroup(#[from] workbooks::list_by_resource_group::Error), #[error(transparent)] Workbooks_Get(#[from] workbooks::get::Error), #[error(transparent)] Workbooks_CreateOrUpdate(#[from] workbooks::create_or_update::Error), #[error(transparent)] Workbooks_Update(#[from] workbooks::update::Error), #[error(transparent)] Workbooks_Delete(#[from] workbooks::delete::Error), #[error(transparent)] Workbooks_RevisionsList(#[from] workbooks::revisions_list::Error), #[error(transparent)] Workbooks_RevisionGet(#[from] workbooks::revision_get::Error), #[error(transparent)] Operations_List(#[from] operations::list::Error), } pub mod my_workbooks { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_resource_group( &self, 
subscription_id: impl Into<String>, resource_group_name: impl Into<String>, category: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), category: category.into(), tags: Vec::new(), source_id: None, can_fetch_content: None, } } pub fn list_by_subscription( &self, subscription_id: impl Into<String>, category: impl Into<String>, ) -> list_by_subscription::Builder { list_by_subscription::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), category: category.into(), tags: Vec::new(), can_fetch_content: None, } } pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn create_or_update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, workbook_properties: impl Into<models::MyWorkbook>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), workbook_properties: workbook_properties.into(), source_id: None, } } pub fn update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, workbook_properties: impl Into<models::MyWorkbook>, ) -> update::Builder { update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), workbook_properties: workbook_properties.into(), source_id: None, } } pub fn delete( &self, subscription_id: impl Into<String>, 
resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) category: String, pub(crate) tags: Vec<String>, pub(crate) source_id: Option<String>, pub(crate) can_fetch_content: Option<bool>, } impl Builder { pub fn tags(mut self, tags: Vec<String>) -> Self { self.tags = tags; self } pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn can_fetch_content(mut self, can_fetch_content: bool) -> Self { self.can_fetch_content = Some(can_fetch_content); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::MyWorkbooksListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/myWorkbooks", self.client.endpoint(),
// my_workbooks::list_by_resource_group: GET over the resource group's myWorkbooks collection.
// Pipeline below: parse URL, fetch bearer token, set Authorization header, append
// api-version + category (always) and sourceId/canFetchContent (only when set) as query pairs,
// send an empty body, then decode 200 as MyWorkbooksListResult and any other status as
// MyWorkbookError wrapped in Error::DefaultResponse.
// NOTE(review): the `tags` builder field is set via tags() but never appended to the URL here —
// presumably a generator gap; confirm against the service spec before relying on it.
&self.subscription_id, &self.resource_group_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let category = &self.category; url.query_pairs_mut().append_pair("category", category); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } if let Some(can_fetch_content) = &self.can_fetch_content { url.query_pairs_mut().append_pair("canFetchContent", &can_fetch_content.to_string()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbooksListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_subscription { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status
code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) category: String, pub(crate) tags: Vec<String>, pub(crate) can_fetch_content: Option<bool>, } impl Builder { pub fn tags(mut self, tags: Vec<String>) -> Self { self.tags = tags; self } pub fn can_fetch_content(mut self, can_fetch_content: bool) -> Self { self.can_fetch_content = Some(can_fetch_content); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::MyWorkbooksListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Insights/myWorkbooks", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let category = &self.category; url.query_pairs_mut().append_pair("category", category);
// my_workbooks::list_by_subscription (continued): optional canFetchContent query pair,
// empty request body, then GET; 200 decodes to MyWorkbooksListResult, anything else to
// MyWorkbookError via Error::DefaultResponse.
if let Some(can_fetch_content) = &self.can_fetch_content { url.query_pairs_mut().append_pair("canFetchContent", &can_fetch_content.to_string()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbooksListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id:
String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::MyWorkbook, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/myWorkbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response {
// my_workbooks::create_or_update: PUT of a single myWorkbook resource. The service can
// answer 200 (replaced) or 201 (created); both carry a MyWorkbook body, modeled as the
// two Response variants below.
Ok200(models::MyWorkbook), Created201(models::MyWorkbook), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, pub(crate) workbook_properties: models::MyWorkbook, pub(crate) source_id: Option<String>, } impl Builder { pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/myWorkbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
// create_or_update (continued): api-version + optional sourceId query pairs, JSON
// content-type, body is the serialized workbook_properties; responses dispatched to
// Ok200 / Created201, everything else to Error::DefaultResponse.
url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.workbook_properties).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to
get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, pub(crate) workbook_properties: models::MyWorkbook, pub(crate) source_id: Option<String>, } impl Builder { pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::MyWorkbook, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/myWorkbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.workbook_properties).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp =
// my_workbooks::update (continued): send the PATCH and decode the result.
// NOTE(review): the only success arm below is 201 CREATED — a 200 response would fall
// into the DefaultResponse error arm. Presumably this mirrors the service's swagger
// (generated code); confirm against the REST spec before treating it as a bug.
self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::MyWorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!(
// my_workbooks::delete: DELETE of a single myWorkbook; 200 and 204 are both success
// (no body), modeled as Response::Ok200 / Response::NoContent204. After the closing
// brace the sibling `workbooks` module begins with its operation-group Client wrapper.
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/myWorkbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::MyWorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod workbooks { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn list_by_subscription( &self, subscription_id: impl Into<String>, category: impl Into<String>, ) -> list_by_subscription::Builder { list_by_subscription::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), category: category.into(), tags: Vec::new(), can_fetch_content: None, } } pub fn list_by_resource_group( &self, subscription_id: impl Into<String>, resource_group_name: impl
Into<String>, category: impl Into<String>, ) -> list_by_resource_group::Builder { list_by_resource_group::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), category: category.into(), tags: Vec::new(), source_id: None, can_fetch_content: None, } } pub fn get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> get::Builder { get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn create_or_update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, workbook_properties: impl Into<models::Workbook>, ) -> create_or_update::Builder { create_or_update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), workbook_properties: workbook_properties.into(), source_id: None, } } pub fn update( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> update::Builder { update::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), source_id: None, workbook_update_parameters: None, } } pub fn delete( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> delete::Builder { delete::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn revisions_list( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, ) -> revisions_list::Builder {
// workbooks::Client (continued): each method above/below is a cheap constructor that
// clones the shared client and captures path parameters into the operation's Builder;
// no request is sent until Builder::into_future is awaited. revisions_list/revision_get
// builder modules are defined beyond this chunk.
revisions_list::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), } } pub fn revision_get( &self, subscription_id: impl Into<String>, resource_group_name: impl Into<String>, resource_name: impl Into<String>, revision_id: impl Into<String>, ) -> revision_get::Builder { revision_get::Builder { client: self.0.clone(), subscription_id: subscription_id.into(), resource_group_name: resource_group_name.into(), resource_name: resource_name.into(), revision_id: revision_id.into(), } } } pub mod list_by_subscription { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) category: String, pub(crate) tags: Vec<String>, pub(crate) can_fetch_content: Option<bool>, } impl Builder { pub fn tags(mut self, tags: Vec<String>) -> Self { self.tags = tags; self } pub fn can_fetch_content(mut self, can_fetch_content: bool) -> Self { self.can_fetch_content = Some(can_fetch_content); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::WorkbooksListResult, Error>> { Box::pin(async move { let url_str =
&format!( "{}/subscriptions/{}/providers/Microsoft.Insights/workbooks", self.client.endpoint(), &self.subscription_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let category = &self.category; url.query_pairs_mut().append_pair("category", category); if let Some(can_fetch_content) = &self.can_fetch_content { url.query_pairs_mut().append_pair("canFetchContent", &can_fetch_content.to_string()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbooksListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod list_by_resource_group { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse
// workbooks::list_by_resource_group: same Error taxonomy as every generated operation
// module here (transport, serialization, auth, and non-success DefaultResponse cases),
// but carrying models::WorkbookError payloads.
{ status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) category: String, pub(crate) tags: Vec<String>, pub(crate) source_id: Option<String>, pub(crate) can_fetch_content: Option<bool>, } impl Builder { pub fn tags(mut self, tags: Vec<String>) -> Self { self.tags = tags; self } pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn can_fetch_content(mut self, can_fetch_content: bool) -> Self { self.can_fetch_content = Some(can_fetch_content); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::WorkbooksListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks", self.client.endpoint(), &self.subscription_id, &self.resource_group_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}",
// list_by_resource_group (continued): bearer token header, then api-version/category
// plus optional sourceId/canFetchContent query pairs; GET with empty body; 200 decodes
// to WorkbooksListResult, anything else to WorkbookError.
token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let category = &self.category; url.query_pairs_mut().append_pair("category", category); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } if let Some(can_fetch_content) = &self.can_fetch_content { url.query_pairs_mut().append_pair("canFetchContent", &can_fetch_content.to_string()); } let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbooksListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed
to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Workbook, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Workbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError =
// workbooks::get (continued): non-200 bodies are decoded as WorkbookError and surfaced
// as Error::DefaultResponse with the raw status code.
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod create_or_update { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200(models::Workbook), Created201(models::Workbook), } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, pub(crate) workbook_properties: models::Workbook, pub(crate) source_id: Option<String>, } impl Builder { pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PUT); let credential
// workbooks::create_or_update (continued): PUT with JSON-serialized workbook_properties
// and optional sourceId query pair; 200 => Response::Ok200, 201 => Response::Created201,
// any other status => Error::DefaultResponse carrying a WorkbookError.
= self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } req_builder = req_builder.header("content-type", "application/json"); let req_body = azure_core::to_json(&self.workbook_properties).map_err(Error::Serialize)?; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Workbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Ok200(rsp_value)) } http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Workbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(Response::Created201(rsp_value)) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod update { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value:
models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, pub(crate) source_id: Option<String>, pub(crate) workbook_update_parameters: Option<models::WorkbookUpdateParameters>, } impl Builder { pub fn source_id(mut self, source_id: impl Into<String>) -> Self { self.source_id = Some(source_id.into()); self } pub fn workbook_update_parameters(mut self, workbook_update_parameters: impl Into<models::WorkbookUpdateParameters>) -> Self { self.workbook_update_parameters = Some(workbook_update_parameters.into()); self } pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::Workbook, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::PATCH); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}",
// workbooks::update (continued): PATCH; the body is the JSON-serialized
// workbook_update_parameters when set (with a JSON content-type), otherwise empty.
// NOTE(review): the only success arm matches 201 CREATED — a 200 response falls into
// the DefaultResponse error arm; presumably this mirrors the service spec (generated
// code) — confirm against the swagger. After workbooks::update closes, the
// workbooks::delete module begins; its response match continues past this chunk.
token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); if let Some(source_id) = &self.source_id { url.query_pairs_mut().append_pair("sourceId", source_id); } let req_body = if let Some(workbook_update_parameters) = &self.workbook_update_parameters { req_builder = req_builder.header("content-type", "application/json"); azure_core::to_json(workbook_update_parameters).map_err(Error::Serialize)? } else { bytes::Bytes::from_static(azure_core::EMPTY_BODY) }; req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::CREATED => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Workbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod delete { use super::{models, API_VERSION}; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request:
{0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::DELETE); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => Ok(Response::Ok200), http::StatusCode::NO_CONTENT => Ok(Response::NoContent204), status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod revisions_list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::WorkbooksListResult, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}/revisions", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = 
bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbooksListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } pub mod revision_get { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::WorkbookError, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, pub(crate) subscription_id: String, pub(crate) resource_group_name: String, pub(crate) resource_name: String, pub(crate) revision_id: String, } impl Builder { pub fn into_future(self) -> 
futures::future::BoxFuture<'static, std::result::Result<models::Workbook, Error>> { Box::pin(async move { let url_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/workbooks/{}/revisions/{}", self.client.endpoint(), &self.subscription_id, &self.resource_group_name, &self.resource_name, &self.revision_id ); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::Workbook = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::WorkbookError = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } } pub mod operations { use super::{models, API_VERSION}; pub struct Client(pub(crate) super::Client); impl Client { pub fn list(&self) -> list::Builder { list::Builder { client: self.0.clone() } } } pub mod 
list { use super::{models, API_VERSION}; #[derive(Debug, thiserror :: Error)] pub enum Error { #[error("HTTP status code {}", status_code)] DefaultResponse { status_code: http::StatusCode, value: models::ErrorResponse, }, #[error("Failed to parse request URL: {0}")] ParseUrl(url::ParseError), #[error("Failed to build request: {0}")] BuildRequest(http::Error), #[error("Failed to serialize request body: {0}")] Serialize(serde_json::Error), #[error("Failed to get access token: {0}")] GetToken(azure_core::Error), #[error("Failed to execute request: {0}")] SendRequest(azure_core::Error), #[error("Failed to get response bytes: {0}")] ResponseBytes(azure_core::StreamError), #[error("Failed to deserialize response: {0}, body: {1:?}")] Deserialize(serde_json::Error, bytes::Bytes), } #[derive(Clone)] pub struct Builder { pub(crate) client: super::super::Client, } impl Builder { pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationListResult, Error>> { Box::pin(async move { let url_str = &format!("{}/providers/Microsoft.Insights/operations", self.client.endpoint(),); let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?; let mut req_builder = http::request::Builder::new(); req_builder = req_builder.method(http::Method::GET); let credential = self.client.token_credential(); let token_response = credential .get_token(&self.client.scopes().join(" ")) .await .map_err(Error::GetToken)?; req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret())); url.query_pairs_mut().append_pair("api-version", super::API_VERSION); let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); req_builder = req_builder.uri(url.as_str()); let req = req_builder.body(req_body).map_err(Error::BuildRequest)?; let rsp = self.client.send(req).await.map_err(Error::SendRequest)?; let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct(); match rsp_status { http::StatusCode::OK => { let 
rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::OperationListResult = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Ok(rsp_value) } status_code => { let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?; let rsp_value: models::ErrorResponse = serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?; Err(Error::DefaultResponse { status_code, value: rsp_value, }) } } }) } } } }
50.280713
138
0.516364
877d179043dd3db56fcc19bbb962647f189fdc54
973
#[macro_use] extern crate log; use kube::{ api::{Api, Reflector}, client::APIClient, config, }; /// Example way to read secrets fn main() -> Result<(), failure::Error> { std::env::set_var("RUST_LOG", "info,kube=trace"); env_logger::init(); let config = config::load_kube_config().expect("failed to load kubeconfig"); let client = APIClient::new(config); let namespace = std::env::var("NAMESPACE").unwrap_or("default".into()); let resource = Api::v1ConfigMap(client).within(&namespace); let rf = Reflector::new(resource).init()?; // Can read initial state now: rf.read()?.into_iter().for_each(|(name, d)| { info!("Found configmap {} with data: {:?}", name, d.data); }); // Poll to keep data up to date: loop { rf.poll()?; // up to date state: let pods = rf.read()?.into_iter().map(|(name, _)| name).collect::<Vec<_>>(); info!("Current configmaps: {:?}", pods); } }
29.484848
84
0.587873
ab6f2ae630ef4b6dc37a5e914665cb9442d1b704
3,396
//! A tiny crate of utilities for working with implicit Wasm codegen conventions //! (often established by LLVM and lld). //! //! Examples conventions include: //! //! * The shadow stack pointer //! * The canonical linear memory that contains the shadow stack #![deny(missing_docs, missing_debug_implementations)] use failure::{bail, format_err, Error}; use walrus::{GlobalId, GlobalKind, MemoryId, Module, ValType}; /// Get a Wasm module's canonical linear memory. pub fn get_memory(module: &Module) -> Result<MemoryId, Error> { let mut memories = module.memories.iter().map(|m| m.id()); let memory = memories.next(); if memories.next().is_some() { bail!( "expected a single memory, found multiple; multiple memories \ currently not supported" ); } memory.ok_or_else(|| { format_err!( "module does not have a memory; must have a memory \ to transform return pointers into Wasm multi-value" ) }) } /// Discover the shadow stack pointer and add it to the module's exports as /// `__shadow_stack_pointer`. /// /// Adding it to the exports is useful for making sure it doesn't get GC'd. pub fn export_shadow_stack_pointer(module: &mut Module) -> Result<(), Error> { let candidates = module .globals .iter() .filter(|g| g.ty == ValType::I32) .filter(|g| g.mutable) .filter(|g| match g.kind { GlobalKind::Local(_) => true, GlobalKind::Import(_) => false, }) .collect::<Vec<_>>(); let ssp = match candidates.len() { 0 => bail!("could not find the shadow stack pointer for the module"), // If we've got two mutable globals then we're in a pretty standard // situation for threaded code where one is the stack pointer and one is the // TLS base offset. We need to figure out which is which, and we basically // assume LLVM's current codegen where the first is the stack pointer. // // TODO: have an actual check here. 
1 | 2 => candidates[0].id(), _ => bail!("too many mutable globals to infer which is the shadow stack pointer"), }; module.exports.add("__shadow_stack_pointer", ssp); Ok(()) } /// Unexport the shadow stack pointer that was previously added to the module's /// exports as `__shadow_stack_pointer`. pub fn unexport_shadow_stack_pointer(module: &mut Module) -> Result<(), Error> { let e = module .exports .iter() .find(|e| e.name == "__shadow_stack_pointer") .map(|e| e.id()) .ok_or_else(|| { format_err!("did not find the `__shadow_stack_pointer` export in the module") })?; module.exports.delete(e); Ok(()) } /// Get the `__shadow_stack_pointer`. /// /// It must have been previously added to the module's exports via /// `export_shadow_stack_pointer`. pub fn get_shadow_stack_pointer(module: &Module) -> Result<GlobalId, Error> { module .exports .iter() .find(|e| e.name == "__shadow_stack_pointer") .ok_or_else(|| { format_err!("did not find the `__shadow_stack_pointer` export in the module") }) .and_then(|e| match e.item { walrus::ExportItem::Global(g) => Ok(g), _ => bail!("`__shadow_stack_pointer` export is wrong kind"), }) }
35.375
90
0.621319
71b15321a441b76ad7f5876e92f02a46dcd5734c
1,339
use crate::types::*; use crate::errors::*; /// Contains a list of chat lists #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ChatLists { #[doc(hidden)] #[serde(rename(serialize = "@type", deserialize = "@type"))] td_name: String, /// List of chat lists chat_lists: Vec<ChatList>, } impl RObject for ChatLists { #[doc(hidden)] fn td_name(&self) -> &'static str { "chatLists" } fn to_json(&self) -> RTDResult<String> { Ok(serde_json::to_string(self)?) } } impl ChatLists { pub fn from_json<S: AsRef<str>>(json: S) -> RTDResult<Self> { Ok(serde_json::from_str(json.as_ref())?) } pub fn builder() -> RTDChatListsBuilder { let mut inner = ChatLists::default(); inner.td_name = "chatLists".to_string(); RTDChatListsBuilder { inner } } pub fn chat_lists(&self) -> &Vec<ChatList> { &self.chat_lists } } #[doc(hidden)] pub struct RTDChatListsBuilder { inner: ChatLists } impl RTDChatListsBuilder { pub fn build(&self) -> ChatLists { self.inner.clone() } pub fn chat_lists(&mut self, chat_lists: Vec<ChatList>) -> &mut Self { self.inner.chat_lists = chat_lists; self } } impl AsRef<ChatLists> for ChatLists { fn as_ref(&self) -> &ChatLists { self } } impl AsRef<ChatLists> for RTDChatListsBuilder { fn as_ref(&self) -> &ChatLists { &self.inner } }
20.921875
106
0.660194
fc4c3a2eb2ef8417d8e608a5f337d0f515160fcf
371
// if1.rs pub fn bigger(a: i32, b: i32) -> i32 { if a > b { return a } b } // Don't mind this for now :) #[cfg(test)] mod tests { use super::*; #[test] fn ten_is_bigger_than_eight() { assert_eq!(10, bigger(10, 8)); } #[test] fn fortytwo_is_bigger_than_thirtytwo() { assert_eq!(42, bigger(32, 42)); } }
14.269231
44
0.512129
e62a91af40fa87ae5fd394337bc38ae147ea971a
6,342
use crate::{ cmd::*, result::{anyhow, Result}, traits::{TxnEnvelope, TxnSign, B64}, }; use helium_api::blocks; use rust_decimal::{prelude::*, Decimal}; use serde::Serialize; use serde_json::json; use std::str::FromStr; /// Report an oracle price to the blockchain #[derive(Debug, StructOpt)] pub enum Cmd { Report(Report), } #[derive(Debug, StructOpt)] /// Construct an oracle price report and optionally commit it to the /// Helium Blockchain. pub struct Report { /// The oracle price to report. Specify in USD or supply one of the /// supported price lookup services ("coingecko", "bilaxy", "binance"). #[structopt(long)] price: Price, /// Block height to report the price at. Use "auto" to pick the /// latest known block height from the API. #[structopt(long)] block: Block, /// Commit the oracle price report to the API #[structopt(long)] commit: bool, } impl Cmd { pub async fn run(&self, opts: Opts) -> Result { match self { Cmd::Report(cmd) => cmd.run(opts).await, } } } impl Report { pub async fn run(&self, opts: Opts) -> Result { let password = get_password(false)?; let wallet = load_wallet(opts.files)?; let keypair = wallet.decrypt(password.as_bytes())?; let client = Client::new_with_base_url(api_url(wallet.public_key.network)); let block_height = self.block.to_block(&client).await?; let price = u64::from(self.price.to_usd().await?); let mut txn = BlockchainTxnPriceOracleV1 { public_key: keypair.public_key().into(), price, block_height, signature: Vec::new(), }; txn.signature = txn.sign(&keypair)?; let envelope = txn.in_envelope(); let status = maybe_submit_txn(self.commit, &client, &envelope).await?; print_txn(&txn, &envelope, &status, opts.format) } } fn print_txn( txn: &BlockchainTxnPriceOracleV1, envelope: &BlockchainTxn, status: &Option<PendingTxnStatus>, format: OutputFormat, ) -> Result { let encoded = envelope.to_b64()?; match format { OutputFormat::Table => { ptable!( ["Key", "Value"], ["Block Height", txn.block_height], ["Price", Usd::from(txn.price)], 
["Hash", status_str(status)] ); print_footer(status) } OutputFormat::Json => { let table = json!({ "price": txn.price, "block_height": txn.block_height, "txn": encoded, "hash": status_json(status) }); print_json(&table) } } } #[derive(Clone, Copy, Debug, Serialize)] enum Block { Auto, Height(u64), } impl FromStr for Block { type Err = Box<dyn std::error::Error>; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { match s { "auto" => Ok(Block::Auto), _ => Ok(Block::Height(s.parse()?)), } } } impl Block { async fn to_block(self, client: &Client) -> Result<u64> { match self { Block::Auto => Ok(blocks::height(client).await?), Block::Height(height) => Ok(height), } } } #[derive(Debug)] enum Price { CoinGecko, Bilaxy, BinanceUs, BinanceInt, Ftx, Usd(Usd), } impl Price { async fn to_usd(&self) -> Result<Usd> { match self { Self::CoinGecko => { let response = reqwest::get("https://api.coingecko.com/api/v3/coins/helium").await?; let json: serde_json::Value = response.json().await?; let amount = &json["market_data"]["current_price"]["usd"].to_string(); Ok(Usd::from_str(amount)?) } Self::Bilaxy => { let response = reqwest::get("https://newapi.bilaxy.com/v1/valuation?currency=HNT").await?; let json: serde_json::Value = response.json().await?; let amount = &json["HNT"]["usd_value"] .as_str() .ok_or_else(|| anyhow!("No USD value found"))?; Ok(Usd::from_str(amount)?) } Self::BinanceUs => { let response = reqwest::get("https://api.binance.us/api/v3/ticker/price?symbol=HNTUSD") .await?; let json: serde_json::Value = response.json().await?; let amount = &json["price"] .as_str() .ok_or_else(|| anyhow!("No USD value found"))?; Ok(Usd::from_str(amount)?) } Self::BinanceInt => { let response = reqwest::get("https://api.binance.us/api/v3/avgPrice?symbol=HNTUSDT").await?; let json: serde_json::Value = response.json().await?; let amount = &json["price"] .as_str() .ok_or_else(|| anyhow!("No USD value found"))?; Ok(Usd::from_str(amount)?) 
} Self::Ftx => { let response = reqwest::get("https://ftx.com/api/markets/HNT/USD").await?; let json: serde_json::Value = response.json().await?; let amount = &json["result"]["price"].to_string(); Ok(Usd::from_str(amount)?) } Self::Usd(v) => Ok(*v), } } } impl FromStr for Price { type Err = anyhow::Error; fn from_str(s: &str) -> Result<Self> { match s { "coingecko" => Ok(Self::CoinGecko), "bilaxy" => Ok(Self::Bilaxy), // don't break old interface so maintain "binance" to Binance US "binance" => Ok(Self::BinanceUs), "binance-us" => Ok(Self::BinanceUs), "binance-int" => Ok(Self::BinanceInt), "ftx" => Ok(Self::Ftx), _ => { let data = Decimal::from_str(s).or_else(|_| Decimal::from_scientific(s))?; Ok(Self::Usd(Usd::new(data.round_dp_with_strategy( 8, RoundingStrategy::MidpointAwayFromZero, )))) } } } }
30.936585
97
0.519237
038471afd8dc70e65ba54922a437a68f0e9ccab2
5,744
use cargo_metadata::{DependencyKind, Metadata, Node, Package, PackageId, Version}; use std::collections::{HashMap, HashSet, VecDeque}; use std::fmt; /// A pattern for matching package names in a [Rule]. pub enum PackageSpec { /// Match all packages. Wildcard, /// Match packages with the specified name. Name(String), } impl fmt::Display for PackageSpec { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Wildcard => write!(f, "*"), Self::Name(name) => write!(f, "{}", name), } } } impl PackageSpec { fn applies_to(&self, pkg: &Package) -> bool { match self { Self::Wildcard => true, Self::Name(name) => &pkg.name == name, } } } /// A pattern for matching dependency kinds in a [Rule]. pub enum DepKind { /// Match all dependency kinds. Wildcard, /// Match dependency kinds from the list. OneOf(Vec<DependencyKind>), } impl DepKind { /// Returns a dependency kind pattern that applies only to dependencies from /// '[dependencies]' section in Cargo.toml. pub fn normal() -> DepKind { Self::OneOf(vec![DependencyKind::Normal]) } } /// A rule for detecting undesirable dependencies. pub struct Rule { /// The pattern for the package to which the policy applies. /// If equals to "Wildcard", the rule applies to all workspace members. pub package: PackageSpec, /// The pattern for the package that should not appear in the dependency /// closure of the "package" above. pub should_not_depend_on: PackageSpec, /// Explanation for the policy. /// Why is it important to apply this rule? pub justification: String, /// Kind of dependencies this rule applies to. pub dependency_kind: DepKind, /// The file where the rule is defined. /// Use `std::file!()` macro at the rule construction site. 
pub defined_in: &'static str, } impl Rule { fn applies_to_kind(&self, k: DependencyKind) -> bool { match &self.dependency_kind { DepKind::Wildcard => true, DepKind::OneOf(kinds) => kinds.contains(&k), } } } impl fmt::Display for Rule { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "'{}' must not depend on '{}': {}", self.package, self.should_not_depend_on, self.justification ) } } pub struct Violation { /// Index of the rule that was violated. pub rule_idx: usize, /// The shortest path in the dependency graph that demonstrates the /// violation of the rule. pub dependency_path: Vec<(String, Version)>, } /// Tries to find the shortest path in the dependency graph that demonstrates a /// violation of the given `rule`. fn search_for_violation( metadata: &Metadata, node_by_id: &HashMap<&PackageId, &Node>, node: &Node, rule: &Rule, ) -> Option<Vec<(String, Version)>> { let mut paths = VecDeque::new(); paths.push_back(vec![&node]); let mut visited = HashSet::new(); visited.insert(&node.id); while let Some(path) = paths.pop_front() { let head = path.last().unwrap(); for dep in head.deps.iter() { if !dep.dep_kinds.iter().any(|e| rule.applies_to_kind(e.kind)) { continue; } if !visited.insert(&dep.pkg) { continue; } let extended_path = { let mut path = path.clone(); path.push(node_by_id.get(&dep.pkg).unwrap()); path }; if rule.should_not_depend_on.applies_to(&metadata[&dep.pkg]) { let dependency_path = extended_path .iter() .map(|n| { let p = &metadata[&n.id]; (p.name.clone(), p.version.clone()) }) .collect(); return Some(dependency_path); } paths.push_back(extended_path) } } None } /// Runs the given rule set on workspace metadata and returns the list of rule /// violations. 
pub fn run(metadata: &Metadata, rules: &[Rule]) -> Vec<Violation> { let mut violations = Vec::new(); let resolve = metadata.resolve.as_ref().expect("missing Metadata.resolve"); let node_by_id: HashMap<_, _> = resolve.nodes.iter().map(|n| (&n.id, n)).collect(); for (rule_idx, rule) in rules.iter().enumerate() { match &rule.package { PackageSpec::Wildcard => { for pkg in metadata.workspace_members.iter() { let node = node_by_id.get(pkg).unwrap(); if let Some(dependency_path) = search_for_violation(metadata, &node_by_id, node, rule) { violations.push(Violation { rule_idx, dependency_path, }); } } } PackageSpec::Name(n) => { for node in resolve.nodes.iter() { let package = &metadata[&node.id]; if n != &package.name { continue; } if let Some(dependency_path) = search_for_violation(metadata, &node_by_id, node, rule) { violations.push(Violation { rule_idx, dependency_path, }); } } } } } violations }
31.217391
87
0.535515
db30a23b89da17bcd9751a053707b519b4ac01ab
3,101
use std::{ collections::HashMap, fmt::{self, Debug, Formatter}, }; use crate::{consts, util}; // Headers names which may contain more than one value. const MULTI_VALUE_HEADER_NAMES: &[&str] = &[ consts::H_ACCEPT, consts::H_ACCEPT_CHARSET, consts::H_ACCEPT_ENCODING, consts::H_ACCEPT_LANGUAGE, consts::H_CACHE_CONTROL, consts::H_TE, consts::H_TRANSFER_ENCODING, consts::H_UPGRADE, consts::H_VIA, ]; type HeaderMap = HashMap<String, Vec<String>>; // The headers in an HTTP message. Contains methods which simplify the manipulation of these values. pub struct Headers { headers: HeaderMap, } impl Headers { pub fn from(headers: HeaderMap) -> Self { Headers { headers } } pub fn get(&self, name: &str) -> Option<&Vec<String>> { self.headers.get(&Self::normalize_header_name(name)) } pub fn get_host(&self) -> Option<&String> { self.get(consts::H_HOST).map(|h| h.into_iter().next().unwrap()) } pub fn get_all(&self) -> &HeaderMap { &self.headers } pub fn contains(&self, name: &str) -> bool { matches!(self.get(name), Some(_)) } // Assigns a value to a header, checking to see if the characters in the name and value are valid. pub fn set_one(&mut self, name: &str, value: &str) -> bool { if !is_token_string(name) || !Self::is_valid_header_value(&value) { false } else { self.headers.insert(Self::normalize_header_name(name), vec![value.to_string()]); true } } // Assigns multiple values to a header name. pub fn set(&mut self, name: &str, values: Vec<&str>) -> bool { if !is_token_string(name) || !values.iter().all(Self::is_valid_header_value) { false } else { let values = values.iter().map(|s| s.to_string()).collect(); self.headers.insert(Self::normalize_header_name(name), values); true } } pub fn remove(&mut self, name: &str) { self.headers.remove(name); } pub fn is_multi_value(name: &str) -> bool { MULTI_VALUE_HEADER_NAMES.contains(&&*Self::normalize_header_name(name)) } // Header names are not case-sensitive, so making them lowercase is valid and helps for comparisons. 
fn normalize_header_name(name: &str) -> String { name.to_ascii_lowercase() } // The standard defines the set of characters a header value may contain. fn is_valid_header_value(str: &&str) -> bool { str.chars().all(|c| util::is_visible_char(c) || consts::OPTIONAL_WHITESPACE.contains(&c)) } } impl Debug for Headers { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let headers_joined = self .headers .iter() .map(|h| format!("{}: {}", h.0, h.1.join(", "))) .collect::<Vec<_>>() .join("\n"); write!(f, "{}", headers_joined) } } const TOKEN_CHARS: &str = "!#$%&'*+-.^_`|~"; fn is_token_char(ch: char) -> bool { TOKEN_CHARS.contains(ch) || ch.is_ascii_alphanumeric() } fn is_token_string(str: &str) -> bool { str.chars().all(is_token_char) }
33.344086
114
0.620767
e906af47cf50127d2ffa5755f093d729e1d8e008
419
use std::collections::HashMap; fn main() { let mut original = HashMap::new(); original.insert("name", "Rocket Skates"); original.insert("price", "12.75"); original.insert("color", "yellow"); let mut update = HashMap::new(); update.insert("price", "15.25"); update.insert("color", "red"); update.insert("year", "1974"); original.extend(&update); println!("{:#?}", original) }
23.277778
45
0.596659
4af11ad234e45436429802a0d53ce3765a06712c
3,813
// Label-propagation benchmark for declarative-dataflow: loads a node/edge graph from
// CSV, registers a recursive "label" rule plus a "labelprop" projection of it, runs
// the dataflow to fixpoint on a single timely worker, and checks the expected count.

#[global_allocator]
static ALLOCATOR: jemallocator::Jemalloc = jemallocator::Jemalloc;

use std::sync::mpsc::channel;
use std::time::{Duration, Instant};

use timely::Configuration;

use differential_dataflow::operators::{Consolidate, Count};

use declarative_dataflow::plan::{Aggregate, AggregationFn, Join, Union};
use declarative_dataflow::server::{Register, Server};
use declarative_dataflow::sources::{CsvFile, Source};
use declarative_dataflow::{Plan, Rule, Value};

use Value::Eid;

fn main() {
    // Single-threaded timely execution; results flow back over an mpsc channel.
    timely::execute(Configuration::Thread, move |worker| {
        let mut server = Server::<Duration, u64>::new(Default::default());
        let (send_results, results) = channel();

        // Logical variable ids used inside the query plans below.
        let (x, y, z) = (0, 1, 2);
        let rules = vec![
            // label(x, y) :- node(x, y) ; edge(z, y), label(x, z)  — recursive propagation.
            Rule {
                name: "label".to_string(),
                plan: Plan::Union(Union {
                    variables: vec![x, y],
                    plans: vec![
                        Plan::MatchA(x, ":node".to_string(), y),
                        Plan::Join(Join {
                            variables: vec![z],
                            left_plan: Box::new(Plan::MatchA(z, ":edge".to_string(), y)),
                            right_plan: Box::new(Plan::NameExpr(vec![x, z], "label".to_string())),
                        }),
                    ],
                }),
            },
            // Published rule: plain projection of "label" (aggregated variant kept for reference).
            Rule {
                name: "labelprop".to_string(),
                plan: Plan::NameExpr(vec![x, y], "label".to_string()),
                // plan: Plan::Aggregate(Aggregate {
                //     variables: vec![x, y],
                //     key_variables: vec![],
                //     plan: Box::new(Plan::NameExpr(vec![x, y], "label".to_string())),
                //     aggregation_fn: AggregationFn::COUNT
                // })
            },
        ];

        // Space-delimited, headerless CSV inputs; column 0 is the entity id.
        // NOTE(review): paths are hard-coded to a developer machine — parameterize before reuse.
        let edge_source = Source::CsvFile(CsvFile {
            has_headers: false,
            delimiter: b' ',
            path: "/Users/niko/data/labelprop/edges.httpd_df".to_string(),
            eid_offset: 0,
            timestamp_offset: None,
            flexible: false,
            comment: None,
            schema: vec![(":edge".to_string(), (1, Eid(0)))],
        });
        let node_source = Source::CsvFile(CsvFile {
            has_headers: false,
            delimiter: b' ',
            path: "/Users/niko/data/labelprop/nodes.httpd_df".to_string(),
            eid_offset: 0,
            timestamp_offset: None,
            flexible: false,
            comment: None,
            schema: vec![(":node".to_string(), (1, Eid(0)))],
        });

        worker.dataflow::<Duration, _, _>(|scope| {
            server.register_source(edge_source, scope).unwrap();
            server.register_source(node_source, scope).unwrap();

            server
                .register(Register {
                    rules,
                    publish: vec!["labelprop".to_string()],
                })
                .unwrap();

            // Subscribe to the published relation and ship (data, diff) pairs to the channel.
            match server.interest("labelprop", scope) {
                // NOTE(review): `panic!(error)` with a non-string-literal argument is
                // deprecated and a hard error in the Rust 2021 edition — prefer
                // `panic!("{:?}", error)`; confirm the error type before changing.
                Err(error) => panic!(error),
                Ok(relation) => {
                    relation
                        .map(|_x| ())
                        .consolidate()
                        .count()
                        .probe_with(&mut server.probe)
                        .inspect(move |x| {
                            send_results.send((x.0.clone(), x.2)).unwrap();
                        });
                }
            }
        });

        // Drive the worker until the dataflow is fully caught up, then verify the count.
        let timer = Instant::now();
        worker.step_while(|| !server.probe.done());

        assert_eq!(results.recv().unwrap(), (((), 9393283), 1));
        // assert_eq!(dbg!(results.recv().unwrap()), (vec![Value::Number(9393283)], 1));
        println!("Finished. {:?}", timer.elapsed());
    })
    .unwrap();
}
35.971698
98
0.469709
38414bcd6afa7fde611b9192f0aaa36fe3a7411c
547
table! { members (user_id, project_id) { user_id -> Uuid, project_id -> Uuid, permission -> Varchar, } } table! { projects (id) { id -> Uuid, name -> Varchar, archived -> Bool, } } table! { users (id) { id -> Uuid, name -> Varchar, email -> Varchar, password -> Varchar, } } joinable!(members -> projects (project_id)); joinable!(members -> users (user_id)); allow_tables_to_appear_in_same_query!( members, projects, users, );
16.088235
44
0.52468
d57e9c8d1a2df40a55b150ed926e47d8b24b74a5
1,996
use rustc_hash::FxHashMap; use roller_protocol::control::{ButtonCoordinate, ButtonGridLocation, FaderId}; use crate::control::{ button::{ButtonGroup, ButtonMapping, ButtonRef, MetaButtonMapping}, fader::FaderControlMapping, }; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ControlMapping { pub faders: FxHashMap<FaderId, FaderControlMapping>, pub button_groups: Vec<ButtonGroup>, pub meta_buttons: FxHashMap<(ButtonGridLocation, ButtonCoordinate), MetaButtonMapping>, } impl ControlMapping { pub fn new( faders: Vec<FaderControlMapping>, button_groups: Vec<ButtonGroup>, meta_buttons: Vec<MetaButtonMapping>, ) -> ControlMapping { ControlMapping { faders: faders .into_iter() .map(|mapping| (mapping.id, mapping)) .collect(), button_groups, meta_buttons: meta_buttons .into_iter() .map(|mapping| ((mapping.location, mapping.coordinate), mapping)) .collect(), } } fn group_buttons(&self) -> impl Iterator<Item = (&'_ ButtonGroup, &'_ ButtonMapping)> { self.button_groups.iter().flat_map(|group| group.iter()) } pub fn find_button( &self, location: ButtonGridLocation, coordinate: ButtonCoordinate, ) -> Option<ButtonRef<'_>> { if location == ButtonGridLocation::Main { self.group_buttons() .find(|(_, button)| button.coordinate == coordinate) .map(|(group, button)| ButtonRef::Standard(group, button)) } else { self.meta_buttons .get(&(location, coordinate)) .map(|meta_button| ButtonRef::Meta(meta_button)) } } pub fn button_refs(&self) -> impl Iterator<Item = ButtonRef<'_>> { self.group_buttons() .map(ButtonRef::from) .chain(self.meta_buttons.values().map(ButtonRef::from)) } }
34.413793
91
0.601202
d75f124a22fc4b25f9826c6e8e76acd6007cad30
10,565
// Copyright 2019 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use super::constants::*; use super::evdev::{grab_evdev, ungrab_evdev}; use super::virtio_input_event; use super::InputError; use super::Result; use data_model::DataInit; use std::collections::VecDeque; use std::io::Read; use std::io::Write; use std::mem::size_of; use std::os::unix::io::{AsRawFd, RawFd}; #[derive(Copy, Clone, Debug, Default)] #[repr(C)] pub struct input_event { timestamp_fields: [u64; 2], pub type_: u16, pub code: u16, pub value: u32, } // Safe because it only has data and has no implicit padding. unsafe impl DataInit for input_event {} impl input_event { const EVENT_SIZE: usize = size_of::<input_event>(); fn from_virtio_input_event(other: &virtio_input_event) -> input_event { input_event { timestamp_fields: [0, 0], type_: other.type_.into(), code: other.code.into(), value: other.value.into(), } } } /// Encapsulates a socket or device node into an abstract event source, providing a common /// interface. /// It supports read and write operations to provide and accept events just like an event device /// node would, except that it handles virtio_input_event instead of input_event structures. /// It's necessary to call receive_events() before events are available for read. pub trait EventSource: Read + Write + AsRawFd { /// Perform any necessary initialization before receiving and sending events from/to the source. fn init(&mut self) -> Result<()> { Ok(()) } /// Perform any necessary cleanup when the device will no longer be used. fn finalize(&mut self) -> Result<()> { Ok(()) } /// Receive events from the source, filters them and stores them in a queue for future /// consumption by reading from this object. Returns the number of new non filtered events /// received. This function may block waiting for events to be available. 
fn receive_events(&mut self) -> Result<usize>; /// Returns the number of received events that have not been filtered or consumed yet. fn available_events_count(&self) -> usize; } // Try to read 16 events at a time to match what the linux guest driver does. const READ_BUFFER_SIZE: usize = 16 * size_of::<input_event>(); // The read buffer needs to be aligned to the alignment of input_event, which is aligned as u64 #[repr(align(8))] pub struct ReadBuffer { buffer: [u8; READ_BUFFER_SIZE], } /// Encapsulates implementation details common to all kinds of event sources. pub struct EventSourceImpl<T> { source: T, queue: VecDeque<virtio_input_event>, read_buffer: ReadBuffer, // The read index accounts for incomplete events read previously. read_idx: usize, } // Reads input events from the source. // Events are originally read as input_event structs and converted to virtio_input_event internally. impl<T: Read> EventSourceImpl<T> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { let mut bytes = 0usize; for evt_slice in buf.chunks_exact_mut(virtio_input_event::EVENT_SIZE) { match self.queue.pop_front() { None => { break; } Some(evt) => { evt_slice.copy_from_slice(evt.as_slice()); bytes += evt_slice.len(); } } } Ok(bytes) } } // Writes input events to the source. // Events come as virtio_input_event structs and are converted to input_event internally. impl<T: Write> EventSourceImpl<T> { fn write<F: Fn(&virtio_input_event) -> bool>( &mut self, buf: &[u8], event_filter: F, ) -> std::io::Result<usize> { for evt_slice in buf.chunks_exact(virtio_input_event::EVENT_SIZE) { // Don't use from_slice() here, the buffer is not guaranteed to be properly aligned. 
let mut vio_evt = virtio_input_event::new(0, 0, 0); vio_evt.as_mut_slice().copy_from_slice(evt_slice); if !event_filter(&vio_evt) { continue; } let evt = input_event::from_virtio_input_event(&vio_evt); self.source.write_all(evt.as_slice())?; } let len = buf.len() - buf.len() % virtio_input_event::EVENT_SIZE; Ok(len) } fn flush(&mut self) -> std::io::Result<()> { self.source.flush() } } impl<T: AsRawFd> EventSourceImpl<T> { fn as_raw_fd(&self) -> RawFd { self.source.as_raw_fd() } } impl<T> EventSourceImpl<T> where T: Read + Write + AsRawFd, { // Receive events from the source and store them in a queue, unless they should be filtered out. fn receive_events<F: Fn(&input_event) -> bool>(&mut self, event_filter: F) -> Result<usize> { let read = self .source .read(&mut self.read_buffer.buffer[self.read_idx..]) .map_err(|e| InputError::EventsReadError(e))?; let buff_size = read + self.read_idx; for evt_slice in self.read_buffer.buffer[..buff_size].chunks_exact(input_event::EVENT_SIZE) { let input_evt = match input_event::from_slice(evt_slice) { Some(x) => x, None => { // This shouldn't happen because all slices (even the last one) are guaranteed // to have the correct size and be properly aligned. error!( "Failed converting a slice of sice {} to input_event", evt_slice.len() ); // Skipping the event here effectively means no events will be received, because // if from_slice fails once it will fail always. continue; } }; if !event_filter(&input_evt) { continue; } let vio_evt = virtio_input_event::from_input_event(input_evt); self.queue.push_back(vio_evt); } let remainder = buff_size % input_event::EVENT_SIZE; // If there is an incomplete event at the end of the buffer, it needs to be moved to the // beginning and the next read operation must write right after it. if remainder != 0 { warn!("read incomplete event from source"); // The copy should only happen if there is at least one complete event in the buffer, // otherwise source and destination would be the same. 
if buff_size != remainder { let (des, src) = self.read_buffer.buffer.split_at_mut(buff_size - remainder); des[..remainder].copy_from_slice(src); } } self.read_idx = remainder; let received_events = buff_size / input_event::EVENT_SIZE; Ok(received_events) } fn available_events(&self) -> usize { self.queue.len() } fn new(source: T) -> EventSourceImpl<T> { EventSourceImpl { source, queue: VecDeque::new(), read_buffer: ReadBuffer { buffer: [0u8; READ_BUFFER_SIZE], }, read_idx: 0, } } } /// Encapsulates a (unix) socket as an event source. pub struct SocketEventSource<T> { evt_source_impl: EventSourceImpl<T>, } impl<T> SocketEventSource<T> where T: Read + Write + AsRawFd, { pub fn new(source: T) -> SocketEventSource<T> { SocketEventSource { evt_source_impl: EventSourceImpl::new(source), } } } impl<T: Read> Read for SocketEventSource<T> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.evt_source_impl.read(buf) } } impl<T> Write for SocketEventSource<T> where T: Read + Write + AsRawFd, { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { self.evt_source_impl.write(buf, |_evt| true) } fn flush(&mut self) -> std::io::Result<()> { self.evt_source_impl.flush() } } impl<T: AsRawFd> AsRawFd for SocketEventSource<T> { fn as_raw_fd(&self) -> RawFd { self.evt_source_impl.as_raw_fd() } } impl<T> EventSource for SocketEventSource<T> where T: Read + Write + AsRawFd, { fn init(&mut self) -> Result<()> { Ok(()) } fn finalize(&mut self) -> Result<()> { ungrab_evdev(self) } fn receive_events(&mut self) -> Result<usize> { self.evt_source_impl.receive_events(|_evt| true) } fn available_events_count(&self) -> usize { self.evt_source_impl.available_events() } } /// Encapsulates an event device node as an event source pub struct EvdevEventSource<T> { evt_source_impl: EventSourceImpl<T>, } impl<T> EvdevEventSource<T> where T: Read + Write + AsRawFd, { pub fn new(source: T) -> EvdevEventSource<T> { EvdevEventSource { evt_source_impl: EventSourceImpl::new(source), } 
} } impl<T: Read> Read for EvdevEventSource<T> { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { self.evt_source_impl.read(buf) } } impl<T> Write for EvdevEventSource<T> where T: Read + Write + AsRawFd, { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { self.evt_source_impl.write(buf, |evt| { // Miscellaneous events produced by the device are sent back to it by the kernel input // subsystem, but because these events are handled by the host kernel as well as the // guest the device would get them twice. Which would prompt the device to send the // event to the guest again entering an infinite loop. evt.type_ != EV_MSC }) } fn flush(&mut self) -> std::io::Result<()> { self.evt_source_impl.flush() } } impl<T: AsRawFd> AsRawFd for EvdevEventSource<T> { fn as_raw_fd(&self) -> RawFd { self.evt_source_impl.as_raw_fd() } } impl<T> EventSource for EvdevEventSource<T> where T: Read + Write + AsRawFd, { fn init(&mut self) -> Result<()> { grab_evdev(self) } fn finalize(&mut self) -> Result<()> { ungrab_evdev(self) } fn receive_events(&mut self) -> Result<usize> { self.evt_source_impl.receive_events(|_evt| true) } fn available_events_count(&self) -> usize { self.evt_source_impl.available_events() } }
31.537313
100
0.613441
389c3e699a867dcfa774078aec7143ed29a1aa40
15,106
//! Board support crate for the Nordic nRF52840-DK
//!
//! UARTE, SPIM and TWI should be functional,
//! but might miss some features.
#![no_std]

pub use cortex_m;
pub use embedded_hal;
pub use nrf52840_hal as hal;

/// Exports traits that are usually needed when using this crate
pub mod prelude {
    pub use nrf52840_hal::prelude::*;
}

// TODO: Maybe we want a debug module like in the DWM1001-Dev implementation.
// pub mod debug;

use nrf52840_hal::{
    gpio::{p0, p1, Disconnected, Input, Level, Output, Pin, PullUp, PushPull},
    pac::{self as nrf52, CorePeripherals, Peripherals},
    spim::{self, Frequency, Spim, MODE_0},
    uarte::{self, Baudrate as UartBaudrate, Parity as UartParity, Uarte},
};

use embedded_hal::digital::v2::{InputPin, OutputPin};

/// Provides access to all features of the nRF52840-DK board
#[allow(non_snake_case)]
pub struct Board {
    /// The nRF52's pins which are not otherwise occupied on the nRF52840-DK
    pub pins: Pins,

    /// The nRF52840-DK UART which is wired to the virtual USB CDC port
    pub cdc: Uarte<nrf52::UARTE0>,

    /// The nRF52840-DK SPI which is wired to the SPI flash
    pub flash: Spim<nrf52::SPIM2>,
    /// Chip-select pin for the on-board SPI flash (P0.17, active low)
    pub flash_cs: Pin<Output<PushPull>>,

    /// The LEDs on the nRF52840-DK board
    pub leds: Leds,

    /// The buttons on the nRF52840-DK board
    pub buttons: Buttons,

    /// The NFC antenna pins on the nRF52840-DK board
    pub nfc: NFC,

    /// Core peripheral: Cache and branch predictor maintenance operations
    pub CBP: nrf52::CBP,

    /// Core peripheral: CPUID
    pub CPUID: nrf52::CPUID,

    /// Core peripheral: Debug Control Block
    pub DCB: nrf52::DCB,

    /// Core peripheral: Data Watchpoint and Trace unit
    pub DWT: nrf52::DWT,

    /// Core peripheral: Flash Patch and Breakpoint unit
    pub FPB: nrf52::FPB,

    /// Core peripheral: Floating Point Unit
    pub FPU: nrf52::FPU,

    /// Core peripheral: Instrumentation Trace Macrocell
    pub ITM: nrf52::ITM,

    /// Core peripheral: Memory Protection Unit
    pub MPU: nrf52::MPU,

    /// Core peripheral: Nested Vector Interrupt Controller
    pub NVIC: nrf52::NVIC,

    /// Core peripheral: System Control Block
    pub SCB: nrf52::SCB,

    /// Core peripheral: SysTick Timer
    pub SYST: nrf52::SYST,

    /// Core peripheral: Trace Port Interface Unit
    pub TPIU: nrf52::TPIU,

    /// nRF52 peripheral: FICR
    pub FICR: nrf52::FICR,

    /// nRF52 peripheral: UICR
    pub UICR: nrf52::UICR,

    /// nRF52 peripheral: ACL
    pub ACL: nrf52::ACL,

    /// nRF52 peripheral: POWER
    pub POWER: nrf52::POWER,

    /// nRF52 peripheral: CLOCK
    pub CLOCK: nrf52::CLOCK,

    /// nRF52 peripheral: RADIO
    pub RADIO: nrf52::RADIO,

    /// nRF52 peripheral: UART0
    pub UART0: nrf52::UART0,

    /// nRF52 peripheral: SPIM0
    pub SPIM0: nrf52::SPIM0,

    /// nRF52 peripheral: SPIS0
    pub SPIS0: nrf52::SPIS0,

    /// nRF52 peripheral: TWIM0
    pub TWIM0: nrf52::TWIM0,

    /// nRF52 peripheral: TWIS0
    pub TWIS0: nrf52::TWIS0,

    /// nRF52 peripheral: SPI0
    pub SPI0: nrf52::SPI0,

    /// nRF52 peripheral: TWI0
    pub TWI0: nrf52::TWI0,

    /// nRF52 peripheral: SPIM1
    pub SPIM1: nrf52::SPIM1,

    /// nRF52 peripheral: SPIS1
    pub SPIS1: nrf52::SPIS1,

    /// nRF52 peripheral: TWIM1
    pub TWIM1: nrf52::TWIM1,

    /// nRF52 peripheral: TWIS1
    pub TWIS1: nrf52::TWIS1,

    /// nRF52 peripheral: SPI1
    pub SPI1: nrf52::SPI1,

    /// nRF52 peripheral: TWI1
    pub TWI1: nrf52::TWI1,

    /// nRF52 peripheral: NFCT
    pub NFCT: nrf52::NFCT,

    /// nRF52 peripheral: GPIOTE
    pub GPIOTE: nrf52::GPIOTE,

    /// nRF52 peripheral: SAADC
    pub SAADC: nrf52::SAADC,

    /// nRF52 peripheral: TIMER0
    pub TIMER0: nrf52::TIMER0,

    /// nRF52 peripheral: TIMER1
    pub TIMER1: nrf52::TIMER1,

    /// nRF52 peripheral: TIMER2
    pub TIMER2: nrf52::TIMER2,

    /// nRF52 peripheral: RTC0
    pub RTC0: nrf52::RTC0,

    /// nRF52 peripheral: TEMP
    pub TEMP: nrf52::TEMP,

    /// nRF52 peripheral: RNG
    pub RNG: nrf52::RNG,

    /// nRF52 peripheral: ECB
    pub ECB: nrf52::ECB,

    /// nRF52 peripheral: CCM
    pub CCM: nrf52::CCM,

    /// nRF52 peripheral: AAR
    pub AAR: nrf52::AAR,

    /// nRF52 peripheral: WDT
    pub WDT: nrf52::WDT,

    /// nRF52 peripheral: RTC1
    pub RTC1: nrf52::RTC1,

    /// nRF52 peripheral: QDEC
    pub QDEC: nrf52::QDEC,

    /// nRF52 peripheral: COMP
    pub COMP: nrf52::COMP,

    /// nRF52 peripheral: LPCOMP
    pub LPCOMP: nrf52::LPCOMP,

    /// nRF52 peripheral: SWI0
    pub SWI0: nrf52::SWI0,

    /// nRF52 peripheral: EGU0
    pub EGU0: nrf52::EGU0,

    /// nRF52 peripheral: SWI1
    pub SWI1: nrf52::SWI1,

    /// nRF52 peripheral: EGU1
    pub EGU1: nrf52::EGU1,

    /// nRF52 peripheral: SWI2
    pub SWI2: nrf52::SWI2,

    /// nRF52 peripheral: EGU2
    pub EGU2: nrf52::EGU2,

    /// nRF52 peripheral: SWI3
    pub SWI3: nrf52::SWI3,

    /// nRF52 peripheral: EGU3
    pub EGU3: nrf52::EGU3,

    /// nRF52 peripheral: SWI4
    pub SWI4: nrf52::SWI4,

    /// nRF52 peripheral: EGU4
    pub EGU4: nrf52::EGU4,

    /// nRF52 peripheral: SWI5
    pub SWI5: nrf52::SWI5,

    /// nRF52 peripheral: EGU5
    pub EGU5: nrf52::EGU5,

    /// nRF52 peripheral: TIMER3
    pub TIMER3: nrf52::TIMER3,

    /// nRF52 peripheral: TIMER4
    pub TIMER4: nrf52::TIMER4,

    /// nRF52 peripheral: PWM0
    pub PWM0: nrf52::PWM0,

    /// nRF52 peripheral: PDM
    pub PDM: nrf52::PDM,

    /// nRF52 peripheral: NVMC
    pub NVMC: nrf52::NVMC,

    /// nRF52 peripheral: PPI
    pub PPI: nrf52::PPI,

    /// nRF52 peripheral: MWU
    pub MWU: nrf52::MWU,

    /// nRF52 peripheral: PWM1
    pub PWM1: nrf52::PWM1,

    /// nRF52 peripheral: PWM2
    pub PWM2: nrf52::PWM2,

    /// nRF52 peripheral: RTC2
    pub RTC2: nrf52::RTC2,

    /// nRF52 peripheral: I2S
    pub I2S: nrf52::I2S,
}

impl Board {
    /// Take the peripherals safely
    ///
    /// This method will return an instance of `nRF52840DK` the first time it is
    /// called. It will return only `None` on subsequent calls.
    pub fn take() -> Option<Self> {
        Some(Self::new(CorePeripherals::take()?, Peripherals::take()?))
    }

    /// Steal the peripherals
    ///
    /// This method produces an instance of `nRF52840DK`, regardless of whether
    /// another instance was create previously.
    ///
    /// # Safety
    ///
    /// This method can be used to create multiple instances of `nRF52840DK`. Those
    /// instances can interfere with each other, causing all kinds of unexpected
    /// behavior and circumventing safety guarantees in many ways.
    ///
    /// Always use `nRF52840DK::take`, unless you really know what you're doing.
    pub unsafe fn steal() -> Self {
        Self::new(CorePeripherals::steal(), Peripherals::steal())
    }

    // Splits the raw PAC peripherals into the board-level abstraction,
    // configuring the flash SPI bus and the CDC UART along the way.
    fn new(cp: CorePeripherals, p: Peripherals) -> Self {
        let pins0 = p0::Parts::new(p.P0);
        let pins1 = p1::Parts::new(p.P1);

        // The nRF52840-DK has an 64MB SPI flash on board which can be interfaced through SPI or Quad SPI.
        // As for now, only the normal SPI mode is available, so we are using this for the interface.
        let flash_spim = Spim::new(
            p.SPIM2,
            spim::Pins {
                sck: pins0.p0_19.into_push_pull_output(Level::Low).degrade(),
                mosi: Some(pins0.p0_20.into_push_pull_output(Level::Low).degrade()),
                miso: Some(pins0.p0_21.into_floating_input().degrade()),
            },
            Frequency::K500,
            MODE_0,
            0,
        );

        let flash_cs = pins0.p0_17.into_push_pull_output(Level::High).degrade();

        // The nRF52840-DK features an USB CDC port.
        // It features HWFC but does not have to use it.
        // It can transmit a flexible baudrate of up to 1Mbps.
        let cdc_uart = Uarte::new(
            p.UARTE0,
            uarte::Pins {
                txd: pins0.p0_06.into_push_pull_output(Level::High).degrade(),
                rxd: pins0.p0_08.into_floating_input().degrade(),
                cts: Some(pins0.p0_07.into_floating_input().degrade()),
                rts: Some(pins0.p0_05.into_push_pull_output(Level::High).degrade()),
            },
            UartParity::EXCLUDED,
            UartBaudrate::BAUD115200,
        );

        Board {
            cdc: cdc_uart,
            flash: flash_spim,
            flash_cs,

            pins: Pins {
                P0_03: pins0.p0_03,
                P0_04: pins0.p0_04,
                _RESET: pins0.p0_18,
                P0_22: pins0.p0_22,
                P0_23: pins0.p0_23,
                P0_26: pins0.p0_26,
                P0_27: pins0.p0_27,
                P0_28: pins0.p0_28,
                P0_29: pins0.p0_29,
                P0_30: pins0.p0_30,
                P0_31: pins0.p0_31,
                P1_00: pins1.p1_00,
                P1_01: pins1.p1_01,
                P1_02: pins1.p1_02,
                P1_03: pins1.p1_03,
                P1_04: pins1.p1_04,
                P1_05: pins1.p1_05,
                P1_06: pins1.p1_06,
                P1_07: pins1.p1_07,
                P1_08: pins1.p1_08,
                P1_09: pins1.p1_09,
                P1_10: pins1.p1_10,
                P1_11: pins1.p1_11,
                P1_12: pins1.p1_12,
                P1_13: pins1.p1_13,
                P1_14: pins1.p1_14,
                P1_15: pins1.p1_15,
            },

            leds: Leds {
                led_1: Led::new(pins0.p0_13.degrade()),
                led_2: Led::new(pins0.p0_14.degrade()),
                led_3: Led::new(pins0.p0_15.degrade()),
                led_4: Led::new(pins0.p0_16.degrade()),
            },

            buttons: Buttons {
                button_1: Button::new(pins0.p0_11.degrade()),
                button_2: Button::new(pins0.p0_12.degrade()),
                button_3: Button::new(pins0.p0_24.degrade()),
                button_4: Button::new(pins0.p0_25.degrade()),
            },

            nfc: NFC {
                nfc_1: pins0.p0_09,
                nfc_2: pins0.p0_10,
            },

            // Core peripherals
            CBP: cp.CBP,
            CPUID: cp.CPUID,
            DCB: cp.DCB,
            DWT: cp.DWT,
            FPB: cp.FPB,
            FPU: cp.FPU,
            ITM: cp.ITM,
            MPU: cp.MPU,
            NVIC: cp.NVIC,
            SCB: cp.SCB,
            SYST: cp.SYST,
            TPIU: cp.TPIU,

            // nRF52 peripherals
            FICR: p.FICR,
            UICR: p.UICR,
            ACL: p.ACL,
            POWER: p.POWER,
            CLOCK: p.CLOCK,
            RADIO: p.RADIO,
            UART0: p.UART0,
            SPIM0: p.SPIM0,
            SPIS0: p.SPIS0,
            TWIM0: p.TWIM0,
            TWIS0: p.TWIS0,
            SPI0: p.SPI0,
            TWI0: p.TWI0,
            SPIM1: p.SPIM1,
            SPIS1: p.SPIS1,
            TWIM1: p.TWIM1,
            TWIS1: p.TWIS1,
            SPI1: p.SPI1,
            TWI1: p.TWI1,
            NFCT: p.NFCT,
            GPIOTE: p.GPIOTE,
            SAADC: p.SAADC,
            TIMER0: p.TIMER0,
            TIMER1: p.TIMER1,
            TIMER2: p.TIMER2,
            RTC0: p.RTC0,
            TEMP: p.TEMP,
            RNG: p.RNG,
            ECB: p.ECB,
            CCM: p.CCM,
            AAR: p.AAR,
            WDT: p.WDT,
            RTC1: p.RTC1,
            QDEC: p.QDEC,
            COMP: p.COMP,
            LPCOMP: p.LPCOMP,
            SWI0: p.SWI0,
            EGU0: p.EGU0,
            SWI1: p.SWI1,
            EGU1: p.EGU1,
            SWI2: p.SWI2,
            EGU2: p.EGU2,
            SWI3: p.SWI3,
            EGU3: p.EGU3,
            SWI4: p.SWI4,
            EGU4: p.EGU4,
            SWI5: p.SWI5,
            EGU5: p.EGU5,
            TIMER3: p.TIMER3,
            TIMER4: p.TIMER4,
            PWM0: p.PWM0,
            PDM: p.PDM,
            NVMC: p.NVMC,
            PPI: p.PPI,
            MWU: p.MWU,
            PWM1: p.PWM1,
            PWM2: p.PWM2,
            RTC2: p.RTC2,
            I2S: p.I2S,
        }
    }
}

/// The nRF52 pins that are available on the nRF52840DK
#[allow(non_snake_case)]
pub struct Pins {
    pub P0_03: p0::P0_03<Disconnected>,
    pub P0_04: p0::P0_04<Disconnected>,
    // P0.18 is the board's reset pin; kept private so it cannot be repurposed.
    _RESET: p0::P0_18<Disconnected>,
    pub P0_22: p0::P0_22<Disconnected>,
    pub P0_23: p0::P0_23<Disconnected>,
    pub P0_26: p0::P0_26<Disconnected>,
    pub P0_27: p0::P0_27<Disconnected>,
    pub P0_28: p0::P0_28<Disconnected>,
    pub P0_29: p0::P0_29<Disconnected>,
    pub P0_30: p0::P0_30<Disconnected>,
    pub P0_31: p0::P0_31<Disconnected>,
    pub P1_00: p1::P1_00<Disconnected>,
    pub P1_01: p1::P1_01<Disconnected>,
    pub P1_02: p1::P1_02<Disconnected>,
    pub P1_03: p1::P1_03<Disconnected>,
    pub P1_04: p1::P1_04<Disconnected>,
    pub P1_05: p1::P1_05<Disconnected>,
    pub P1_06: p1::P1_06<Disconnected>,
    pub P1_07: p1::P1_07<Disconnected>,
    pub P1_08: p1::P1_08<Disconnected>,
    pub P1_09: p1::P1_09<Disconnected>,
    pub P1_10: p1::P1_10<Disconnected>,
    pub P1_11: p1::P1_11<Disconnected>,
    pub P1_12: p1::P1_12<Disconnected>,
    pub P1_13: p1::P1_13<Disconnected>,
    pub P1_14: p1::P1_14<Disconnected>,
    pub P1_15: p1::P1_15<Disconnected>,
}

/// The LEDs on the nRF52840-DK board
// Doc comments corrected: the pin numbers below match the wiring in `Board::new`
// (the previous comments listed pins from a different board).
pub struct Leds {
    /// nRF52840-DK: LED1, nRF52: P0.13
    pub led_1: Led,

    /// nRF52840-DK: LED2, nRF52: P0.14
    pub led_2: Led,

    /// nRF52840-DK: LED3, nRF52: P0.15
    pub led_3: Led,

    /// nRF52840-DK: LED4, nRF52: P0.16
    pub led_4: Led,
}

/// An LED on the nRF52840-DK board
pub struct Led(Pin<Output<PushPull>>);

impl Led {
    fn new<Mode>(pin: Pin<Mode>) -> Self {
        // Driven high initially, i.e. the LED starts disabled (LEDs are active low).
        Led(pin.into_push_pull_output(Level::High))
    }

    /// Release the inner Pin to be used directly
    pub fn release(self) -> Pin<Output<PushPull>> {
        self.0
    }

    /// Enable the LED
    pub fn enable(&mut self) {
        self.0.set_low().unwrap()
    }

    /// Disable the LED
    pub fn disable(&mut self) {
        self.0.set_high().unwrap()
    }
}

/// The Buttons on the nRF52840-DK board
// Doc comments corrected: the pin numbers below match the wiring in `Board::new`.
pub struct Buttons {
    /// nRF52840-DK: Button 1, nRF52: P0.11
    pub button_1: Button,

    /// nRF52840-DK: Button 2, nRF52: P0.12
    pub button_2: Button,

    /// nRF52840-DK: Button 3, nRF52: P0.24
    pub button_3: Button,

    /// nRF52840-DK: Button 4, nRF52: P0.25
    pub button_4: Button,
}

/// A Button on the nRF52840-DK board
pub struct Button(Pin<Input<PullUp>>);

impl Button {
    fn new<Mode>(pin: Pin<Mode>) -> Self {
        // Pull-up input: the pin reads low while the button is held down.
        Button(pin.into_pullup_input())
    }

    /// Release the inner Pin to be used directly
    pub fn release(self) -> Pin<Input<PullUp>> {
        self.0
    }

    /// Button is pressed
    pub fn is_pressed(&self) -> bool {
        self.0.is_low().unwrap()
    }

    /// Button is released
    pub fn is_released(&self) -> bool {
        self.0.is_high().unwrap()
    }
}

/// The NFC pins on the nRF52840-DK board
pub struct NFC {
    /// nRF52840-DK: NFC1, nRF52: P0.09
    pub nfc_1: p0::P0_09<Disconnected>,
    /// nRF52840-DK: NFC2, nRF52: P0.10
    pub nfc_2: p0::P0_10<Disconnected>,
}
26.317073
106
0.564742
dd52898652e21bc998dce1d09fecd49e49662b68
3,510
// Copyright 2019 The Druid Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This example shows how to construct a basic layout.

use druid::kurbo::Circle;
use druid::widget::{Flex, Label, Painter};
use druid::{
    AppLauncher, Color, LinearGradient, LocalizedString, PlatformError, RenderContext, UnitPoint,
    Widget, WidgetExt, WindowDesc,
};

// Shades of grey used by the four demo panels.
const DARK_GREY: Color = Color::grey8(0x3a);
const DARKER_GREY: Color = Color::grey8(0x11);
const LIGHTER_GREY: Color = Color::grey8(0xbb);

/// Builds the demo UI: a 2x2 grid of equally-weighted panels, each showing a
/// different combination of border, background (solid / gradient / custom painter)
/// and corner rounding.
fn build_app() -> impl Widget<()> {
    // Diagonal grey gradient used as the bottom-left panel's background.
    let gradient = LinearGradient::new(
        UnitPoint::TOP_LEFT,
        UnitPoint::BOTTOM_RIGHT,
        (DARKER_GREY, LIGHTER_GREY),
    );

    // a custom background
    let polka_dots = Painter::new(|ctx, _, _| {
        let bounds = ctx.size().to_rect();
        // Dot size scales with the panel; every other row is shifted by half a step.
        let dot_diam = bounds.width().max(bounds.height()) / 20.;
        let dot_spacing = dot_diam * 1.8;
        for y in 0..((bounds.height() / dot_diam).ceil() as usize) {
            for x in 0..((bounds.width() / dot_diam).ceil() as usize) {
                let x_offset = (y % 2) as f64 * (dot_spacing / 2.0);
                let x = x as f64 * dot_spacing + x_offset;
                let y = y as f64 * dot_spacing;
                let circ = Circle::new((x, y), dot_diam / 2.0);
                let purp = Color::rgb(1.0, 0.22, 0.76);
                ctx.fill(circ, &purp);
            }
        }
    });

    // Two rows of two panels; each child has flex weight 1.0 so the grid splits evenly.
    Flex::column()
        .with_flex_child(
            Flex::row()
                .with_flex_child(
                    Label::new("top left")
                        .center()
                        .border(DARK_GREY, 4.0)
                        .padding(10.0),
                    1.0,
                )
                .with_flex_child(
                    Label::new("top right")
                        .center()
                        .background(DARK_GREY)
                        .padding(10.0),
                    1.0,
                ),
            1.0,
        )
        .with_flex_child(
            Flex::row()
                .with_flex_child(
                    Label::new("bottom left")
                        .center()
                        .background(gradient)
                        .rounded(10.0)
                        .padding(10.0),
                    1.0,
                )
                .with_flex_child(
                    Label::new("bottom right")
                        .center()
                        .border(LIGHTER_GREY, 4.0)
                        .background(polka_dots)
                        .rounded(10.0)
                        .padding(10.0),
                    1.0,
                ),
            1.0,
        )
}

/// Creates the demo window and launches the app with unit (`()`) state.
pub fn main() -> Result<(), PlatformError> {
    let main_window = WindowDesc::new(build_app())
        .title(LocalizedString::new("panels-demo-window-title").with_placeholder("Fancy Boxes!"));

    AppLauncher::with_window(main_window)
        .log_to_console()
        .launch(())?;

    Ok(())
}
34.07767
98
0.505983
d956f26d52c9a0636fa855f56587d60351652ccc
26,399
use std::{ cell::Cell, cmp, cmp::min, io::{self, Write}, sync::{Mutex, RwLock}, }; use crossbeam::sync::MsQueue; use scoped_threadpool::Pool; use glam::Vec4; use crate::{ accel::ACCEL_NODE_RAY_TESTS, color::{map_0_1_to_wavelength, SpectralSample, XYZ}, fp_utils::robust_ray_origin, hash::hash_u32, hilbert, image::Image, math::{fast_logit, upper_power_of_two}, mis::power_heuristic, ray::{Ray, RayBatch}, scene::{Scene, SceneLightSample}, surface, timer::Timer, tracer::Tracer, transform_stack::TransformStack, }; #[derive(Debug)] pub struct Renderer<'a> { pub output_file: String, pub resolution: (usize, usize), pub spp: usize, pub seed: u32, pub scene: Scene<'a>, } #[derive(Debug, Copy, Clone)] pub struct RenderStats { pub trace_time: f64, pub accel_node_visits: u64, pub ray_count: u64, pub initial_ray_generation_time: f64, pub ray_generation_time: f64, pub sample_writing_time: f64, pub total_time: f64, } impl RenderStats { fn new() -> RenderStats { RenderStats { trace_time: 0.0, accel_node_visits: 0, ray_count: 0, initial_ray_generation_time: 0.0, ray_generation_time: 0.0, sample_writing_time: 0.0, total_time: 0.0, } } fn collect(&mut self, other: RenderStats) { self.trace_time += other.trace_time; self.accel_node_visits += other.accel_node_visits; self.ray_count += other.ray_count; self.initial_ray_generation_time += other.initial_ray_generation_time; self.ray_generation_time += other.ray_generation_time; self.sample_writing_time += other.sample_writing_time; self.total_time += other.total_time; } } impl<'a> Renderer<'a> { pub fn render( &self, max_samples_per_bucket: u32, crop: Option<(u32, u32, u32, u32)>, thread_count: u32, do_blender_output: bool, ) -> (Image, RenderStats) { let mut tpool = Pool::new(thread_count); let image = Image::new(self.resolution.0, self.resolution.1); let (img_width, img_height) = (image.width(), image.height()); let all_jobs_queued = RwLock::new(false); let collective_stats = RwLock::new(RenderStats::new()); // Set up job queue let 
job_queue = MsQueue::new(); // For printing render progress let pixels_rendered = Mutex::new(Cell::new(0)); // Calculate dimensions and coordinates of what we're rendering. This // accounts for cropping. let (width, height, start_x, start_y) = if let Some((x1, y1, x2, y2)) = crop { let x1 = min(x1 as usize, img_width - 1); let y1 = min(y1 as usize, img_height - 1); let x2 = min(x2 as usize, img_width - 1); let y2 = min(y2 as usize, img_height - 1); (x2 - x1 + 1, y2 - y1 + 1, x1, y1) } else { (img_width, img_height, 0, 0) }; // Render tpool.scoped(|scope| { // Spawn worker tasks for _ in 0..thread_count { let jq = &job_queue; let ajq = &all_jobs_queued; let img = &image; let pixrenref = &pixels_rendered; let cstats = &collective_stats; scope.execute(move || { self.render_job( jq, ajq, img, width * height, pixrenref, cstats, do_blender_output, ) }); } // Print initial 0.00% progress print!("0.00%"); let _ = io::stdout().flush(); // Determine bucket size based on the per-thread maximum number of samples to // calculate at a time. 
let (bucket_w, bucket_h) = { let target_pixels_per_bucket = max_samples_per_bucket as f64 / self.spp as f64; let target_bucket_dim = if target_pixels_per_bucket.sqrt() < 1.0 { 1usize } else { target_pixels_per_bucket.sqrt() as usize }; (target_bucket_dim, target_bucket_dim) }; // Populate job queue let bucket_n = { let bucket_count_x = ((width / bucket_w) + 1) as u32; let bucket_count_y = ((height / bucket_h) + 1) as u32; let larger = cmp::max(bucket_count_x, bucket_count_y); let pow2 = upper_power_of_two(larger); pow2 * pow2 }; for hilbert_d in 0..bucket_n { let (bx, by) = hilbert::d2xy(hilbert_d); let x = bx as usize * bucket_w; let y = by as usize * bucket_h; let w = if width >= x { min(bucket_w, width - x) } else { bucket_w }; let h = if height >= y { min(bucket_h, height - y) } else { bucket_h }; if x < width && y < height && w > 0 && h > 0 { job_queue.push(BucketJob { x: (start_x + x) as u32, y: (start_y + y) as u32, w: w as u32, h: h as u32, }); } } // Mark done queuing jobs *all_jobs_queued.write().unwrap() = true; }); // Clear percentage progress print print!("\r \r",); // Return the rendered image and stats return (image, *collective_stats.read().unwrap()); } /// Waits for buckets in the job queue to render and renders them when available. 
fn render_job(
    &self,
    job_queue: &MsQueue<BucketJob>,            // Work queue of image buckets still to render.
    all_jobs_queued: &RwLock<bool>,            // Set true once the producer has pushed every bucket.
    image: &Image,                             // Shared output image; written bucket-by-bucket.
    total_pixels: usize,                       // Total pixel count, for progress percentage.
    pixels_rendered: &Mutex<Cell<usize>>,      // Shared progress counter across worker threads.
    collected_stats: &RwLock<RenderStats>,     // Aggregated per-thread render statistics.
    do_blender_output: bool,                   // If true, emit bucket data to stdout for Blender.
) {
    let mut stats = RenderStats::new();
    let mut timer = Timer::new();
    let mut total_timer = Timer::new();

    let mut paths = Vec::new();
    let mut rays = RayBatch::new();
    let mut tracer = Tracer::from_assembly(&self.scene.root);
    let mut xform_stack = TransformStack::new();

    // Pre-calculate some useful values related to the image plane.
    // The image plane spans x in [-1, 1]; y is scaled by the aspect ratio.
    let cmpx = 1.0 / self.resolution.0 as f32;
    let cmpy = 1.0 / self.resolution.1 as f32;
    let min_x = -1.0;
    let max_x = 1.0;
    let min_y = -(self.resolution.1 as f32 / self.resolution.0 as f32);
    let max_y = self.resolution.1 as f32 / self.resolution.0 as f32;
    let x_extent = max_x - min_x;
    let y_extent = max_y - min_y;

    // Render
    'render_loop: loop {
        paths.clear();
        rays.clear();

        // Get bucket, or exit if no more jobs left.
        // Spins between "queue empty" and "producer finished" checks; a pop
        // can still succeed after all_jobs_queued flips, so the pop is
        // always tried first.
        let bucket: BucketJob;
        loop {
            if let Some(b) = job_queue.try_pop() {
                bucket = b;
                break;
            } else if *all_jobs_queued.read().unwrap() {
                break 'render_loop;
            }
        }

        timer.tick();
        // Generate light paths and initial rays
        for y in bucket.y..(bucket.y + bucket.h) {
            for x in bucket.x..(bucket.x + bucket.w) {
                // Per-pixel offset into the sample sequence, derived from the
                // pixel coordinates and the render seed.
                let offset = hash_u32(((x as u32) << 16) ^ (y as u32), self.seed);
                for si in 0..self.spp {
                    // Calculate image plane x and y coordinates.
                    // Dimensions 4 and 5 of the sample sequence drive the
                    // pixel-filter jitter (logit-shaped reconstruction filter).
                    let (img_x, img_y) = {
                        let filter_x = fast_logit(get_sample(4, offset + si as u32), 1.5) + 0.5;
                        let filter_y = fast_logit(get_sample(5, offset + si as u32), 1.5) + 0.5;
                        let samp_x = (filter_x + x as f32) * cmpx;
                        let samp_y = (filter_y + y as f32) * cmpy;
                        ((samp_x - 0.5) * x_extent, (0.5 - samp_y) * y_extent)
                    };

                    // Create the light path and initial ray for this sample.
                    // Dimensions 0-1: lens uv, 2: time, 3: wavelength.
                    let (path, ray) = LightPath::new(
                        &self.scene,
                        (x, y),
                        (img_x, img_y),
                        (
                            get_sample(0, offset + si as u32),
                            get_sample(1, offset + si as u32),
                        ),
                        get_sample(2, offset + si as u32),
                        map_0_1_to_wavelength(get_sample(3, offset + si as u32)),
                        offset + si as u32,
                    );
                    paths.push(path);
                    rays.push(ray, false);
                }
            }
        }
        stats.initial_ray_generation_time += timer.tick() as f64;

        // Trace the paths!
        // `paths[..pi]` are the still-alive paths; each round traces their
        // rays and compacts survivors to the front of both arrays.
        let mut pi = paths.len();
        while pi > 0 {
            // Test rays against scene
            let isects = tracer.trace(&mut rays);
            stats.trace_time += timer.tick() as f64;

            // Determine next rays to shoot based on result.
            // `next()` returns true when the path wants another ray traced.
            let mut new_end = 0;
            for i in 0..pi {
                if paths[i].next(&mut xform_stack, &self.scene, &isects[i], &mut rays, i) {
                    paths.swap(new_end, i);
                    rays.swap(new_end, i);
                    new_end += 1;
                }
            }
            rays.truncate(new_end);
            pi = new_end;
            stats.ray_generation_time += timer.tick() as f64;
        }

        {
            // Calculate color based on ray hits and save to image
            let min = (bucket.x, bucket.y);
            let max = (bucket.x + bucket.w, bucket.y + bucket.h);
            let mut img_bucket = image.get_bucket(min, max);
            for path in &paths {
                let path_col = SpectralSample::from_parts(path.color, path.wavelength);
                let mut col = img_bucket.get(path.pixel_co.0, path.pixel_co.1);
                // Average over samples-per-pixel.
                col += XYZ::from_spectral_sample(&path_col) / self.spp as f32;
                img_bucket.set(path.pixel_co.0, path.pixel_co.1, col);
            }
            stats.sample_writing_time += timer.tick() as f64;

            // Pre-calculate base64 encoding if needed
            let base64_enc = if do_blender_output {
                use crate::color::xyz_to_rec709_e;
                Some(img_bucket.rgba_base64(xyz_to_rec709_e))
            } else {
                None
            };

            // Print render progress, and image data if doing blender output.
            // The mutex guard is held for the whole printing section so
            // progress output from different workers doesn't interleave.
            let guard = pixels_rendered.lock().unwrap();
            let mut pr = (*guard).get();
            let percentage_old = pr as f64 / total_pixels as f64 * 100.0;
            pr += bucket.w as usize * bucket.h as usize;
            (*guard).set(pr);
            let percentage_new = pr as f64 / total_pixels as f64 * 100.0;

            let old_string = format!("{:.2}%", percentage_old);
            let new_string = format!("{:.2}%", percentage_new);

            if let Some(bucket_data) = base64_enc {
                // If doing Blender output
                println!("DIV");
                println!("{}", new_string);
                println!("{} {} {} {}", min.0, min.1, max.0, max.1);
                println!("{}", bucket_data);
                println!("BUCKET_END");
                println!("DIV");
            } else {
                // If doing console output, only reprint when the rounded
                // percentage actually changed.
                if new_string != old_string {
                    print!("\r{}", new_string);
                }
            }
            let _ = io::stdout().flush();
        }
    }

    stats.total_time += total_timer.tick() as f64;
    stats.ray_count = tracer.rays_traced();
    ACCEL_NODE_RAY_TESTS.with(|anv| {
        stats.accel_node_visits = anv.get();
        anv.set(0);
    });

    // Collect stats
    collected_stats.write().unwrap().collect(stats);
}
}

/// The kind of ray whose intersection result the path is waiting on.
#[derive(Debug)]
enum LightPathEvent {
    CameraRay,
    BounceRay,
    ShadowRay,
}

/// Per-sample path-tracing state, advanced one ray at a time by `next()`.
#[derive(Debug)]
pub struct LightPath {
    event: LightPathEvent,            // What the in-flight ray is.
    bounce_count: u32,                // Number of bounces taken so far.
    pixel_co: (u32, u32),             // Destination pixel in the image.
    lds_offset: u32,                  // Offset into the low-discrepancy sequence.
    dim_offset: Cell<u32>,            // Next unused LDS dimension.
    time: f32,
    wavelength: f32,                  // Hero wavelength for spectral sampling.
    next_bounce_ray: Option<Ray>,     // Bounce ray queued behind a shadow ray.
    next_attenuation_fac: Vec4,       // Attenuation to apply when that bounce fires.
    closure_sample_pdf: f32,          // Pdf of the last sampled closure direction.
    light_attenuation: Vec4,          // Accumulated throughput along the path.
    pending_color_addition: Vec4,     // Light contribution pending the shadow-ray result.
    color: Vec4,                      // Accumulated color for this sample.
}

#[allow(clippy::new_ret_no_self)]
impl LightPath {
    /// Creates the path state and its initial camera ray for one sample.
    fn new(
        scene: &Scene,
        pixel_co: (u32, u32),
        image_plane_co: (f32, f32),
        lens_uv: (f32, f32),
        time: f32,
        wavelength: f32,
        lds_offset: u32,
    ) -> (LightPath, Ray) {
        (
            LightPath {
                event: LightPathEvent::CameraRay,
                bounce_count: 0,
                pixel_co: pixel_co,
                lds_offset: lds_offset,
                // Dimensions 0-5 were consumed by render_job for lens/time/
                // wavelength/filter, so path sampling starts at dimension 6.
                dim_offset: Cell::new(6),
                time: time,
                wavelength: wavelength,
                next_bounce_ray: None,
                next_attenuation_fac: Vec4::splat(1.0),
                closure_sample_pdf: 1.0,
                light_attenuation: Vec4::splat(1.0),
                pending_color_addition: Vec4::splat(0.0),
                color: Vec4::splat(0.0),
            },
            scene.camera.generate_ray(
                image_plane_co.0,
                image_plane_co.1,
                time,
                wavelength,
                lens_uv.0,
                lens_uv.1,
            ),
        )
    }

    /// Returns the next sample value, advancing the dimension counter.
    fn next_lds_samp(&self) -> f32 {
        let dimension = self.dim_offset.get();
        self.dim_offset.set(dimension + 1);
        get_sample(dimension, self.lds_offset)
    }

    /// Advances the path one step given the intersection result of its
    /// in-flight ray. Writes the next ray (if any) into `rays[ray_idx]` and
    /// returns true when another trace round is needed.
    fn next(
        &mut self,
        xform_stack: &mut TransformStack,
        scene: &Scene,
        isect: &surface::SurfaceIntersection,
        rays: &mut RayBatch,
        ray_idx: usize,
    ) -> bool {
        match self.event {
            //--------------------------------------------------------------------
            // Result of Camera or bounce ray, prepare next bounce and light rays
            LightPathEvent::CameraRay | LightPathEvent::BounceRay => {
                if let surface::SurfaceIntersection::Hit {
                    intersection_data: ref idata,
                    ref closure,
                } = *isect
                {
                    // Hit something!  Do the stuff

                    // If it's an emission closure, handle specially:
                    // - Collect light from the emission.
                    // - Terminate the path.
                    use crate::shading::surface_closure::SurfaceClosure;
                    if let SurfaceClosure::Emit(color) = *closure {
                        let color = color.to_spectral_sample(self.wavelength).e;
                        if let LightPathEvent::CameraRay = self.event {
                            self.color += color;
                        } else {
                            // Weight bounce-found emission by MIS against
                            // direct light sampling.
                            let mis_pdf =
                                power_heuristic(self.closure_sample_pdf, idata.sample_pdf);
                            self.color += color * self.light_attenuation / mis_pdf;
                        };

                        return false;
                    }

                    // Roll the previous closure pdf into the attenuation
                    self.light_attenuation /= self.closure_sample_pdf;

                    // Prepare light ray
                    let light_n = self.next_lds_samp();
                    let light_uvw = (
                        self.next_lds_samp(),
                        self.next_lds_samp(),
                        self.next_lds_samp(),
                    );
                    xform_stack.clear();
                    let light_info = scene.sample_lights(
                        xform_stack,
                        light_n,
                        light_uvw,
                        self.wavelength,
                        self.time,
                        isect,
                    );
                    let found_light = if light_info.is_none()
                        || light_info.pdf() <= 0.0
                        || light_info.selection_pdf() <= 0.0
                    {
                        false
                    } else {
                        let light_pdf = light_info.pdf();
                        let light_sel_pdf = light_info.selection_pdf();

                        // Calculate the shadow ray and surface closure stuff
                        let (attenuation, closure_pdf, shadow_ray) = match light_info {
                            SceneLightSample::None => unreachable!(),

                            // Distant light
                            SceneLightSample::Distant { direction, .. } => {
                                let (attenuation, closure_pdf) = closure.evaluate(
                                    rays.dir(ray_idx),
                                    direction,
                                    idata.nor,
                                    idata.nor_g,
                                    self.wavelength,
                                );
                                let shadow_ray = {
                                    // Calculate the shadow ray for testing if the light is
                                    // in shadow or not.
                                    let offset_pos = robust_ray_origin(
                                        idata.pos,
                                        idata.pos_err,
                                        idata.nor_g.normalized(),
                                        direction,
                                    );
                                    Ray {
                                        orig: offset_pos,
                                        dir: direction,
                                        time: self.time,
                                        wavelength: self.wavelength,
                                        max_t: std::f32::INFINITY,
                                    }
                                };
                                (attenuation, closure_pdf, shadow_ray)
                            }

                            // Surface light
                            SceneLightSample::Surface { sample_geo, .. } => {
                                let dir = sample_geo.0 - idata.pos;
                                let (attenuation, closure_pdf) = closure.evaluate(
                                    rays.dir(ray_idx),
                                    dir,
                                    idata.nor,
                                    idata.nor_g,
                                    self.wavelength,
                                );
                                let shadow_ray = {
                                    // Calculate the shadow ray for testing if the light is
                                    // in shadow or not.  Both endpoints are
                                    // offset robustly to avoid self-shadowing.
                                    let offset_pos = robust_ray_origin(
                                        idata.pos,
                                        idata.pos_err,
                                        idata.nor_g.normalized(),
                                        dir,
                                    );
                                    let offset_end = robust_ray_origin(
                                        sample_geo.0,
                                        sample_geo.2,
                                        sample_geo.1.normalized(),
                                        -dir,
                                    );
                                    Ray {
                                        orig: offset_pos,
                                        dir: offset_end - offset_pos,
                                        time: self.time,
                                        wavelength: self.wavelength,
                                        // dir spans the full segment, so the
                                        // light itself sits at t = 1.
                                        max_t: 1.0,
                                    }
                                };
                                (attenuation, closure_pdf, shadow_ray)
                            }
                        };

                        // If there's any possible contribution, set up for a
                        // light ray.
                        if attenuation.e.max_element() <= 0.0 {
                            false
                        } else {
                            // Calculate and store the light that will be contributed
                            // to the film plane if the light is not in shadow.
                            let light_mis_pdf = power_heuristic(light_pdf, closure_pdf);
                            self.pending_color_addition = light_info.color().e
                                * attenuation.e
                                * self.light_attenuation
                                / (light_mis_pdf * light_sel_pdf);

                            rays.set_from_ray(&shadow_ray, true, ray_idx);

                            true
                        }
                    };

                    // Prepare bounce ray
                    let do_bounce = if self.bounce_count < 2 {
                        self.bounce_count += 1;

                        // Sample closure
                        let (dir, filter, pdf) = {
                            let u = self.next_lds_samp();
                            let v = self.next_lds_samp();
                            closure.sample(
                                idata.incoming,
                                idata.nor,
                                idata.nor_g,
                                (u, v),
                                self.wavelength,
                            )
                        };

                        // Check if pdf is zero, to avoid NaN's.
                        if (pdf > 0.0) && (filter.e.max_element() > 0.0) {
                            // Account for the additional light attenuation from
                            // this bounce
                            self.next_attenuation_fac = filter.e;
                            self.closure_sample_pdf = pdf;

                            // Calculate the ray for this bounce
                            let offset_pos = robust_ray_origin(
                                idata.pos,
                                idata.pos_err,
                                idata.nor_g.normalized(),
                                dir,
                            );
                            self.next_bounce_ray = Some(Ray {
                                orig: offset_pos,
                                dir: dir,
                                time: self.time,
                                wavelength: self.wavelength,
                                max_t: std::f32::INFINITY,
                            });

                            true
                        } else {
                            false
                        }
                    } else {
                        self.next_bounce_ray = None;
                        false
                    };

                    // Book keeping for next event.  A found light takes
                    // priority: the shadow ray is traced first and the bounce
                    // ray (if any) fires afterwards from the ShadowRay arm.
                    if found_light {
                        self.event = LightPathEvent::ShadowRay;
                        return true;
                    } else if do_bounce {
                        rays.set_from_ray(&self.next_bounce_ray.unwrap(), false, ray_idx);
                        self.event = LightPathEvent::BounceRay;
                        self.light_attenuation *= self.next_attenuation_fac;
                        return true;
                    } else {
                        return false;
                    }
                } else {
                    // Didn't hit anything, so background color
                    self.color += scene
                        .world
                        .background_color
                        .to_spectral_sample(self.wavelength)
                        .e
                        * self.light_attenuation
                        / self.closure_sample_pdf;
                    return false;
                }
            }

            //--------------------------------------------------------------------
            // Result of shadow ray from sampling a light
            LightPathEvent::ShadowRay => {
                // If the light was not in shadow, add its light to the film
                // plane.
                if let surface::SurfaceIntersection::Miss = *isect {
                    self.color += self.pending_color_addition;
                }

                // Set up for the next bounce, if any
                if let Some(ref nbr) = self.next_bounce_ray {
                    rays.set_from_ray(nbr, false, ray_idx);
                    self.light_attenuation *= self.next_attenuation_fac;
                    self.event = LightPathEvent::BounceRay;
                    return true;
                } else {
                    return false;
                }
            }
        }
    }
}

/// Gets a sample, using LDS samples for lower dimensions,
/// and switching to random samples at higher dimensions where
/// LDS samples aren't available.
#[inline(always)]
fn get_sample(dimension: u32, i: u32) -> f32 {
    use crate::hash::hash_u32_to_f32;

    if dimension >= halton::MAX_DIMENSION {
        // Past the last dimension the Halton tables cover: fall back to a
        // deterministic hash-based pseudo-random value for (dimension, i).
        hash_u32_to_f32(dimension, i)
    } else {
        // Low dimension: use the low-discrepancy Halton sequence.
        halton::sample(dimension, i)
    }
}

/// A rectangular region of the image assigned to a worker as one unit of
/// render work.
#[derive(Debug)]
struct BucketJob {
    x: u32,
    y: u32,
    w: u32,
    h: u32,
}
37.712857
100
0.42638
71ec289a084e87f454fa4b25e7c0614eaa3dde37
722
/// Paths to every asset file the game loads, relative to the asset root.
pub struct AssetPaths {
    pub fira_sans: &'static str,          // Font file (FiraSans Bold).
    pub audio_birds: &'static str,        // Bird-sounds audio clip.
    pub texture_tree: &'static str,
    pub texture_wood_logs: &'static str,
    pub texture_house: &'static str,
    pub texture_man: &'static str,
    pub texture_grad_shadow: &'static str,
    pub texture_stockpile: &'static str,
}

/// The single, canonical table of asset paths used throughout the game.
pub const PATHS: AssetPaths = AssetPaths {
    fira_sans: "fonts/FiraSans-Bold.ttf",
    audio_birds: "audio/birds-1.ogg",
    texture_tree: "textures/tree.png",
    texture_wood_logs: "textures/wood-logs.png",
    texture_house: "textures/house.png",
    texture_man: "textures/man.png",
    texture_grad_shadow: "textures/grad-shadow.png",
    texture_stockpile: "textures/stockpile.png",
};
32.818182
52
0.699446
6ad3ec67b29642bb251aef0cae43a3c216b2fe1f
573
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Used to cause ICE // error-pattern: mismatched types static VEC: [u32; 256] = vec![]; fn main() {}
30.157895
68
0.720768
8a249287adadc9c168ef2f6ed061ecc332dc1f53
947
use std::{
    ops::{Deref, Range},
    rc::Rc,
};

/// A reference-counted view into a boxed slice.
///
/// All views created from the same `SharedSlice` share one backing
/// allocation via `Rc`; each view only records the `start..end` window it
/// exposes, so creating sub-views never copies element data.
pub struct SharedSlice<T> {
    inner: Rc<[T]>,
    start: usize, // Inclusive start of this view's window into `inner`.
    end: usize,   // Exclusive end of this view's window into `inner`.
}

impl<T> SharedSlice<T> {
    /// Wraps `slice`, exposing all of it.
    pub fn new(slice: Box<[T]>) -> Self {
        let start = 0;
        let end = slice.len();
        Self {
            inner: slice.into(),
            start,
            end,
        }
    }

    /// Returns a new view restricted to `range`. Indices are absolute, i.e.
    /// relative to the original backing slice, and must lie within this
    /// view's own window.
    ///
    /// # Panics
    ///
    /// Panics if `range` extends outside this view's window, or if the range
    /// is inverted (`start > end`).
    pub fn from(&self, range: Range<usize>) -> Self {
        let Range { start, end } = range;
        // `start <= end` is checked here so an inverted range fails at
        // construction with a clear message rather than with an opaque
        // slice-index panic on first deref. The message previously printed
        // the valid range as "{}_{}"; it now reads as a proper range.
        assert!(
            start <= end && start >= self.start && end <= self.end,
            "Attempted to share range {}..{}, but the valid range is {}..{}",
            start,
            end,
            self.start,
            self.end
        );
        Self {
            inner: Rc::clone(&self.inner),
            start,
            end,
        }
    }
}

impl<T> Deref for SharedSlice<T> {
    type Target = [T];

    fn deref(&self) -> &[T] {
        &self.inner[self.start..self.end]
    }
}
18.94
76
0.432946
89ee43906b678d13975c335043bb8a2741d42514
11,413
use anyhow::anyhow; use scraper::{element_ref, html}; use serde::{Deserialize, Serialize}; use std::{collections, str}; lazy_static::lazy_static! { static ref TR: scraper::Selector = scraper::Selector::parse("tr").unwrap(); static ref TD: scraper::Selector = scraper::Selector::parse("td").unwrap(); static ref TABLE: scraper::Selector = scraper::Selector::parse("table").unwrap(); static ref A: scraper::Selector = scraper::Selector::parse("a").unwrap(); static ref P: scraper::Selector = scraper::Selector::parse("p").unwrap(); static ref H3: scraper::Selector = scraper::Selector::parse("h3").unwrap(); static ref SPAN: scraper::Selector = scraper::Selector::parse("span").unwrap(); } #[derive(Debug, Clone, Serialize, Deserialize, Hash, PartialEq, Eq)] pub struct Report { pub name: String, pub sub_type: String, } impl Report { pub fn should_skip(&self) -> bool { // skip historical dataset - there are no table definitions anyway (self.name == "HISTORICAL") || (self.name == "CONFIGURATION") // temporary || (self.name == "DEMAND_FORECASTS" && self.sub_type == "INTERMITTENT_P5_RUN") || (self.name == "DEMAND_FORECASTS" && self.sub_type == "INTERMITTENT_P5_PRED") || (self.name == "BILLING_RUN" && self.sub_type == "BILLINGAPCCOMPENSATION") || (self.name == "BILLING_RUN" && self.sub_type == "BILLINGAPCRECOVERY") || (self.name == "BILLING_RUN" && self.sub_type == "BILLING_RES_TRADER_RECOVERY") || (self.name == "BILLING_RUN" && self.sub_type == "BILLING_RES_TRADER_PAYMENT") || (self.name == "SETTLEMENT_DATA" && self.sub_type == "SETRESERVERECOVERY") } } pub type Packages = collections::BTreeMap<String, collections::BTreeMap<String, TablePage>>; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TablePage { pub summary: TableSummary, pub description: Option<Description>, pub notes: Option<TableNotes>, pub primary_key_columns: PkColumns, pub columns: TableColumns, } impl TablePage { pub fn get_summary_name(&self) -> String { self.summary.get_name() } pub fn from_html(mut 
docs: Vec<html::Html>) -> anyhow::Result<TablePage> { let first = docs.remove(0); let mut headings = first.select(&H3); // extract the table name. This is unkwnown but is always the first let table_name = headings.next().unwrap(); let summary = element_ref::ElementRef::wrap( table_name.next_sibling().unwrap().next_sibling().unwrap(), ) .unwrap(); // now get other info let mut details = collections::HashMap::new(); for h3 in headings { let detail_type = h3.select(&A).next().unwrap().inner_html(); let detail_table = element_ref::ElementRef::wrap(h3.next_sibling().unwrap().next_sibling().unwrap()) .unwrap(); if detail_type != "Index" { details.insert(detail_type.replace(" ", ""), detail_table); } } let mut extra_columns = Vec::new(); for doc in docs.iter() { let h3 = doc.select(&H3).next().unwrap(); let heading = h3.inner_html(); if heading.trim() != "Content" { dbg!(heading); break; } let tab = doc.select(&TABLE).next().unwrap(); let col = TableColumns::from_html(&tab)?; extra_columns.push(col); } let mut first_column_set = details .get("Content") .ok_or_else(|| anyhow!("Missing required field Content")) .and_then(|t| TableColumns::from_html(t))?; for extra in extra_columns { first_column_set.add_columns(extra); } let table_info = TablePage { columns: first_column_set, description: crate::swap_nonreq( details.get("Description").map(Description::from_html), )?, notes: crate::swap_nonreq(details.get("Notes").map(TableNotes::from_html))?, primary_key_columns: details .get("PrimaryKeyColumns") .ok_or_else(|| anyhow!("Missing required field Primary Key Columns")) .and_then(PkColumns::from_html)?, summary: TableSummary::from_html(&summary)?, }; Ok(table_info) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TableSummary { pub name: String, pub comment: String, } impl TableSummary { pub fn get_name(&self) -> String { self.name.clone() } fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<TableSummary> { let mut cells = tab.select(&TD); let name_el = 
cells .nth(1) .ok_or_else(|| anyhow!("Name cell missing from sumary table"))?; let name = name_el .select(&A) .next() .ok_or_else(|| anyhow!("No content in summary table name"))? .inner_html() .replace(" ", "") .replace("\n", ""); let comment_el = cells .nth(1) .ok_or_else(|| anyhow!("Comment cell missing from summary table"))?; let comment = comment_el .select(&P) .next() .ok_or_else(|| anyhow!("No content in summary table comment cell"))? .inner_html() .replace("\n", ""); Ok(TableSummary { name, comment }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PkColumns { pub cols: Vec<String>, } impl PkColumns { fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<PkColumns> { let cols = tab .select(&P) .skip(1) .map(|er| er.inner_html().replace("\n", "")) .collect(); Ok(PkColumns { cols }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Description { pub inner: String, } impl Description { fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<Description> { let inner = tab .select(&SPAN) .map(|er| er.inner_html()) .collect::<Vec<_>>() .join(" "); Ok(Description { inner }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TableNotes { pub notes: Vec<TableNote>, } impl TableNotes { fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<TableNotes> { let notes = tab .select(&TR) .skip(1) .map(|el| TableNote::from_html(&el)) .collect::<anyhow::Result<Vec<_>>>()?; Ok(TableNotes { notes }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TableNote { pub name: String, pub comment: String, pub value: String, } impl TableNote { fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<TableNote> { let mut cells = tab.select(&P); let name = cells.next().unwrap().inner_html().replace("\n", ""); let comment = cells.next().unwrap().inner_html().replace("\n", ""); let value = cells.next().unwrap().inner_html().replace("\n", ""); Ok(TableNote { name, comment, value, }) } } #[derive(Debug, Clone, 
Serialize, Deserialize)] pub struct TableColumns { pub columns: Vec<TableColumn>, } impl TableColumns { fn add_columns(&mut self, mut other: TableColumns) { self.columns.append(&mut other.columns); } fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<TableColumns> { let columns = tab .select(&TR) .skip(1) .map(|el| TableColumn::from_html(&el)) .collect::<anyhow::Result<Vec<_>>>()?; Ok(TableColumns { columns }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct TableColumn { pub name: String, pub data_type: DataType, pub mandatory: bool, pub comment: String, } impl TableColumn { pub fn field_name(&self) -> String { use heck::SnakeCase; format!("{}", self.name.to_snake_case()) } fn from_html(tab: &element_ref::ElementRef) -> anyhow::Result<TableColumn> { let mut cells = tab.select(&P); let name = cells.next().unwrap().inner_html().replace("\n", ""); let data_type = cells .next() .unwrap() .inner_html() .replace("\n", "") .replace(" ", "") .parse()?; let mandatory = cells.next().unwrap().inner_html().starts_with("X"); let comment = cells.next().unwrap().inner_html().replace("\n", ""); Ok(TableColumn { name, data_type, mandatory, comment, }) } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub enum DataType { Varchar { length: i32 }, Char, Date, DateTime, Decimal { precision: u8, scale: u8 }, Integer { precision: u8 }, } lazy_static::lazy_static! 
{ static ref VARCHAR: regex::Regex = regex::Regex::new(r"varchar(2)?\((\d+)\)").unwrap(); static ref DECIMAL: regex::Regex = regex::Regex::new(r"number\((\d+),(\d+)\)").unwrap(); static ref INTEGER: regex::Regex = regex::Regex::new(r"number\((\d+)\)").unwrap(); static ref TIMESTAMP: regex::Regex = regex::Regex::new(r"timestamp\((\d+)\)").unwrap(); } impl DataType { fn parse_varchar(s: &str) -> anyhow::Result<DataType> { let length = VARCHAR .captures(s) .ok_or_else(|| anyhow!("Couldnt parse {} as Varchar", s))?[2] .parse()?; Ok(DataType::Varchar { length }) } fn parse_int(s: &str) -> anyhow::Result<DataType> { let precision = INTEGER .captures(s) .ok_or_else(|| anyhow!("Couldnt parse {} as Integer", s))?[1] .parse()?; Ok(DataType::Integer { precision }) } fn parse_decimal(s: &str) -> anyhow::Result<DataType> { let caps = DECIMAL .captures(s) .ok_or_else(|| anyhow!("Couldnt parse {} as Decimal", s))?; let (precision, scale) = (caps[1].parse()?, caps[2].parse()?); Ok(DataType::Decimal { precision, scale }) } } impl str::FromStr for DataType { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result<DataType> { let input = s.replace(" ", "").to_lowercase(); let set = regex::RegexSet::new(&[ r"date", r"char\(1\)", r"varchar\S+", r"timestamp\((\d+)\)", r"number\((\d+)\)", r"number\((\d+),(\d+)\)", ]) .unwrap(); match set .matches(&input) .into_iter() .collect::<Vec<_>>() .as_slice() { [0] => Ok(DataType::Date), [1] => Ok(DataType::Char), [2] => DataType::parse_varchar(&input), [3] => Ok(DataType::DateTime), [4] => DataType::parse_int(&input), [5] => DataType::parse_decimal(&input), _ => { if input == "varchar(1)" { DataType::parse_varchar(&input) } else { Err(anyhow!("Unexpected type {}", input)) } } } } }
32.702006
97
0.554193
0a38ab69f4290753c638e31b1e91345017a33319
75,214
#[cfg(not(target_os = "dragonfly"))] use libc; use libc::c_int; use std::{fmt, io, error}; use {Error, Result}; pub use self::consts::*; cfg_if! { if #[cfg(any(target_os = "freebsd", target_os = "ios", target_os = "macos"))] { unsafe fn errno_location() -> *mut c_int { libc::__error() } } else if #[cfg(target_os = "dragonfly")] { // DragonFly uses a thread-local errno variable, but #[thread_local] is // feature-gated and not available in stable Rust as of this writing // (Rust 1.21.0). We have to use a C extension to access it // (src/errno_dragonfly.c). // // Tracking issue for `thread_local` stabilization: // // https://github.com/rust-lang/rust/issues/29594 // // Once this becomes stable, we can remove build.rs, // src/errno_dragonfly.c, and use: // // extern { #[thread_local] static errno: c_int; } // #[link(name="errno_dragonfly", kind="static")] extern { pub fn errno_location() -> *mut c_int; } } else if #[cfg(any(target_os = "android", target_os = "netbsd", target_os = "openbsd"))] { unsafe fn errno_location() -> *mut c_int { libc::__errno() } } else if #[cfg(target_os = "linux")] { unsafe fn errno_location() -> *mut c_int { libc::__errno_location() } } } /// Sets the platform-specific errno to no-error unsafe fn clear() -> () { *errno_location() = 0; } /// Returns the platform-specific value of errno pub fn errno() -> i32 { unsafe { (*errno_location()) as i32 } } impl Errno { pub fn last() -> Self { last() } pub fn desc(self) -> &'static str { desc(self) } pub fn from_i32(err: i32) -> Errno { from_i32(err) } pub unsafe fn clear() -> () { clear() } /// Returns `Ok(value)` if it does not contain the sentinel value. This /// should not be used when `-1` is not the errno sentinel value. 
pub fn result<S: ErrnoSentinel + PartialEq<S>>(value: S) -> Result<S> { if value == S::sentinel() { Err(Error::Sys(Self::last())) } else { Ok(value) } } } /// The sentinel value indicates that a function failed and more detailed /// information about the error can be found in `errno` pub trait ErrnoSentinel: Sized { fn sentinel() -> Self; } impl ErrnoSentinel for isize { fn sentinel() -> Self { -1 } } impl ErrnoSentinel for i32 { fn sentinel() -> Self { -1 } } impl ErrnoSentinel for i64 { fn sentinel() -> Self { -1 } } impl ErrnoSentinel for *mut libc::c_void { fn sentinel() -> Self { (-1 as isize) as *mut libc::c_void } } impl error::Error for Errno { fn description(&self) -> &str { self.desc() } } impl fmt::Display for Errno { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{:?}: {}", self, self.desc()) } } impl From<Errno> for io::Error { fn from(err: Errno) -> Self { io::Error::from_raw_os_error(err as i32) } } fn last() -> Errno { Errno::from_i32(errno()) } fn desc(errno: Errno) -> &'static str { use self::Errno::*; match errno { UnknownErrno => "Unknown errno", EPERM => "Operation not permitted", ENOENT => "No such file or directory", ESRCH => "No such process", EINTR => "Interrupted system call", EIO => "I/O error", ENXIO => "No such device or address", E2BIG => "Argument list too long", ENOEXEC => "Exec format error", EBADF => "Bad file number", ECHILD => "No child processes", EAGAIN => "Try again", ENOMEM => "Out of memory", EACCES => "Permission denied", EFAULT => "Bad address", ENOTBLK => "Block device required", EBUSY => "Device or resource busy", EEXIST => "File exists", EXDEV => "Cross-device link", ENODEV => "No such device", ENOTDIR => "Not a directory", EISDIR => "Is a directory", EINVAL => "Invalid argument", ENFILE => "File table overflow", EMFILE => "Too many open files", ENOTTY => "Not a typewriter", ETXTBSY => "Text file busy", EFBIG => "File too large", ENOSPC => "No space left on device", ESPIPE => "Illegal seek", EROFS => 
"Read-only file system", EMLINK => "Too many links", EPIPE => "Broken pipe", EDOM => "Math argument out of domain of func", ERANGE => "Math result not representable", EDEADLK => "Resource deadlock would occur", ENAMETOOLONG => "File name too long", ENOLCK => "No record locks available", ENOSYS => "Function not implemented", ENOTEMPTY => "Directory not empty", ELOOP => "Too many symbolic links encountered", ENOMSG => "No message of desired type", EIDRM => "Identifier removed", EINPROGRESS => "Operation now in progress", EALREADY => "Operation already in progress", ENOTSOCK => "Socket operation on non-socket", EDESTADDRREQ => "Destination address required", EMSGSIZE => "Message too long", EPROTOTYPE => "Protocol wrong type for socket", ENOPROTOOPT => "Protocol not available", EPROTONOSUPPORT => "Protocol not supported", ESOCKTNOSUPPORT => "Socket type not supported", EPFNOSUPPORT => "Protocol family not supported", EAFNOSUPPORT => "Address family not supported by protocol", EADDRINUSE => "Address already in use", EADDRNOTAVAIL => "Cannot assign requested address", ENETDOWN => "Network is down", ENETUNREACH => "Network is unreachable", ENETRESET => "Network dropped connection because of reset", ECONNABORTED => "Software caused connection abort", ECONNRESET => "Connection reset by peer", ENOBUFS => "No buffer space available", EISCONN => "Transport endpoint is already connected", ENOTCONN => "Transport endpoint is not connected", ESHUTDOWN => "Cannot send after transport endpoint shutdown", ETOOMANYREFS => "Too many references: cannot splice", ETIMEDOUT => "Connection timed out", ECONNREFUSED => "Connection refused", EHOSTDOWN => "Host is down", EHOSTUNREACH => "No route to host", #[cfg(any(target_os = "linux", target_os = "android"))] ECHRNG => "Channel number out of range", #[cfg(any(target_os = "linux", target_os = "android"))] EL2NSYNC => "Level 2 not synchronized", #[cfg(any(target_os = "linux", target_os = "android"))] EL3HLT => "Level 3 halted", 
// Tail of the per-errno description match (the enclosing function's header is
// above this excerpt): each arm maps an Errno variant to a human-readable
// message, gated with #[cfg(...)] so only variants that exist on the target OS
// are compiled. The strings mirror the conventional strerror(3) texts.
#[cfg(any(target_os = "linux", target_os = "android"))] EL3RST => "Level 3 reset", #[cfg(any(target_os = "linux", target_os = "android"))] ELNRNG => "Link number out of range", #[cfg(any(target_os = "linux", target_os = "android"))] EUNATCH => "Protocol driver not attached", #[cfg(any(target_os = "linux", target_os = "android"))] ENOCSI => "No CSI structure available", #[cfg(any(target_os = "linux", target_os = "android"))] EL2HLT => "Level 2 halted", #[cfg(any(target_os = "linux", target_os = "android"))] EBADE => "Invalid exchange", #[cfg(any(target_os = "linux", target_os = "android"))] EBADR => "Invalid request descriptor", #[cfg(any(target_os = "linux", target_os = "android"))] EXFULL => "Exchange full", #[cfg(any(target_os = "linux", target_os = "android"))] ENOANO => "No anode", #[cfg(any(target_os = "linux", target_os = "android"))] EBADRQC => "Invalid request code", #[cfg(any(target_os = "linux", target_os = "android"))] EBADSLT => "Invalid slot", #[cfg(any(target_os = "linux", target_os = "android"))] EBFONT => "Bad font file format", #[cfg(any(target_os = "linux", target_os = "android"))] ENOSTR => "Device not a stream", #[cfg(any(target_os = "linux", target_os = "android"))] ENODATA => "No data available", #[cfg(any(target_os = "linux", target_os = "android"))] ETIME => "Timer expired", #[cfg(any(target_os = "linux", target_os = "android"))] ENOSR => "Out of streams resources", #[cfg(any(target_os = "linux", target_os = "android"))] ENONET => "Machine is not on the network", #[cfg(any(target_os = "linux", target_os = "android"))] ENOPKG => "Package not installed", #[cfg(any(target_os = "linux", target_os = "android"))] EREMOTE => "Object is remote", #[cfg(any(target_os = "linux", target_os = "android"))] ENOLINK => "Link has been severed", #[cfg(any(target_os = "linux", target_os = "android"))] EADV => "Advertise error", #[cfg(any(target_os = "linux", target_os = "android"))] ESRMNT => "Srmount error", #[cfg(any(target_os = "linux", target_os = 
"android"))] ECOMM => "Communication error on send", #[cfg(any(target_os = "linux", target_os = "android"))] EPROTO => "Protocol error", #[cfg(any(target_os = "linux", target_os = "android"))] EMULTIHOP => "Multihop attempted", #[cfg(any(target_os = "linux", target_os = "android"))] EDOTDOT => "RFS specific error", #[cfg(any(target_os = "linux", target_os = "android"))] EBADMSG => "Not a data message", #[cfg(any(target_os = "linux", target_os = "android"))] EOVERFLOW => "Value too large for defined data type", #[cfg(any(target_os = "linux", target_os = "android"))] ENOTUNIQ => "Name not unique on network", #[cfg(any(target_os = "linux", target_os = "android"))] EBADFD => "File descriptor in bad state", #[cfg(any(target_os = "linux", target_os = "android"))] EREMCHG => "Remote address changed", #[cfg(any(target_os = "linux", target_os = "android"))] ELIBACC => "Can not access a needed shared library", #[cfg(any(target_os = "linux", target_os = "android"))] ELIBBAD => "Accessing a corrupted shared library", #[cfg(any(target_os = "linux", target_os = "android"))] ELIBSCN => ".lib section in a.out corrupted", #[cfg(any(target_os = "linux", target_os = "android"))] ELIBMAX => "Attempting to link in too many shared libraries", #[cfg(any(target_os = "linux", target_os = "android"))] ELIBEXEC => "Cannot exec a shared library directly", #[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))] EILSEQ => "Illegal byte sequence", #[cfg(any(target_os = "linux", target_os = "android"))] ERESTART => "Interrupted system call should be restarted", #[cfg(any(target_os = "linux", target_os = "android"))] ESTRPIPE => "Streams pipe error", #[cfg(any(target_os = "linux", target_os = "android"))] EUSERS => "Too many users", #[cfg(any(target_os = "linux", target_os = "android", target_os = "netbsd"))] EOPNOTSUPP => "Operation not supported on transport endpoint", #[cfg(any(target_os = "linux", target_os = "android"))] ESTALE => "Stale file handle", 
// NOTE(review): some variant names (e.g. EOVERFLOW, EILSEQ, EBADMSG) reappear
// below with different wording for the BSD/macOS targets; the cfg gates keep
// the arms mutually exclusive per target.
#[cfg(any(target_os = "linux", target_os = "android"))] EUCLEAN => "Structure needs cleaning", #[cfg(any(target_os = "linux", target_os = "android"))] ENOTNAM => "Not a XENIX named type file", #[cfg(any(target_os = "linux", target_os = "android"))] ENAVAIL => "No XENIX semaphores available", #[cfg(any(target_os = "linux", target_os = "android"))] EISNAM => "Is a named type file", #[cfg(any(target_os = "linux", target_os = "android"))] EREMOTEIO => "Remote I/O error", #[cfg(any(target_os = "linux", target_os = "android"))] EDQUOT => "Quota exceeded", #[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd", target_os = "dragonfly"))] ENOMEDIUM => "No medium found", #[cfg(any(target_os = "linux", target_os = "android", target_os = "openbsd"))] EMEDIUMTYPE => "Wrong medium type", #[cfg(any(target_os = "linux", target_os = "android"))] ECANCELED => "Operation canceled", #[cfg(any(target_os = "linux", target_os = "android"))] ENOKEY => "Required key not available", #[cfg(any(target_os = "linux", target_os = "android"))] EKEYEXPIRED => "Key has expired", #[cfg(any(target_os = "linux", target_os = "android"))] EKEYREVOKED => "Key has been revoked", #[cfg(any(target_os = "linux", target_os = "android"))] EKEYREJECTED => "Key was rejected by service", #[cfg(any(target_os = "linux", target_os = "android"))] EOWNERDEAD => "Owner died", #[cfg(any(target_os = "linux", target_os = "android"))] ENOTRECOVERABLE => "State not recoverable", #[cfg(all(target_os = "linux", not(target_arch="mips")))] ERFKILL => "Operation not possible due to RF-kill", #[cfg(all(target_os = "linux", not(target_arch="mips")))] EHWPOISON => "Memory page has hardware error", #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] EDOOFUS => "Programming error", #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] EMULTIHOP => "Multihop attempted", #[cfg(any(target_os = "freebsd", target_os = "dragonfly"))] ENOLINK => "Link has been severed", #[cfg(target_os = "freebsd")] 
ENOTCAPABLE => "Capabilities insufficient", #[cfg(target_os = "freebsd")] ECAPMODE => "Not permitted in capability mode", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ENEEDAUTH => "Need authenticator", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EOVERFLOW => "Value too large to be stored in data type", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "netbsd"))] EILSEQ => "Illegal byte sequence", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ENOATTR => "Attribute not found", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "netbsd"))] EBADMSG => "Bad message", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "netbsd"))] EPROTO => "Protocol error", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "ios"))] ENOTRECOVERABLE => "State not recoverable", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "ios"))] EOWNERDEAD => "Previous owner died", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ENOTSUP => "Operation not supported", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EPROCLIM => "Too many processes", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EUSERS => "Too many users", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = 
"ios", target_os = "openbsd", target_os = "netbsd"))] EDQUOT => "Disc quota exceeded", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ESTALE => "Stale NFS file handle", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EREMOTE => "Too many levels of remote in path", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EBADRPC => "RPC struct is bad", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ERPCMISMATCH => "RPC version wrong", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EPROGUNAVAIL => "RPC prog. not avail", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EPROGMISMATCH => "Program version wrong", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EPROCUNAVAIL => "Bad procedure for program", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EFTYPE => "Inappropriate file type or format", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] EAUTH => "Authentication error", #[cfg(any(target_os = "macos", target_os = "freebsd", target_os = "dragonfly", target_os = "ios", target_os = "openbsd", target_os = "netbsd"))] ECANCELED => "Operation canceled", #[cfg(any(target_os = "macos", target_os = "ios"))] EPWROFF => 
"Device power is off", #[cfg(any(target_os = "macos", target_os = "ios"))] EDEVERR => "Device error, e.g. paper out", #[cfg(any(target_os = "macos", target_os = "ios"))] EBADEXEC => "Bad executable", #[cfg(any(target_os = "macos", target_os = "ios"))] EBADARCH => "Bad CPU type in executable", #[cfg(any(target_os = "macos", target_os = "ios"))] ESHLIBVERS => "Shared library version mismatch", #[cfg(any(target_os = "macos", target_os = "ios"))] EBADMACHO => "Malformed Macho file", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] EMULTIHOP => "Reserved", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] ENODATA => "No message available on STREAM", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] ENOLINK => "Reserved", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] ENOSR => "No STREAM resources", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] ENOSTR => "Not a STREAM", #[cfg(any(target_os = "macos", target_os = "ios", target_os = "netbsd"))] ETIME => "STREAM ioctl timeout", #[cfg(any(target_os = "macos", target_os = "ios"))] EOPNOTSUPP => "Operation not supported on socket", #[cfg(any(target_os = "macos", target_os = "ios"))] ENOPOLICY => "No such policy registered", #[cfg(any(target_os = "macos", target_os = "ios"))] EQFULL => "Interface output queue is full", #[cfg(target_os = "openbsd")] EOPNOTSUPP => "Operation not supported", #[cfg(target_os = "openbsd")] EIPSEC => "IPsec processing failure", #[cfg(target_os = "dragonfly")] EUNUSED94 | EUNUSED95 | EUNUSED96 | EUNUSED97 | EUNUSED98 => "Unused", #[cfg(target_os = "dragonfly")] EASYNC => "Async", } } #[cfg(any(target_os = "linux", target_os = "android"))] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, 
// Linux/Android `consts` module: Errno is #[repr(i32)] with each discriminant
// taken verbatim from libc, so the enum round-trips losslessly with raw errno
// values. UnknownErrno = 0 is the fallback for values from_i32 does not know.
E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EAGAIN = libc::EAGAIN, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, EDEADLK = libc::EDEADLK, ENAMETOOLONG = libc::ENAMETOOLONG, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, ENOTEMPTY = libc::ENOTEMPTY, ELOOP = libc::ELOOP, ENOMSG = libc::ENOMSG, EIDRM = libc::EIDRM, ECHRNG = libc::ECHRNG, EL2NSYNC = libc::EL2NSYNC, EL3HLT = libc::EL3HLT, EL3RST = libc::EL3RST, ELNRNG = libc::ELNRNG, EUNATCH = libc::EUNATCH, ENOCSI = libc::ENOCSI, EL2HLT = libc::EL2HLT, EBADE = libc::EBADE, EBADR = libc::EBADR, EXFULL = libc::EXFULL, ENOANO = libc::ENOANO, EBADRQC = libc::EBADRQC, EBADSLT = libc::EBADSLT, EBFONT = libc::EBFONT, ENOSTR = libc::ENOSTR, ENODATA = libc::ENODATA, ETIME = libc::ETIME, ENOSR = libc::ENOSR, ENONET = libc::ENONET, ENOPKG = libc::ENOPKG, EREMOTE = libc::EREMOTE, ENOLINK = libc::ENOLINK, EADV = libc::EADV, ESRMNT = libc::ESRMNT, ECOMM = libc::ECOMM, EPROTO = libc::EPROTO, EMULTIHOP = libc::EMULTIHOP, EDOTDOT = libc::EDOTDOT, EBADMSG = libc::EBADMSG, EOVERFLOW = libc::EOVERFLOW, ENOTUNIQ = libc::ENOTUNIQ, EBADFD = libc::EBADFD, EREMCHG = libc::EREMCHG, ELIBACC = libc::ELIBACC, ELIBBAD = libc::ELIBBAD, ELIBSCN = libc::ELIBSCN, ELIBMAX = libc::ELIBMAX, ELIBEXEC = libc::ELIBEXEC, EILSEQ = libc::EILSEQ, ERESTART = libc::ERESTART, ESTRPIPE = libc::ESTRPIPE, EUSERS = libc::EUSERS, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE = libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT 
= libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, EOPNOTSUPP = libc::EOPNOTSUPP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = libc::ECONNREFUSED, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, EALREADY = libc::EALREADY, EINPROGRESS = libc::EINPROGRESS, ESTALE = libc::ESTALE, EUCLEAN = libc::EUCLEAN, ENOTNAM = libc::ENOTNAM, ENAVAIL = libc::ENAVAIL, EISNAM = libc::EISNAM, EREMOTEIO = libc::EREMOTEIO, EDQUOT = libc::EDQUOT, ENOMEDIUM = libc::ENOMEDIUM, EMEDIUMTYPE = libc::EMEDIUMTYPE, ECANCELED = libc::ECANCELED, ENOKEY = libc::ENOKEY, EKEYEXPIRED = libc::EKEYEXPIRED, EKEYREVOKED = libc::EKEYREVOKED, EKEYREJECTED = libc::EKEYREJECTED, EOWNERDEAD = libc::EOWNERDEAD, ENOTRECOVERABLE = libc::ENOTRECOVERABLE, #[cfg(not(any(target_os = "android", target_arch="mips")))] ERFKILL = libc::ERFKILL, #[cfg(not(any(target_os = "android", target_arch="mips")))] EHWPOISON = libc::EHWPOISON, } pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EDEADLOCK: Errno = Errno::EDEADLK; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EAGAIN => EAGAIN, libc::ENOMEM => ENOMEM, libc::EACCES => EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR 
=> ENOTDIR, libc::EISDIR => EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EDEADLK => EDEADLK, libc::ENAMETOOLONG => ENAMETOOLONG, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::ENOTEMPTY => ENOTEMPTY, libc::ELOOP => ELOOP, libc::ENOMSG => ENOMSG, libc::EIDRM => EIDRM, libc::ECHRNG => ECHRNG, libc::EL2NSYNC => EL2NSYNC, libc::EL3HLT => EL3HLT, libc::EL3RST => EL3RST, libc::ELNRNG => ELNRNG, libc::EUNATCH => EUNATCH, libc::ENOCSI => ENOCSI, libc::EL2HLT => EL2HLT, libc::EBADE => EBADE, libc::EBADR => EBADR, libc::EXFULL => EXFULL, libc::ENOANO => ENOANO, libc::EBADRQC => EBADRQC, libc::EBADSLT => EBADSLT, libc::EBFONT => EBFONT, libc::ENOSTR => ENOSTR, libc::ENODATA => ENODATA, libc::ETIME => ETIME, libc::ENOSR => ENOSR, libc::ENONET => ENONET, libc::ENOPKG => ENOPKG, libc::EREMOTE => EREMOTE, libc::ENOLINK => ENOLINK, libc::EADV => EADV, libc::ESRMNT => ESRMNT, libc::ECOMM => ECOMM, libc::EPROTO => EPROTO, libc::EMULTIHOP => EMULTIHOP, libc::EDOTDOT => EDOTDOT, libc::EBADMSG => EBADMSG, libc::EOVERFLOW => EOVERFLOW, libc::ENOTUNIQ => ENOTUNIQ, libc::EBADFD => EBADFD, libc::EREMCHG => EREMCHG, libc::ELIBACC => ELIBACC, libc::ELIBBAD => ELIBBAD, libc::ELIBSCN => ELIBSCN, libc::ELIBMAX => ELIBMAX, libc::ELIBEXEC => ELIBEXEC, libc::EILSEQ => EILSEQ, libc::ERESTART => ERESTART, libc::ESTRPIPE => ESTRPIPE, libc::EUSERS => EUSERS, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::EOPNOTSUPP => EOPNOTSUPP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => 
// NOTE(review): remainder of the Linux/Android from_i32 lookup (mirror of the
// enum above; `_` falls back to UnknownErrno), followed by the macOS/iOS
// `consts` module, which repeats the same enum + from_i32 pattern with the
// Darwin errno set.
EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN => ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::EALREADY => EALREADY, libc::EINPROGRESS => EINPROGRESS, libc::ESTALE => ESTALE, libc::EUCLEAN => EUCLEAN, libc::ENOTNAM => ENOTNAM, libc::ENAVAIL => ENAVAIL, libc::EISNAM => EISNAM, libc::EREMOTEIO => EREMOTEIO, libc::EDQUOT => EDQUOT, libc::ENOMEDIUM => ENOMEDIUM, libc::EMEDIUMTYPE => EMEDIUMTYPE, libc::ECANCELED => ECANCELED, libc::ENOKEY => ENOKEY, libc::EKEYEXPIRED => EKEYEXPIRED, libc::EKEYREVOKED => EKEYREVOKED, libc::EKEYREJECTED => EKEYREJECTED, libc::EOWNERDEAD => EOWNERDEAD, libc::ENOTRECOVERABLE => ENOTRECOVERABLE, #[cfg(not(any(target_os = "android", target_arch="mips")))] libc::ERFKILL => ERFKILL, #[cfg(not(any(target_os = "android", target_arch="mips")))] libc::EHWPOISON => EHWPOISON, _ => UnknownErrno, } } } #[cfg(any(target_os = "macos", target_os = "ios"))] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EDEADLK = libc::EDEADLK, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = 
libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, EAGAIN = libc::EAGAIN, EINPROGRESS = libc::EINPROGRESS, EALREADY = libc::EALREADY, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE = libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT = libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, ENOTSUP = libc::ENOTSUP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = libc::ECONNREFUSED, ELOOP = libc::ELOOP, ENAMETOOLONG = libc::ENAMETOOLONG, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, ENOTEMPTY = libc::ENOTEMPTY, EPROCLIM = libc::EPROCLIM, EUSERS = libc::EUSERS, EDQUOT = libc::EDQUOT, ESTALE = libc::ESTALE, EREMOTE = libc::EREMOTE, EBADRPC = libc::EBADRPC, ERPCMISMATCH = libc::ERPCMISMATCH, EPROGUNAVAIL = libc::EPROGUNAVAIL, EPROGMISMATCH = libc::EPROGMISMATCH, EPROCUNAVAIL = libc::EPROCUNAVAIL, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, EFTYPE = libc::EFTYPE, EAUTH = libc::EAUTH, ENEEDAUTH = libc::ENEEDAUTH, EPWROFF = libc::EPWROFF, EDEVERR = libc::EDEVERR, EOVERFLOW = libc::EOVERFLOW, EBADEXEC = libc::EBADEXEC, EBADARCH = libc::EBADARCH, ESHLIBVERS = libc::ESHLIBVERS, EBADMACHO = libc::EBADMACHO, ECANCELED = libc::ECANCELED, EIDRM = libc::EIDRM, ENOMSG = libc::ENOMSG, EILSEQ = libc::EILSEQ, ENOATTR = libc::ENOATTR, EBADMSG = libc::EBADMSG, EMULTIHOP = libc::EMULTIHOP, ENODATA = libc::ENODATA, ENOLINK = libc::ENOLINK, ENOSR = 
libc::ENOSR, ENOSTR = libc::ENOSTR, EPROTO = libc::EPROTO, ETIME = libc::ETIME, EOPNOTSUPP = libc::EOPNOTSUPP, ENOPOLICY = libc::ENOPOLICY, ENOTRECOVERABLE = libc::ENOTRECOVERABLE, EOWNERDEAD = libc::EOWNERDEAD, EQFULL = libc::EQFULL, } pub const ELAST: Errno = Errno::EQFULL; pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EDEADLOCK: Errno = Errno::EDEADLK; pub const EL2NSYNC: Errno = Errno::UnknownErrno; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EDEADLK => EDEADLK, libc::ENOMEM => ENOMEM, libc::EACCES => EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR => ENOTDIR, libc::EISDIR => EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EAGAIN => EAGAIN, libc::EINPROGRESS => EINPROGRESS, libc::EALREADY => EALREADY, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::ENOTSUP => ENOTSUP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN 
// NOTE(review): tail of the macOS/iOS from_i32 lookup, then the FreeBSD
// `consts` module (same libc-mirroring enum + from_i32 layout; ELAST marks the
// highest-valued variant and EWOULDBLOCK/EDEADLOCK are aliases of EAGAIN/EDEADLK).
=> ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::ELOOP => ELOOP, libc::ENAMETOOLONG => ENAMETOOLONG, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::ENOTEMPTY => ENOTEMPTY, libc::EPROCLIM => EPROCLIM, libc::EUSERS => EUSERS, libc::EDQUOT => EDQUOT, libc::ESTALE => ESTALE, libc::EREMOTE => EREMOTE, libc::EBADRPC => EBADRPC, libc::ERPCMISMATCH => ERPCMISMATCH, libc::EPROGUNAVAIL => EPROGUNAVAIL, libc::EPROGMISMATCH => EPROGMISMATCH, libc::EPROCUNAVAIL => EPROCUNAVAIL, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::EFTYPE => EFTYPE, libc::EAUTH => EAUTH, libc::ENEEDAUTH => ENEEDAUTH, libc::EPWROFF => EPWROFF, libc::EDEVERR => EDEVERR, libc::EOVERFLOW => EOVERFLOW, libc::EBADEXEC => EBADEXEC, libc::EBADARCH => EBADARCH, libc::ESHLIBVERS => ESHLIBVERS, libc::EBADMACHO => EBADMACHO, libc::ECANCELED => ECANCELED, libc::EIDRM => EIDRM, libc::ENOMSG => ENOMSG, libc::EILSEQ => EILSEQ, libc::ENOATTR => ENOATTR, libc::EBADMSG => EBADMSG, libc::EMULTIHOP => EMULTIHOP, libc::ENODATA => ENODATA, libc::ENOLINK => ENOLINK, libc::ENOSR => ENOSR, libc::ENOSTR => ENOSTR, libc::EPROTO => EPROTO, libc::ETIME => ETIME, libc::EOPNOTSUPP => EOPNOTSUPP, libc::ENOPOLICY => ENOPOLICY, libc::ENOTRECOVERABLE => ENOTRECOVERABLE, libc::EOWNERDEAD => EOWNERDEAD, libc::EQFULL => EQFULL, _ => UnknownErrno, } } } #[cfg(target_os = "freebsd")] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EDEADLK = libc::EDEADLK, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, 
ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, EAGAIN = libc::EAGAIN, EINPROGRESS = libc::EINPROGRESS, EALREADY = libc::EALREADY, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE = libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT = libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, ENOTSUP = libc::ENOTSUP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = libc::ECONNREFUSED, ELOOP = libc::ELOOP, ENAMETOOLONG = libc::ENAMETOOLONG, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, ENOTEMPTY = libc::ENOTEMPTY, EPROCLIM = libc::EPROCLIM, EUSERS = libc::EUSERS, EDQUOT = libc::EDQUOT, ESTALE = libc::ESTALE, EREMOTE = libc::EREMOTE, EBADRPC = libc::EBADRPC, ERPCMISMATCH = libc::ERPCMISMATCH, EPROGUNAVAIL = libc::EPROGUNAVAIL, EPROGMISMATCH = libc::EPROGMISMATCH, EPROCUNAVAIL = libc::EPROCUNAVAIL, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, EFTYPE = libc::EFTYPE, EAUTH = libc::EAUTH, ENEEDAUTH = libc::ENEEDAUTH, EIDRM = libc::EIDRM, ENOMSG = libc::ENOMSG, EOVERFLOW = libc::EOVERFLOW, ECANCELED = libc::ECANCELED, EILSEQ = libc::EILSEQ, ENOATTR = libc::ENOATTR, EDOOFUS = libc::EDOOFUS, EBADMSG = libc::EBADMSG, EMULTIHOP = libc::EMULTIHOP, ENOLINK = libc::ENOLINK, EPROTO = 
libc::EPROTO, ENOTCAPABLE = libc::ENOTCAPABLE, ECAPMODE = libc::ECAPMODE, ENOTRECOVERABLE = libc::ENOTRECOVERABLE, EOWNERDEAD = libc::EOWNERDEAD, } pub const ELAST: Errno = Errno::EOWNERDEAD; pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EDEADLOCK: Errno = Errno::EDEADLK; pub const EL2NSYNC: Errno = Errno::UnknownErrno; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EDEADLK => EDEADLK, libc::ENOMEM => ENOMEM, libc::EACCES => EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR => ENOTDIR, libc::EISDIR => EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EAGAIN => EAGAIN, libc::EINPROGRESS => EINPROGRESS, libc::EALREADY => EALREADY, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::ENOTSUP => ENOTSUP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN => ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, 
// NOTE(review): tail of the FreeBSD from_i32 lookup, then the DragonFly
// `consts` module. DragonFly has no distinct EOPNOTSUPP value, so it is
// exported below as a const alias of ENOTSUP rather than an enum variant.
libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::ELOOP => ELOOP, libc::ENAMETOOLONG => ENAMETOOLONG, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::ENOTEMPTY => ENOTEMPTY, libc::EPROCLIM => EPROCLIM, libc::EUSERS => EUSERS, libc::EDQUOT => EDQUOT, libc::ESTALE => ESTALE, libc::EREMOTE => EREMOTE, libc::EBADRPC => EBADRPC, libc::ERPCMISMATCH => ERPCMISMATCH, libc::EPROGUNAVAIL => EPROGUNAVAIL, libc::EPROGMISMATCH => EPROGMISMATCH, libc::EPROCUNAVAIL => EPROCUNAVAIL, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::EFTYPE => EFTYPE, libc::EAUTH => EAUTH, libc::ENEEDAUTH => ENEEDAUTH, libc::EIDRM => EIDRM, libc::ENOMSG => ENOMSG, libc::EOVERFLOW => EOVERFLOW, libc::ECANCELED => ECANCELED, libc::EILSEQ => EILSEQ, libc::ENOATTR => ENOATTR, libc::EDOOFUS => EDOOFUS, libc::EBADMSG => EBADMSG, libc::EMULTIHOP => EMULTIHOP, libc::ENOLINK => ENOLINK, libc::EPROTO => EPROTO, libc::ENOTCAPABLE => ENOTCAPABLE, libc::ECAPMODE => ECAPMODE, libc::ENOTRECOVERABLE => ENOTRECOVERABLE, libc::EOWNERDEAD => EOWNERDEAD, _ => UnknownErrno, } } } #[cfg(target_os = "dragonfly")] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EDEADLK = libc::EDEADLK, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, 
EAGAIN = libc::EAGAIN, EINPROGRESS = libc::EINPROGRESS, EALREADY = libc::EALREADY, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE = libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT = libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, ENOTSUP = libc::ENOTSUP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = libc::ECONNREFUSED, ELOOP = libc::ELOOP, ENAMETOOLONG = libc::ENAMETOOLONG, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, ENOTEMPTY = libc::ENOTEMPTY, EPROCLIM = libc::EPROCLIM, EUSERS = libc::EUSERS, EDQUOT = libc::EDQUOT, ESTALE = libc::ESTALE, EREMOTE = libc::EREMOTE, EBADRPC = libc::EBADRPC, ERPCMISMATCH = libc::ERPCMISMATCH, EPROGUNAVAIL = libc::EPROGUNAVAIL, EPROGMISMATCH = libc::EPROGMISMATCH, EPROCUNAVAIL = libc::EPROCUNAVAIL, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, EFTYPE = libc::EFTYPE, EAUTH = libc::EAUTH, ENEEDAUTH = libc::ENEEDAUTH, EIDRM = libc::EIDRM, ENOMSG = libc::ENOMSG, EOVERFLOW = libc::EOVERFLOW, ECANCELED = libc::ECANCELED, EILSEQ = libc::EILSEQ, ENOATTR = libc::ENOATTR, EDOOFUS = libc::EDOOFUS, EBADMSG = libc::EBADMSG, EMULTIHOP = libc::EMULTIHOP, ENOLINK = libc::ENOLINK, EPROTO = libc::EPROTO, ENOMEDIUM = libc::ENOMEDIUM, EUNUSED94 = libc::EUNUSED94, EUNUSED95 = libc::EUNUSED95, EUNUSED96 = libc::EUNUSED96, EUNUSED97 = libc::EUNUSED97, EUNUSED98 = libc::EUNUSED98, EASYNC = libc::EASYNC, } pub const ELAST: Errno = Errno::EASYNC; pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EDEADLOCK: Errno = 
Errno::EDEADLK; pub const EOPNOTSUPP: Errno = Errno::ENOTSUP; pub const EL2NSYNC: Errno = Errno::UnknownErrno; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EDEADLK => EDEADLK, libc::ENOMEM => ENOMEM, libc::EACCES => EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR => ENOTDIR, libc::EISDIR=> EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EAGAIN => EAGAIN, libc::EINPROGRESS => EINPROGRESS, libc::EALREADY => EALREADY, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::ENOTSUP => ENOTSUP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN => ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::ELOOP => ELOOP, libc::ENAMETOOLONG => ENAMETOOLONG, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::ENOTEMPTY => ENOTEMPTY, 
// NOTE(review): tail of the DragonFly from_i32 lookup, then the OpenBSD
// `consts` module. The OpenBSD from_i32 match continues beyond this excerpt,
// so only its enum and const aliases are fully visible here.
libc::EPROCLIM => EPROCLIM, libc::EUSERS => EUSERS, libc::EDQUOT => EDQUOT, libc::ESTALE => ESTALE, libc::EREMOTE => EREMOTE, libc::EBADRPC => EBADRPC, libc::ERPCMISMATCH => ERPCMISMATCH, libc::EPROGUNAVAIL => EPROGUNAVAIL, libc::EPROGMISMATCH => EPROGMISMATCH, libc::EPROCUNAVAIL => EPROCUNAVAIL, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::EFTYPE => EFTYPE, libc::EAUTH => EAUTH, libc::ENEEDAUTH => ENEEDAUTH, libc::EIDRM => EIDRM, libc::ENOMSG => ENOMSG, libc::EOVERFLOW => EOVERFLOW, libc::ECANCELED => ECANCELED, libc::EILSEQ => EILSEQ, libc::ENOATTR => ENOATTR, libc::EDOOFUS => EDOOFUS, libc::EBADMSG => EBADMSG, libc::EMULTIHOP => EMULTIHOP, libc::ENOLINK => ENOLINK, libc::EPROTO => EPROTO, libc::ENOMEDIUM => ENOMEDIUM, libc::EUNUSED94 => EUNUSED94, libc::EUNUSED95 => EUNUSED95, libc::EUNUSED96 => EUNUSED96, libc::EUNUSED97 => EUNUSED97, libc::EUNUSED98 => EUNUSED98, libc::EASYNC => EASYNC, _ => UnknownErrno, } } } #[cfg(target_os = "openbsd")] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EDEADLK = libc::EDEADLK, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, EAGAIN = libc::EAGAIN, EINPROGRESS = libc::EINPROGRESS, EALREADY = libc::EALREADY, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE 
= libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT = libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, EOPNOTSUPP = libc::EOPNOTSUPP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = libc::ECONNREFUSED, ELOOP = libc::ELOOP, ENAMETOOLONG = libc::ENAMETOOLONG, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, ENOTEMPTY = libc::ENOTEMPTY, EPROCLIM = libc::EPROCLIM, EUSERS = libc::EUSERS, EDQUOT = libc::EDQUOT, ESTALE = libc::ESTALE, EREMOTE = libc::EREMOTE, EBADRPC = libc::EBADRPC, ERPCMISMATCH = libc::ERPCMISMATCH, EPROGUNAVAIL = libc::EPROGUNAVAIL, EPROGMISMATCH = libc::EPROGMISMATCH, EPROCUNAVAIL = libc::EPROCUNAVAIL, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, EFTYPE = libc::EFTYPE, EAUTH = libc::EAUTH, ENEEDAUTH = libc::ENEEDAUTH, EIPSEC = libc::EIPSEC, ENOATTR = libc::ENOATTR, EILSEQ = libc::EILSEQ, ENOMEDIUM = libc::ENOMEDIUM, EMEDIUMTYPE = libc::EMEDIUMTYPE, EOVERFLOW = libc::EOVERFLOW, ECANCELED = libc::ECANCELED, EIDRM = libc::EIDRM, ENOMSG = libc::ENOMSG, ENOTSUP = libc::ENOTSUP, } pub const ELAST: Errno = Errno::ENOTSUP; pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EL2NSYNC: Errno = Errno::UnknownErrno; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EDEADLK => EDEADLK, libc::ENOMEM => ENOMEM, libc::EACCES 
=> EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR => ENOTDIR, libc::EISDIR => EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EAGAIN => EAGAIN, libc::EINPROGRESS => EINPROGRESS, libc::EALREADY => EALREADY, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::EOPNOTSUPP => EOPNOTSUPP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN => ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::ELOOP => ELOOP, libc::ENAMETOOLONG => ENAMETOOLONG, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::ENOTEMPTY => ENOTEMPTY, libc::EPROCLIM => EPROCLIM, libc::EUSERS => EUSERS, libc::EDQUOT => EDQUOT, libc::ESTALE => ESTALE, libc::EREMOTE => EREMOTE, libc::EBADRPC => EBADRPC, libc::ERPCMISMATCH => ERPCMISMATCH, libc::EPROGUNAVAIL => EPROGUNAVAIL, libc::EPROGMISMATCH => EPROGMISMATCH, libc::EPROCUNAVAIL => EPROCUNAVAIL, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::EFTYPE => EFTYPE, libc::EAUTH => EAUTH, libc::ENEEDAUTH => ENEEDAUTH, libc::EIPSEC => EIPSEC, libc::ENOATTR => 
ENOATTR, libc::EILSEQ => EILSEQ, libc::ENOMEDIUM => ENOMEDIUM, libc::EMEDIUMTYPE => EMEDIUMTYPE, libc::EOVERFLOW => EOVERFLOW, libc::ECANCELED => ECANCELED, libc::EIDRM => EIDRM, libc::ENOMSG => ENOMSG, libc::ENOTSUP => ENOTSUP, _ => UnknownErrno, } } } #[cfg(target_os = "netbsd")] mod consts { use libc; #[derive(Clone, Copy, Debug, Eq, PartialEq)] #[repr(i32)] pub enum Errno { UnknownErrno = 0, EPERM = libc::EPERM, ENOENT = libc::ENOENT, ESRCH = libc::ESRCH, EINTR = libc::EINTR, EIO = libc::EIO, ENXIO = libc::ENXIO, E2BIG = libc::E2BIG, ENOEXEC = libc::ENOEXEC, EBADF = libc::EBADF, ECHILD = libc::ECHILD, EDEADLK = libc::EDEADLK, ENOMEM = libc::ENOMEM, EACCES = libc::EACCES, EFAULT = libc::EFAULT, ENOTBLK = libc::ENOTBLK, EBUSY = libc::EBUSY, EEXIST = libc::EEXIST, EXDEV = libc::EXDEV, ENODEV = libc::ENODEV, ENOTDIR = libc::ENOTDIR, EISDIR = libc::EISDIR, EINVAL = libc::EINVAL, ENFILE = libc::ENFILE, EMFILE = libc::EMFILE, ENOTTY = libc::ENOTTY, ETXTBSY = libc::ETXTBSY, EFBIG = libc::EFBIG, ENOSPC = libc::ENOSPC, ESPIPE = libc::ESPIPE, EROFS = libc::EROFS, EMLINK = libc::EMLINK, EPIPE = libc::EPIPE, EDOM = libc::EDOM, ERANGE = libc::ERANGE, EAGAIN = libc::EAGAIN, EINPROGRESS = libc::EINPROGRESS, EALREADY = libc::EALREADY, ENOTSOCK = libc::ENOTSOCK, EDESTADDRREQ = libc::EDESTADDRREQ, EMSGSIZE = libc::EMSGSIZE, EPROTOTYPE = libc::EPROTOTYPE, ENOPROTOOPT = libc::ENOPROTOOPT, EPROTONOSUPPORT = libc::EPROTONOSUPPORT, ESOCKTNOSUPPORT = libc::ESOCKTNOSUPPORT, EOPNOTSUPP = libc::EOPNOTSUPP, EPFNOSUPPORT = libc::EPFNOSUPPORT, EAFNOSUPPORT = libc::EAFNOSUPPORT, EADDRINUSE = libc::EADDRINUSE, EADDRNOTAVAIL = libc::EADDRNOTAVAIL, ENETDOWN = libc::ENETDOWN, ENETUNREACH = libc::ENETUNREACH, ENETRESET = libc::ENETRESET, ECONNABORTED = libc::ECONNABORTED, ECONNRESET = libc::ECONNRESET, ENOBUFS = libc::ENOBUFS, EISCONN = libc::EISCONN, ENOTCONN = libc::ENOTCONN, ESHUTDOWN = libc::ESHUTDOWN, ETOOMANYREFS = libc::ETOOMANYREFS, ETIMEDOUT = libc::ETIMEDOUT, ECONNREFUSED = 
libc::ECONNREFUSED, ELOOP = libc::ELOOP, ENAMETOOLONG = libc::ENAMETOOLONG, EHOSTDOWN = libc::EHOSTDOWN, EHOSTUNREACH = libc::EHOSTUNREACH, ENOTEMPTY = libc::ENOTEMPTY, EPROCLIM = libc::EPROCLIM, EUSERS = libc::EUSERS, EDQUOT = libc::EDQUOT, ESTALE = libc::ESTALE, EREMOTE = libc::EREMOTE, EBADRPC = libc::EBADRPC, ERPCMISMATCH = libc::ERPCMISMATCH, EPROGUNAVAIL = libc::EPROGUNAVAIL, EPROGMISMATCH = libc::EPROGMISMATCH, EPROCUNAVAIL = libc::EPROCUNAVAIL, ENOLCK = libc::ENOLCK, ENOSYS = libc::ENOSYS, EFTYPE = libc::EFTYPE, EAUTH = libc::EAUTH, ENEEDAUTH = libc::ENEEDAUTH, EIDRM = libc::EIDRM, ENOMSG = libc::ENOMSG, EOVERFLOW = libc::EOVERFLOW, EILSEQ = libc::EILSEQ, ENOTSUP = libc::ENOTSUP, ECANCELED = libc::ECANCELED, EBADMSG = libc::EBADMSG, ENODATA = libc::ENODATA, ENOSR = libc::ENOSR, ENOSTR = libc::ENOSTR, ETIME = libc::ETIME, ENOATTR = libc::ENOATTR, EMULTIHOP = libc::EMULTIHOP, ENOLINK = libc::ENOLINK, EPROTO = libc::EPROTO, } pub const ELAST: Errno = Errno::ENOTSUP; pub const EWOULDBLOCK: Errno = Errno::EAGAIN; pub const EL2NSYNC: Errno = Errno::UnknownErrno; pub fn from_i32(e: i32) -> Errno { use self::Errno::*; match e { libc::EPERM => EPERM, libc::ENOENT => ENOENT, libc::ESRCH => ESRCH, libc::EINTR => EINTR, libc::EIO => EIO, libc::ENXIO => ENXIO, libc::E2BIG => E2BIG, libc::ENOEXEC => ENOEXEC, libc::EBADF => EBADF, libc::ECHILD => ECHILD, libc::EDEADLK => EDEADLK, libc::ENOMEM => ENOMEM, libc::EACCES => EACCES, libc::EFAULT => EFAULT, libc::ENOTBLK => ENOTBLK, libc::EBUSY => EBUSY, libc::EEXIST => EEXIST, libc::EXDEV => EXDEV, libc::ENODEV => ENODEV, libc::ENOTDIR => ENOTDIR, libc::EISDIR => EISDIR, libc::EINVAL => EINVAL, libc::ENFILE => ENFILE, libc::EMFILE => EMFILE, libc::ENOTTY => ENOTTY, libc::ETXTBSY => ETXTBSY, libc::EFBIG => EFBIG, libc::ENOSPC => ENOSPC, libc::ESPIPE => ESPIPE, libc::EROFS => EROFS, libc::EMLINK => EMLINK, libc::EPIPE => EPIPE, libc::EDOM => EDOM, libc::ERANGE => ERANGE, libc::EAGAIN => EAGAIN, libc::EINPROGRESS => EINPROGRESS, 
libc::EALREADY => EALREADY, libc::ENOTSOCK => ENOTSOCK, libc::EDESTADDRREQ => EDESTADDRREQ, libc::EMSGSIZE => EMSGSIZE, libc::EPROTOTYPE => EPROTOTYPE, libc::ENOPROTOOPT => ENOPROTOOPT, libc::EPROTONOSUPPORT => EPROTONOSUPPORT, libc::ESOCKTNOSUPPORT => ESOCKTNOSUPPORT, libc::EOPNOTSUPP => EOPNOTSUPP, libc::EPFNOSUPPORT => EPFNOSUPPORT, libc::EAFNOSUPPORT => EAFNOSUPPORT, libc::EADDRINUSE => EADDRINUSE, libc::EADDRNOTAVAIL => EADDRNOTAVAIL, libc::ENETDOWN => ENETDOWN, libc::ENETUNREACH => ENETUNREACH, libc::ENETRESET => ENETRESET, libc::ECONNABORTED => ECONNABORTED, libc::ECONNRESET => ECONNRESET, libc::ENOBUFS => ENOBUFS, libc::EISCONN => EISCONN, libc::ENOTCONN => ENOTCONN, libc::ESHUTDOWN => ESHUTDOWN, libc::ETOOMANYREFS => ETOOMANYREFS, libc::ETIMEDOUT => ETIMEDOUT, libc::ECONNREFUSED => ECONNREFUSED, libc::ELOOP => ELOOP, libc::ENAMETOOLONG => ENAMETOOLONG, libc::EHOSTDOWN => EHOSTDOWN, libc::EHOSTUNREACH => EHOSTUNREACH, libc::ENOTEMPTY => ENOTEMPTY, libc::EPROCLIM => EPROCLIM, libc::EUSERS => EUSERS, libc::EDQUOT => EDQUOT, libc::ESTALE => ESTALE, libc::EREMOTE => EREMOTE, libc::EBADRPC => EBADRPC, libc::ERPCMISMATCH => ERPCMISMATCH, libc::EPROGUNAVAIL => EPROGUNAVAIL, libc::EPROGMISMATCH => EPROGMISMATCH, libc::EPROCUNAVAIL => EPROCUNAVAIL, libc::ENOLCK => ENOLCK, libc::ENOSYS => ENOSYS, libc::EFTYPE => EFTYPE, libc::EAUTH => EAUTH, libc::ENEEDAUTH => ENEEDAUTH, libc::EIDRM => EIDRM, libc::ENOMSG => ENOMSG, libc::EOVERFLOW => EOVERFLOW, libc::EILSEQ => EILSEQ, libc::ENOTSUP => ENOTSUP, libc::ECANCELED => ECANCELED, libc::EBADMSG => EBADMSG, libc::ENODATA => ENODATA, libc::ENOSR => ENOSR, libc::ENOSTR => ENOSTR, libc::ETIME => ETIME, libc::ENOATTR => ENOATTR, libc::EMULTIHOP => EMULTIHOP, libc::ENOLINK => ENOLINK, libc::EPROTO => EPROTO, _ => UnknownErrno, } } }
39.153566
152
0.500425
91acf5b03a37231e940c5a79882c2981e62c4dfe
153,817
mod error; mod helpers; mod impl_debug; mod impl_partialeq; pub mod struct_layout; #[cfg(test)] #[allow(warnings)] pub(crate) mod bitfield_unit; #[cfg(all(test, target_endian = "little"))] mod bitfield_unit_tests; use self::helpers::attributes; use self::struct_layout::StructLayoutTracker; use super::BindgenOptions; use crate::ir::analysis::{HasVtable, Sizedness}; use crate::ir::annotations::FieldAccessorKind; use crate::ir::comment; use crate::ir::comp::{ Base, Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData, FieldMethods, Method, MethodKind, }; use crate::ir::context::{BindgenContext, ItemId}; use crate::ir::derive::{ CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq, CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd, }; use crate::ir::dot; use crate::ir::enum_ty::{Enum, EnumVariant, EnumVariantValue}; use crate::ir::function::{Abi, Function, FunctionKind, FunctionSig, Linkage}; use crate::ir::int::IntKind; use crate::ir::item::{IsOpaque, Item, ItemCanonicalName, ItemCanonicalPath}; use crate::ir::item_kind::ItemKind; use crate::ir::layout::Layout; use crate::ir::module::Module; use crate::ir::objc::{ObjCInterface, ObjCMethod}; use crate::ir::template::{ AsTemplateParam, TemplateInstantiation, TemplateParameters, }; use crate::ir::ty::{Type, TypeKind}; use crate::ir::var::Var; use proc_macro2::{self, Ident, Span}; use quote::TokenStreamExt; use crate::{Entry, HashMap, HashSet}; use std; use std::borrow::Cow; use std::cell::Cell; use std::collections::VecDeque; use std::fmt::Write; use std::iter; use std::ops; use std::str::FromStr; // Name of type defined in constified enum module pub static CONSTIFIED_ENUM_MODULE_REPR_NAME: &'static str = "Type"; fn top_level_path( ctx: &BindgenContext, item: &Item, ) -> Vec<proc_macro2::TokenStream> { let mut path = vec![quote! { self }]; if ctx.options().enable_cxx_namespaces { for _ in 0..item.codegen_depth(ctx) { path.push(quote! 
{ super }); } } path } fn root_import( ctx: &BindgenContext, module: &Item, ) -> proc_macro2::TokenStream { assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up"); assert!(module.is_module()); let mut path = top_level_path(ctx, module); let root = ctx.root_module().canonical_name(ctx); let root_ident = ctx.rust_ident(&root); path.push(quote! { #root_ident }); let mut tokens = quote! {}; tokens.append_separated(path, quote!(::)); quote! { #[allow(unused_imports)] use #tokens ; } } bitflags! { struct DerivableTraits: u16 { const DEBUG = 1 << 0; const DEFAULT = 1 << 1; const COPY = 1 << 2; const CLONE = 1 << 3; const HASH = 1 << 4; const PARTIAL_ORD = 1 << 5; const ORD = 1 << 6; const PARTIAL_EQ = 1 << 7; const EQ = 1 << 8; } } fn derives_of_item(item: &Item, ctx: &BindgenContext) -> DerivableTraits { let mut derivable_traits = DerivableTraits::empty(); if item.can_derive_debug(ctx) && !item.annotations().disallow_debug() { derivable_traits |= DerivableTraits::DEBUG; } if item.can_derive_default(ctx) { derivable_traits |= DerivableTraits::DEFAULT; } let all_template_params = item.all_template_params(ctx); if item.can_derive_copy(ctx) && !item.annotations().disallow_copy() { derivable_traits |= DerivableTraits::COPY; if ctx.options().rust_features().builtin_clone_impls || !all_template_params.is_empty() { // FIXME: This requires extra logic if you have a big array in a // templated struct. The reason for this is that the magic: // fn clone(&self) -> Self { *self } // doesn't work for templates. // // It's not hard to fix though. 
derivable_traits |= DerivableTraits::CLONE; } } if item.can_derive_hash(ctx) { derivable_traits |= DerivableTraits::HASH; } if item.can_derive_partialord(ctx) { derivable_traits |= DerivableTraits::PARTIAL_ORD; } if item.can_derive_ord(ctx) { derivable_traits |= DerivableTraits::ORD; } if item.can_derive_partialeq(ctx) { derivable_traits |= DerivableTraits::PARTIAL_EQ; } if item.can_derive_eq(ctx) { derivable_traits |= DerivableTraits::EQ; } derivable_traits } impl From<DerivableTraits> for Vec<&'static str> { fn from(derivable_traits: DerivableTraits) -> Vec<&'static str> { [ (DerivableTraits::DEBUG, "Debug"), (DerivableTraits::DEFAULT, "Default"), (DerivableTraits::COPY, "Copy"), (DerivableTraits::CLONE, "Clone"), (DerivableTraits::HASH, "Hash"), (DerivableTraits::PARTIAL_ORD, "PartialOrd"), (DerivableTraits::ORD, "Ord"), (DerivableTraits::PARTIAL_EQ, "PartialEq"), (DerivableTraits::EQ, "Eq"), ] .iter() .filter_map(|&(flag, derive)| { Some(derive).filter(|_| derivable_traits.contains(flag)) }) .collect() } } struct CodegenResult<'a> { items: Vec<proc_macro2::TokenStream>, /// A monotonic counter used to add stable unique id's to stuff that doesn't /// need to be referenced by anything. codegen_id: &'a Cell<usize>, /// Whether a bindgen union has been generated at least once. saw_bindgen_union: bool, /// Whether an incomplete array has been generated at least once. saw_incomplete_array: bool, /// Whether Objective C types have been seen at least once. saw_objc: bool, /// Whether Apple block types have been seen at least once. saw_block: bool, /// Whether a bitfield allocation unit has been seen at least once. saw_bitfield_unit: bool, items_seen: HashSet<ItemId>, /// The set of generated function/var names, needed because in C/C++ is /// legal to do something like: /// /// ```c++ /// extern "C" { /// void foo(); /// extern int bar; /// } /// /// extern "C" { /// void foo(); /// extern int bar; /// } /// ``` /// /// Being these two different declarations. 
functions_seen: HashSet<String>, vars_seen: HashSet<String>, /// Used for making bindings to overloaded functions. Maps from a canonical /// function name to the number of overloads we have already codegen'd for /// that name. This lets us give each overload a unique suffix. overload_counters: HashMap<String, u32>, } impl<'a> CodegenResult<'a> { fn new(codegen_id: &'a Cell<usize>) -> Self { CodegenResult { items: vec![], saw_bindgen_union: false, saw_incomplete_array: false, saw_objc: false, saw_block: false, saw_bitfield_unit: false, codegen_id, items_seen: Default::default(), functions_seen: Default::default(), vars_seen: Default::default(), overload_counters: Default::default(), } } fn saw_bindgen_union(&mut self) { self.saw_bindgen_union = true; } fn saw_incomplete_array(&mut self) { self.saw_incomplete_array = true; } fn saw_objc(&mut self) { self.saw_objc = true; } fn saw_block(&mut self) { self.saw_block = true; } fn saw_bitfield_unit(&mut self) { self.saw_bitfield_unit = true; } fn seen<Id: Into<ItemId>>(&self, item: Id) -> bool { self.items_seen.contains(&item.into()) } fn set_seen<Id: Into<ItemId>>(&mut self, item: Id) { self.items_seen.insert(item.into()); } fn seen_function(&self, name: &str) -> bool { self.functions_seen.contains(name) } fn saw_function(&mut self, name: &str) { self.functions_seen.insert(name.into()); } /// Get the overload number for the given function name. Increments the /// counter internally so the next time we ask for the overload for this /// name, we get the incremented value, and so on. 
fn overload_number(&mut self, name: &str) -> u32 { let counter = self.overload_counters.entry(name.into()).or_insert(0); let number = *counter; *counter += 1; number } fn seen_var(&self, name: &str) -> bool { self.vars_seen.contains(name) } fn saw_var(&mut self, name: &str) { self.vars_seen.insert(name.into()); } fn inner<F>(&mut self, cb: F) -> Vec<proc_macro2::TokenStream> where F: FnOnce(&mut Self), { let mut new = Self::new(self.codegen_id); cb(&mut new); self.saw_incomplete_array |= new.saw_incomplete_array; self.saw_objc |= new.saw_objc; self.saw_block |= new.saw_block; self.saw_bitfield_unit |= new.saw_bitfield_unit; self.saw_bindgen_union |= new.saw_bindgen_union; new.items } } impl<'a> ops::Deref for CodegenResult<'a> { type Target = Vec<proc_macro2::TokenStream>; fn deref(&self) -> &Self::Target { &self.items } } impl<'a> ops::DerefMut for CodegenResult<'a> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.items } } /// A trait to convert a rust type into a pointer, optionally const, to the same /// type. trait ToPtr { fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream; } impl ToPtr for proc_macro2::TokenStream { fn to_ptr(self, is_const: bool) -> proc_macro2::TokenStream { if is_const { quote! { *const #self } } else { quote! { *mut #self } } } } /// An extension trait for `proc_macro2::TokenStream` that lets us append any implicit /// template parameters that exist for some type, if necessary. trait AppendImplicitTemplateParams { fn append_implicit_template_params( &mut self, ctx: &BindgenContext, item: &Item, ); } impl AppendImplicitTemplateParams for proc_macro2::TokenStream { fn append_implicit_template_params( &mut self, ctx: &BindgenContext, item: &Item, ) { let item = item.id().into_resolver().through_type_refs().resolve(ctx); match *item.expect_type().kind() { TypeKind::UnresolvedTypeRef(..) => { unreachable!("already resolved unresolved type refs") } TypeKind::ResolvedTypeRef(..) 
=> { unreachable!("we resolved item through type refs") } // None of these types ever have implicit template parameters. TypeKind::Void | TypeKind::NullPtr | TypeKind::Pointer(..) | TypeKind::Reference(..) | TypeKind::Int(..) | TypeKind::Float(..) | TypeKind::Complex(..) | TypeKind::Array(..) | TypeKind::TypeParam | TypeKind::Opaque | TypeKind::Function(..) | TypeKind::Enum(..) | TypeKind::ObjCId | TypeKind::ObjCSel | TypeKind::TemplateInstantiation(..) => return, _ => {} } let params: Vec<_> = item .used_template_params(ctx) .iter() .map(|p| { p.try_to_rust_ty(ctx, &()) .expect("template params cannot fail to be a rust type") }) .collect(); if !params.is_empty() { self.append_all(quote! { < #( #params ),* > }); } } } trait CodeGenerator { /// Extra information from the caller. type Extra; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, extra: &Self::Extra, ); } impl CodeGenerator for Item { type Extra = (); fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, _extra: &(), ) { if !self.is_enabled_for_codegen(ctx) { return; } if self.is_blacklisted(ctx) || result.seen(self.id()) { debug!( "<Item as CodeGenerator>::codegen: Ignoring hidden or seen: \ self = {:?}", self ); return; } debug!("<Item as CodeGenerator>::codegen: self = {:?}", self); if !ctx.codegen_items().contains(&self.id()) { // TODO(emilio, #453): Figure out what to do when this happens // legitimately, we could track the opaque stuff and disable the // assertion there I guess. 
warn!("Found non-whitelisted item in code generation: {:?}", self); } result.set_seen(self.id()); match *self.kind() { ItemKind::Module(ref module) => { module.codegen(ctx, result, self); } ItemKind::Function(ref fun) => { fun.codegen(ctx, result, self); } ItemKind::Var(ref var) => { var.codegen(ctx, result, self); } ItemKind::Type(ref ty) => { ty.codegen(ctx, result, self); } } } } impl CodeGenerator for Module { type Extra = Item; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Module as CodeGenerator>::codegen: item = {:?}", item); let codegen_self = |result: &mut CodegenResult, found_any: &mut bool| { for child in self.children() { if ctx.codegen_items().contains(child) { *found_any = true; ctx.resolve_item(*child).codegen(ctx, result, &()); } } if item.id() == ctx.root_module() { if result.saw_block { utils::prepend_block_header(ctx, &mut *result); } if result.saw_bindgen_union { utils::prepend_union_types(ctx, &mut *result); } if result.saw_incomplete_array { utils::prepend_incomplete_array_types(ctx, &mut *result); } if ctx.need_bindgen_complex_type() { utils::prepend_complex_type(&mut *result); } if result.saw_objc { utils::prepend_objc_header(ctx, &mut *result); } if result.saw_bitfield_unit { utils::prepend_bitfield_unit_type(ctx, &mut *result); } } }; if !ctx.options().enable_cxx_namespaces || (self.is_inline() && !ctx.options().conservative_inline_namespaces) { codegen_self(result, &mut false); return; } let mut found_any = false; let inner_items = result.inner(|result| { result.push(root_import(ctx, item)); let path = item.namespace_aware_canonical_path(ctx).join("::"); if let Some(raw_lines) = ctx.options().module_lines.get(&path) { for raw_line in raw_lines { found_any = true; result.push( proc_macro2::TokenStream::from_str(raw_line).unwrap(), ); } } codegen_self(result, &mut found_any); }); // Don't bother creating an empty module. 
if !found_any { return; } let name = item.canonical_name(ctx); let ident = ctx.rust_ident(name); result.push(if item.id() == ctx.root_module() { quote! { #[allow(non_snake_case, non_camel_case_types, non_upper_case_globals)] pub mod #ident { #( #inner_items )* } } } else { quote! { pub mod #ident { #( #inner_items )* } } }); } } impl CodeGenerator for Var { type Extra = Item; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { use crate::ir::var::VarType; debug!("<Var as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); let canonical_name = item.canonical_name(ctx); if result.seen_var(&canonical_name) { return; } result.saw_var(&canonical_name); let canonical_ident = ctx.rust_ident(&canonical_name); // We can't generate bindings to static variables of templates. The // number of actual variables for a single declaration are open ended // and we don't know what instantiations do or don't exist. if !item.all_template_params(ctx).is_empty() { return; } let ty = self.ty().to_rust_ty_or_opaque(ctx, &()); if let Some(val) = self.val() { match *val { VarType::Bool(val) => { result.push(quote! { pub const #canonical_ident : #ty = #val ; }); } VarType::Int(val) => { let int_kind = self .ty() .into_resolver() .through_type_aliases() .through_type_refs() .resolve(ctx) .expect_type() .as_integer() .unwrap(); let val = if int_kind.is_signed() { helpers::ast_ty::int_expr(val) } else { helpers::ast_ty::uint_expr(val as _) }; result.push(quote! { pub const #canonical_ident : #ty = #val ; }); } VarType::String(ref bytes) => { // Account the trailing zero. // // TODO: Here we ignore the type we just made up, probably // we should refactor how the variable type and ty id work. let len = bytes.len() + 1; let ty = quote! { [u8; #len] }; match String::from_utf8(bytes.clone()) { Ok(string) => { let cstr = helpers::ast_ty::cstr_expr(string); result.push(quote! 
{ pub const #canonical_ident : &'static #ty = #cstr ; }); } Err(..) => { let bytes = helpers::ast_ty::byte_array_expr(bytes); result.push(quote! { pub const #canonical_ident : #ty = #bytes ; }); } } } VarType::Float(f) => { match helpers::ast_ty::float_expr(ctx, f) { Ok(expr) => result.push(quote! { pub const #canonical_ident : #ty = #expr ; }), Err(..) => return, } } VarType::Char(c) => { result.push(quote! { pub const #canonical_ident : #ty = #c ; }); } } } else { let mut attrs = vec![]; // If necessary, apply a `#[link_name]` attribute let link_name = self.mangled_name().unwrap_or(self.name()); if !utils::names_will_be_identical_after_mangling( &canonical_name, link_name, None, ) { attrs.push(attributes::link_name(link_name)); } let maybe_mut = if self.is_const() { quote! {} } else { quote! { mut } }; let tokens = quote!( extern "C" { #(#attrs)* pub static #maybe_mut #canonical_ident: #ty; } ); result.push(tokens); } } } impl CodeGenerator for Type { type Extra = Item; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Type as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); match *self.kind() { TypeKind::Void | TypeKind::NullPtr | TypeKind::Int(..) | TypeKind::Float(..) | TypeKind::Complex(..) | TypeKind::Array(..) | TypeKind::Vector(..) | TypeKind::Pointer(..) | TypeKind::Reference(..) | TypeKind::Function(..) | TypeKind::ResolvedTypeRef(..) | TypeKind::Opaque | TypeKind::TypeParam => { // These items don't need code generation, they only need to be // converted to rust types in fields, arguments, and such. // NOTE(emilio): If you add to this list, make sure to also add // it to BindgenContext::compute_whitelisted_and_codegen_items. 
return; } TypeKind::TemplateInstantiation(ref inst) => { inst.codegen(ctx, result, item) } TypeKind::BlockPointer(inner) => { if !ctx.options().generate_block { return; } let inner_item = inner.into_resolver().through_type_refs().resolve(ctx); let name = item.canonical_name(ctx); let inner_rust_type = { if let TypeKind::Function(fnsig) = inner_item.kind().expect_type().kind() { utils::fnsig_block(ctx, fnsig) } else { panic!("invalid block typedef: {:?}", inner_item) } }; let rust_name = ctx.rust_ident(&name); let mut tokens = if let Some(comment) = item.comment(ctx) { attributes::doc(comment) } else { quote! {} }; tokens.append_all(quote! { pub type #rust_name = #inner_rust_type ; }); result.push(tokens); result.saw_block(); } TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item), TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => { let inner_item = inner.into_resolver().through_type_refs().resolve(ctx); let name = item.canonical_name(ctx); let path = item.canonical_path(ctx); { let through_type_aliases = inner .into_resolver() .through_type_refs() .through_type_aliases() .resolve(ctx); // Try to catch the common pattern: // // typedef struct foo { ... } foo; // // here, and also other more complex cases like #946. if through_type_aliases.canonical_path(ctx) == path { return; } } // If this is a known named type, disallow generating anything // for it too. let spelling = self.name().expect("Unnamed alias?"); if utils::type_from_named(ctx, spelling).is_some() { return; } let mut outer_params = item.used_template_params(ctx); let is_opaque = item.is_opaque(ctx, &()); let inner_rust_type = if is_opaque { outer_params = vec![]; self.to_opaque(ctx, item) } else { // Its possible that we have better layout information than // the inner type does, so fall back to an opaque blob based // on our layout if converting the inner item fails. 
let mut inner_ty = inner_item .try_to_rust_ty_or_opaque(ctx, &()) .unwrap_or_else(|_| self.to_opaque(ctx, item)); inner_ty.append_implicit_template_params(ctx, inner_item); inner_ty }; { // FIXME(emilio): This is a workaround to avoid generating // incorrect type aliases because of types that we haven't // been able to resolve (because, eg, they depend on a // template parameter). // // It's kind of a shame not generating them even when they // could be referenced, but we already do the same for items // with invalid template parameters, and at least this way // they can be replaced, instead of generating plain invalid // code. let inner_canon_type = inner_item.expect_type().canonical_type(ctx); if inner_canon_type.is_invalid_type_param() { warn!( "Item contained invalid named type, skipping: \ {:?}, {:?}", item, inner_item ); return; } } let rust_name = ctx.rust_ident(&name); let mut tokens = if let Some(comment) = item.comment(ctx) { attributes::doc(comment) } else { quote! {} }; let alias_style = if ctx.options().type_alias.matches(&name) { AliasVariation::TypeAlias } else if ctx.options().new_type_alias.matches(&name) { AliasVariation::NewType } else if ctx.options().new_type_alias_deref.matches(&name) { AliasVariation::NewTypeDeref } else { ctx.options().default_alias_style }; // We prefer using `pub use` over `pub type` because of: // https://github.com/rust-lang/rust/issues/26264 if inner_rust_type.to_string().chars().all(|c| match c { // These are the only characters allowed in simple // paths, eg `good::dogs::Bront`. 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ' => true, _ => false, }) && outer_params.is_empty() && !is_opaque && alias_style == AliasVariation::TypeAlias && inner_item.expect_type().canonical_type(ctx).is_enum() { tokens.append_all(quote! { pub use }); let path = top_level_path(ctx, item); tokens.append_separated(path, quote!(::)); tokens.append_all(quote! 
{ :: #inner_rust_type as #rust_name ; }); result.push(tokens); return; } tokens.append_all(match alias_style { AliasVariation::TypeAlias => quote! { pub type #rust_name }, AliasVariation::NewType | AliasVariation::NewTypeDeref => { assert!( ctx.options().rust_features().repr_transparent, "repr_transparent feature is required to use {:?}", alias_style ); let mut attributes = vec![attributes::repr("transparent")]; let derivable_traits = derives_of_item(item, ctx); if !derivable_traits.is_empty() { let derives: Vec<_> = derivable_traits.into(); attributes.push(attributes::derives(&derives)) } quote! { #( #attributes )* pub struct #rust_name } } }); let params: Vec<_> = outer_params .into_iter() .filter_map(|p| p.as_template_param(ctx, &())) .collect(); if params .iter() .any(|p| ctx.resolve_type(*p).is_invalid_type_param()) { warn!( "Item contained invalid template \ parameter: {:?}", item ); return; } let params: Vec<_> = params .iter() .map(|p| { p.try_to_rust_ty(ctx, &()).expect( "type parameters can always convert to rust ty OK", ) }) .collect(); if !params.is_empty() { tokens.append_all(quote! { < #( #params ),* > }); } tokens.append_all(match alias_style { AliasVariation::TypeAlias => quote! { = #inner_rust_type ; }, AliasVariation::NewType | AliasVariation::NewTypeDeref => { quote! { (pub #inner_rust_type) ; } } }); if alias_style == AliasVariation::NewTypeDeref { let prefix = ctx.trait_prefix(); tokens.append_all(quote! { impl ::#prefix::ops::Deref for #rust_name { type Target = #inner_rust_type; #[inline] fn deref(&self) -> &Self::Target { &self.0 } } impl ::#prefix::ops::DerefMut for #rust_name { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } }); } result.push(tokens); } TypeKind::Enum(ref ei) => ei.codegen(ctx, result, item), TypeKind::ObjCId | TypeKind::ObjCSel => { result.saw_objc(); } TypeKind::ObjCInterface(ref interface) => { interface.codegen(ctx, result, item) } ref u @ TypeKind::UnresolvedTypeRef(..) 
=> { unreachable!("Should have been resolved after parsing {:?}!", u) } } } } struct Vtable<'a> { item_id: ItemId, #[allow(dead_code)] methods: &'a [Method], #[allow(dead_code)] base_classes: &'a [Base], } impl<'a> Vtable<'a> { fn new( item_id: ItemId, methods: &'a [Method], base_classes: &'a [Base], ) -> Self { Vtable { item_id, methods, base_classes, } } } impl<'a> CodeGenerator for Vtable<'a> { type Extra = Item; fn codegen<'b>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'b>, item: &Item, ) { assert_eq!(item.id(), self.item_id); debug_assert!(item.is_enabled_for_codegen(ctx)); // For now, generate an empty struct, later we should generate function // pointers and whatnot. let name = ctx.rust_ident(&self.canonical_name(ctx)); let void = helpers::ast_ty::c_void(ctx); result.push(quote! { #[repr(C)] pub struct #name ( #void ); }); } } impl<'a> ItemCanonicalName for Vtable<'a> { fn canonical_name(&self, ctx: &BindgenContext) -> String { format!("{}__bindgen_vtable", self.item_id.canonical_name(ctx)) } } impl<'a> TryToRustTy for Vtable<'a> { type Extra = (); fn try_to_rust_ty( &self, ctx: &BindgenContext, _: &(), ) -> error::Result<proc_macro2::TokenStream> { let name = ctx.rust_ident(self.canonical_name(ctx)); Ok(quote! { #name }) } } impl CodeGenerator for TemplateInstantiation { type Extra = Item; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug_assert!(item.is_enabled_for_codegen(ctx)); // Although uses of instantiations don't need code generation, and are // just converted to rust types in fields, vars, etc, we take this // opportunity to generate tests for their layout here. If the // instantiation is opaque, then its presumably because we don't // properly understand it (maybe because of specializations), and so we // shouldn't emit layout tests either. 
if !ctx.options().layout_tests || self.is_opaque(ctx, item) { return; } // If there are any unbound type parameters, then we can't generate a // layout test because we aren't dealing with a concrete type with a // concrete size and alignment. if ctx.uses_any_template_parameters(item.id()) { return; } let layout = item.kind().expect_type().layout(ctx); if let Some(layout) = layout { let size = layout.size; let align = layout.align; let name = item.full_disambiguated_name(ctx); let mut fn_name = format!("__bindgen_test_layout_{}_instantiation", name); let times_seen = result.overload_number(&fn_name); if times_seen > 0 { write!(&mut fn_name, "_{}", times_seen).unwrap(); } let fn_name = ctx.rust_ident_raw(fn_name); let prefix = ctx.trait_prefix(); let ident = item.to_rust_ty_or_opaque(ctx, &()); let size_of_expr = quote! { ::#prefix::mem::size_of::<#ident>() }; let align_of_expr = quote! { ::#prefix::mem::align_of::<#ident>() }; let item = quote! { #[test] fn #fn_name() { assert_eq!(#size_of_expr, #size, concat!("Size of template specialization: ", stringify!(#ident))); assert_eq!(#align_of_expr, #align, concat!("Alignment of template specialization: ", stringify!(#ident))); } }; result.push(item); } } } /// Trait for implementing the code generation of a struct or union field. 
trait FieldCodegen<'a> {
    // Per-implementor extra state threaded through `codegen` (e.g. the
    // bitfield unit name for `Bitfield`).
    type Extra;

    /// Generate the struct field declaration(s) into `fields` and any
    /// accessor methods into `methods`, updating `struct_layout` as the
    /// field is laid out.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        fields_should_be_private: bool,
        codegen_depth: usize,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        extra: Self::Extra,
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>;
}

impl<'a> FieldCodegen<'a> for Field {
    type Extra = ();

    /// Dispatch to the concrete field kind: a plain data member or a unit of
    /// adjacent bitfields.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        fields_should_be_private: bool,
        codegen_depth: usize,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        match *self {
            Field::DataMember(ref data) => {
                data.codegen(
                    ctx,
                    fields_should_be_private,
                    codegen_depth,
                    accessor_kind,
                    parent,
                    result,
                    struct_layout,
                    fields,
                    methods,
                    (),
                );
            }
            Field::Bitfields(ref unit) => {
                unit.codegen(
                    ctx,
                    fields_should_be_private,
                    codegen_depth,
                    accessor_kind,
                    parent,
                    result,
                    struct_layout,
                    fields,
                    methods,
                    (),
                );
            }
        }
    }
}

impl<'a> FieldCodegen<'a> for FieldData {
    type Extra = ();

    /// Generate a regular (non-bitfield) data member: the field declaration,
    /// any required padding before it, and optional getter/setter accessors.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        fields_should_be_private: bool,
        codegen_depth: usize,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        // Bitfields are handled by `FieldCodegen` implementations for
        // `BitfieldUnit` and `Bitfield`.
        assert!(self.bitfield_width().is_none());

        let field_item =
            self.ty().into_resolver().through_type_refs().resolve(ctx);
        let field_ty = field_item.expect_type();
        let mut ty = self.ty().to_rust_ty_or_opaque(ctx, &());
        ty.append_implicit_template_params(ctx, field_item);

        // NB: If supported, we use proper `union` types.
        let ty = if parent.is_union() && !parent.can_be_rust_union(ctx) {
            // Union that can't be a Rust `union`: wrap each member in the
            // `__BindgenUnionField` helper instead.
            result.saw_bindgen_union();
            if ctx.options().enable_cxx_namespaces {
                quote! {
                    root::__BindgenUnionField<#ty>
                }
            } else {
                quote! {
                    __BindgenUnionField<#ty>
                }
            }
        } else if let Some(item) = field_ty.is_incomplete_array(ctx) {
            // C flexible array member: represented via `__IncompleteArrayField`.
            result.saw_incomplete_array();

            let inner = item.to_rust_ty_or_opaque(ctx, &());

            if ctx.options().enable_cxx_namespaces {
                quote! {
                    root::__IncompleteArrayField<#inner>
                }
            } else {
                quote! {
                    __IncompleteArrayField<#inner>
                }
            }
        } else {
            ty
        };

        // `field` accumulates the doc attribute (if any) followed by the
        // actual field declaration.
        let mut field = quote! {};
        if ctx.options().generate_comments {
            if let Some(raw_comment) = self.comment() {
                let comment =
                    comment::preprocess(raw_comment, codegen_depth + 1);
                field = attributes::doc(comment);
            }
        }

        let field_name = self
            .name()
            .map(|name| ctx.rust_mangle(name).into_owned())
            .expect("Each field should have a name in codegen!");
        let field_ident = ctx.rust_ident_raw(field_name.as_str());

        // Unions never need padding between members (all start at offset 0).
        if !parent.is_union() {
            if let Some(padding_field) =
                struct_layout.pad_field(&field_name, field_ty, self.offset())
            {
                fields.extend(Some(padding_field));
            }
        }

        // Field-level annotations override the struct-level defaults.
        let is_private = self
            .annotations()
            .private_fields()
            .unwrap_or(fields_should_be_private);

        let accessor_kind =
            self.annotations().accessor_kind().unwrap_or(accessor_kind);

        if is_private {
            field.append_all(quote! {
                #field_ident : #ty ,
            });
        } else {
            field.append_all(quote! {
                pub #field_ident : #ty ,
            });
        }

        fields.extend(Some(field));

        // TODO: Factor the following code out, please!
        if accessor_kind == FieldAccessorKind::None {
            return;
        }

        let getter_name = ctx.rust_ident_raw(format!("get_{}", field_name));
        let mutable_getter_name =
            ctx.rust_ident_raw(format!("get_{}_mut", field_name));
        let field_name = ctx.rust_ident_raw(field_name);

        methods.extend(Some(match accessor_kind {
            FieldAccessorKind::None => unreachable!(),
            FieldAccessorKind::Regular => {
                quote! {
                    #[inline]
                    pub fn #getter_name(&self) -> & #ty {
                        &self.#field_name
                    }

                    #[inline]
                    pub fn #mutable_getter_name(&mut self) -> &mut #ty {
                        &mut self.#field_name
                    }
                }
            }
            FieldAccessorKind::Unsafe => {
                quote! {
                    #[inline]
                    pub unsafe fn #getter_name(&self) -> & #ty {
                        &self.#field_name
                    }

                    #[inline]
                    pub unsafe fn #mutable_getter_name(&mut self) -> &mut #ty {
                        &mut self.#field_name
                    }
                }
            }
            FieldAccessorKind::Immutable => {
                // Immutable accessor: getter only, no `_mut` variant.
                quote! {
                    #[inline]
                    pub fn #getter_name(&self) -> & #ty {
                        &self.#field_name
                    }
                }
            }
        }));
    }
}

impl BitfieldUnit {
    /// Get the constructor name for this bitfield unit.
    fn ctor_name(&self) -> proc_macro2::TokenStream {
        let ctor_name = Ident::new(
            &format!("new_bitfield_{}", self.nth()),
            Span::call_site(),
        );
        quote! {
            #ctor_name
        }
    }
}

impl Bitfield {
    /// Extend an under construction bitfield unit constructor with this
    /// bitfield. This sets the relevant bits on the `__bindgen_bitfield_unit`
    /// variable that's being constructed.
    fn extend_ctor_impl(
        &self,
        ctx: &BindgenContext,
        param_name: proc_macro2::TokenStream,
        mut ctor_impl: proc_macro2::TokenStream,
    ) -> proc_macro2::TokenStream {
        let bitfield_ty = ctx.resolve_type(self.ty());
        let bitfield_ty_layout = bitfield_ty
            .layout(ctx)
            .expect("Bitfield without layout? Gah!");
        let bitfield_int_ty = helpers::integer_type(ctx, bitfield_ty_layout)
            .expect(
                "Should already have verified that the bitfield is \
                 representable as an int",
            );

        let offset = self.offset_into_unit();
        let width = self.width() as u8;
        let prefix = ctx.trait_prefix();

        ctor_impl.append_all(quote!
        // Transmute the typed parameter to its integer representation and
        // store it into the unit at this bitfield's offset/width.
        {
            __bindgen_bitfield_unit.set(
                #offset,
                #width,
                {
                    let #param_name: #bitfield_int_ty = unsafe {
                        ::#prefix::mem::transmute(#param_name)
                    };
                    #param_name as u64
                }
            );
        });

        ctor_impl
    }
}

impl<'a> FieldCodegen<'a> for BitfieldUnit {
    type Extra = ();

    /// Generate the storage field for a whole bitfield unit, the accessors of
    /// each named bitfield inside it, and (when possible) a
    /// `new_bitfield_N(...)` constructor that packs all bitfields at once.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        fields_should_be_private: bool,
        codegen_depth: usize,
        accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        result: &mut CodegenResult,
        struct_layout: &mut StructLayoutTracker,
        fields: &mut F,
        methods: &mut M,
        _: (),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;

        result.saw_bitfield_unit();

        let layout = self.layout();
        let unit_field_ty = helpers::bitfield_unit(ctx, layout);
        let field_ty = {
            if parent.is_union() && !parent.can_be_rust_union(ctx) {
                // Same `__BindgenUnionField` wrapping as for regular members
                // of non-Rust-union unions.
                result.saw_bindgen_union();
                if ctx.options().enable_cxx_namespaces {
                    quote! {
                        root::__BindgenUnionField<#unit_field_ty>
                    }
                } else {
                    quote! {
                        __BindgenUnionField<#unit_field_ty>
                    }
                }
            } else {
                unit_field_ty.clone()
            }
        };

        let unit_field_name = format!("_bitfield_{}", self.nth());
        let unit_field_ident = ctx.rust_ident(&unit_field_name);

        let field = quote! {
            pub #unit_field_ident : #field_ty ,
        };
        fields.extend(Some(field));

        let ctor_name = self.ctor_name();
        let mut ctor_params = vec![];
        let mut ctor_impl = quote! {};

        // We cannot generate any constructor if the underlying storage can't
        // implement AsRef<[u8]> / AsMut<[u8]> / etc.
        let mut generate_ctor = layout.size <= RUST_DERIVE_IN_ARRAY_LIMIT;

        for bf in self.bitfields() {
            // Codegen not allowed for anonymous bitfields
            if bf.name().is_none() {
                continue;
            }

            if layout.size > RUST_DERIVE_IN_ARRAY_LIMIT {
                continue;
            }

            // The bitfield's own codegen reports back (via this flag) whether
            // its type maps to an integer; if not, we can't build a ctor.
            let mut bitfield_representable_as_int = true;

            bf.codegen(
                ctx,
                fields_should_be_private,
                codegen_depth,
                accessor_kind,
                parent,
                result,
                struct_layout,
                fields,
                methods,
                (&unit_field_name, &mut bitfield_representable_as_int),
            );

            // Generating a constructor requires the bitfield to be representable as an integer.
            if !bitfield_representable_as_int {
                generate_ctor = false;
                continue;
            }

            let param_name = bitfield_getter_name(ctx, bf);
            let bitfield_ty_item = ctx.resolve_item(bf.ty());
            let bitfield_ty = bitfield_ty_item.expect_type();
            let bitfield_ty =
                bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);

            ctor_params.push(quote! {
                #param_name : #bitfield_ty
            });
            ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl);
        }

        if generate_ctor {
            methods.extend(Some(quote! {
                #[inline]
                pub fn #ctor_name ( #( #ctor_params ),* ) -> #unit_field_ty {
                    let mut __bindgen_bitfield_unit: #unit_field_ty = Default::default();
                    #ctor_impl
                    __bindgen_bitfield_unit
                }
            }));
        }

        struct_layout.saw_bitfield_unit(layout);
    }
}

/// The name of the generated getter for `bitfield`, as a token stream.
fn bitfield_getter_name(
    ctx: &BindgenContext,
    bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
    let name = bitfield.getter_name();
    let name = ctx.rust_ident_raw(name);
    quote! { #name }
}

/// The name of the generated setter for `bitfield`, as a token stream.
fn bitfield_setter_name(
    ctx: &BindgenContext,
    bitfield: &Bitfield,
) -> proc_macro2::TokenStream {
    let setter = bitfield.setter_name();
    let setter = ctx.rust_ident_raw(setter);
    quote!
    { #setter }
}

impl<'a> FieldCodegen<'a> for Bitfield {
    // (unit field name, out-flag: whether this bitfield's type maps to an
    // integer type — cleared so the unit can skip ctor generation).
    type Extra = (&'a str, &'a mut bool);

    /// Generate getter/setter accessors for a single bitfield. Emits no
    /// struct field of its own: storage lives in the enclosing bitfield unit.
    fn codegen<F, M>(
        &self,
        ctx: &BindgenContext,
        _fields_should_be_private: bool,
        _codegen_depth: usize,
        _accessor_kind: FieldAccessorKind,
        parent: &CompInfo,
        _result: &mut CodegenResult,
        _struct_layout: &mut StructLayoutTracker,
        _fields: &mut F,
        methods: &mut M,
        (unit_field_name, bitfield_representable_as_int): (&'a str, &mut bool),
    ) where
        F: Extend<proc_macro2::TokenStream>,
        M: Extend<proc_macro2::TokenStream>,
    {
        let prefix = ctx.trait_prefix();
        let getter_name = bitfield_getter_name(ctx, self);
        let setter_name = bitfield_setter_name(ctx, self);
        let unit_field_ident = Ident::new(unit_field_name, Span::call_site());

        let bitfield_ty_item = ctx.resolve_item(self.ty());
        let bitfield_ty = bitfield_ty_item.expect_type();

        let bitfield_ty_layout = bitfield_ty
            .layout(ctx)
            .expect("Bitfield without layout? Gah!");
        let bitfield_int_ty =
            match helpers::integer_type(ctx, bitfield_ty_layout) {
                Some(int_ty) => {
                    *bitfield_representable_as_int = true;
                    int_ty
                }
                None => {
                    // Not representable as an integer: tell the caller and
                    // bail without generating accessors.
                    *bitfield_representable_as_int = false;
                    return;
                }
            };

        let bitfield_ty =
            bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);

        let offset = self.offset_into_unit();
        let width = self.width() as u8;

        if parent.is_union() && !parent.can_be_rust_union(ctx) {
            // Unit is wrapped in `__BindgenUnionField`, so go through
            // `as_ref()`/`as_mut()` to reach the storage.
            methods.extend(Some(quote! {
                #[inline]
                pub fn #getter_name(&self) -> #bitfield_ty {
                    unsafe {
                        ::#prefix::mem::transmute(
                            self.#unit_field_ident.as_ref().get(#offset, #width)
                                as #bitfield_int_ty
                        )
                    }
                }

                #[inline]
                pub fn #setter_name(&mut self, val: #bitfield_ty) {
                    unsafe {
                        let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
                        self.#unit_field_ident.as_mut().set(
                            #offset,
                            #width,
                            val as u64
                        )
                    }
                }
            }));
        } else {
            methods.extend(Some(quote! {
                #[inline]
                pub fn #getter_name(&self) -> #bitfield_ty {
                    unsafe {
                        ::#prefix::mem::transmute(
                            self.#unit_field_ident.get(#offset, #width)
                                as #bitfield_int_ty
                        )
                    }
                }

                #[inline]
                pub fn #setter_name(&mut self, val: #bitfield_ty) {
                    unsafe {
                        let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
                        self.#unit_field_ident.set(
                            #offset,
                            #width,
                            val as u64
                        )
                    }
                }
            }));
        }
    }
}

impl CodeGenerator for CompInfo {
    type Extra = Item;

    /// Generate the Rust struct/union for a C/C++ compound type, along with
    /// its layout tests, inner types, methods, and derived/manual trait impls.
    fn codegen<'a>(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'a>,
        item: &Item,
    ) {
        debug!("<CompInfo as CodeGenerator>::codegen: item = {:?}", item);
        debug_assert!(item.is_enabled_for_codegen(ctx));

        // Don't output classes with template parameters that aren't types, and
        // also don't output template specializations, neither total or partial.
        if self.has_non_type_template_params() {
            return;
        }

        let ty = item.expect_type();
        let layout = ty.layout(ctx);
        let mut packed = self.is_packed(ctx, &layout);

        let canonical_name = item.canonical_name(ctx);
        let canonical_ident = ctx.rust_ident(&canonical_name);

        // Generate the vtable from the method list if appropriate.
        //
        // TODO: I don't know how this could play with virtual methods that are
        // not in the list of methods found by us, we'll see. Also, could the
        // order of the vtable pointers vary?
        //
        // FIXME: Once we generate proper vtables, we need to codegen the
        // vtable, but *not* generate a field for it in the case that
        // HasVtable::has_vtable_ptr is false but HasVtable::has_vtable is true.
        //
        // Also, we need to generate the vtable in such a way it "inherits" from
        // the parent too.
        let is_opaque = item.is_opaque(ctx, &());
        let mut fields = vec![];
        let mut struct_layout =
            StructLayoutTracker::new(ctx, self, ty, &canonical_name);

        if !is_opaque {
            // Vtable pointer comes first, before bases and fields.
            if item.has_vtable_ptr(ctx) {
                let vtable =
                    Vtable::new(item.id(), self.methods(), self.base_members());
                vtable.codegen(ctx, result, item);

                let vtable_type = vtable
                    .try_to_rust_ty(ctx, &())
                    .expect("vtable to Rust type conversion is infallible")
                    .to_ptr(true);

                fields.push(quote! {
                    pub vtable_: #vtable_type ,
                });

                struct_layout.saw_vtable();
            }

            // Inline each base class that actually occupies storage as a
            // leading field.
            for base in self.base_members() {
                if !base.requires_storage(ctx) {
                    continue;
                }

                let inner_item = ctx.resolve_item(base.ty);
                let mut inner = inner_item.to_rust_ty_or_opaque(ctx, &());
                inner.append_implicit_template_params(ctx, &inner_item);
                let field_name = ctx.rust_ident(&base.field_name);

                struct_layout.saw_base(inner_item.expect_type());

                fields.push(quote! {
                    pub #field_name: #inner,
                });
            }
        }

        let mut methods = vec![];
        if !is_opaque {
            let codegen_depth = item.codegen_depth(ctx);
            let fields_should_be_private =
                item.annotations().private_fields().unwrap_or(false);
            let struct_accessor_kind = item
                .annotations()
                .accessor_kind()
                .unwrap_or(FieldAccessorKind::None);
            for field in self.fields() {
                field.codegen(
                    ctx,
                    fields_should_be_private,
                    codegen_depth,
                    struct_accessor_kind,
                    self,
                    result,
                    &mut struct_layout,
                    &mut fields,
                    &mut methods,
                    (),
                );
            }
        }

        let is_union = self.kind() == CompKind::Union;
        let layout = item.kind().expect_type().layout(ctx);

        let mut explicit_align = None;
        if is_opaque {
            // Opaque item should not have generated methods, fields.
            debug_assert!(fields.is_empty());
            debug_assert!(methods.is_empty());

            match layout {
                Some(l) => {
                    // Opaque types become a single blob of the right
                    // size/alignment.
                    explicit_align = Some(l.align);

                    let ty = helpers::blob(ctx, l);
                    fields.push(quote! {
                        pub _bindgen_opaque_blob: #ty ,
                    });
                }
                None => {
                    warn!("Opaque type without layout! Expect dragons!");
                }
            }
        } else if !is_union && !item.is_zero_sized(ctx) {
            // Tail padding so the Rust struct matches the C/C++ size.
            if let Some(padding_field) =
                layout.and_then(|layout| struct_layout.pad_struct(layout))
            {
                fields.push(padding_field);
            }

            if let Some(layout) = layout {
                if struct_layout.requires_explicit_align(layout) {
                    if layout.align == 1 {
                        packed = true;
                    } else {
                        explicit_align = Some(layout.align);
                        // Without `repr(align)` support, fake the alignment
                        // with a zero-sized, aligned blob member.
                        if !ctx.options().rust_features.repr_align {
                            let ty = helpers::blob(
                                ctx,
                                Layout::new(0, layout.align),
                            );
                            fields.push(quote! {
                                pub __bindgen_align: #ty ,
                            });
                        }
                    }
                }
            }
        } else if is_union && !self.is_forward_declaration() {
            // TODO(emilio): It'd be nice to unify this with the struct path
            // above somehow.
            let layout = layout.expect("Unable to get layout information?");
            struct_layout.saw_union(layout);

            if struct_layout.requires_explicit_align(layout) {
                explicit_align = Some(layout.align);
            }

            let ty = helpers::blob(ctx, layout);
            fields.push(if self.can_be_rust_union(ctx) {
                quote! {
                    _bindgen_union_align: #ty ,
                }
            } else {
                quote! {
                    pub bindgen_union_field: #ty ,
                }
            });
        }

        // C++ requires every struct to be addressable, so what C++ compilers do
        // is making the struct 1-byte sized.
        //
        // This is apparently not the case for C, see:
        // https://github.com/rust-lang/rust-bindgen/issues/551
        //
        // Just get the layout, and assume C++ if not.
        //
        // NOTE: This check is conveniently here to avoid the dummy fields we
        // may add for unused template parameters.
        if self.is_forward_declaration() {
            fields.push(quote! {
                _unused: [u8; 0],
            });
        } else if item.is_zero_sized(ctx) {
            let has_address = if is_opaque {
                // Generate the address field if it's an opaque type and
                // couldn't determine the layout of the blob.
                layout.is_none()
            } else {
                layout.map_or(true, |l| l.size != 0)
            };

            if has_address {
                let ty = helpers::blob(ctx, Layout::new(1, 1));
                fields.push(quote!
                {
                    pub _address: #ty,
                });
            }
        }

        // A `PhantomData<UnsafeCell<T>>` per used template parameter keeps the
        // generic parameters "used" and models C++'s aliasing-friendly
        // semantics for them.
        let mut generic_param_names = vec![];

        for (idx, ty) in item.used_template_params(ctx).iter().enumerate() {
            let param = ctx.resolve_type(*ty);
            let name = param.name().unwrap();
            let ident = ctx.rust_ident(name);
            generic_param_names.push(ident.clone());

            let prefix = ctx.trait_prefix();
            let field_name = ctx.rust_ident(format!("_phantom_{}", idx));
            fields.push(quote! {
                pub #field_name : ::#prefix::marker::PhantomData<
                    ::#prefix::cell::UnsafeCell<#ident>
                > ,
            });
        }

        let generics = if !generic_param_names.is_empty() {
            let generic_param_names = generic_param_names.clone();
            quote! {
                < #( #generic_param_names ),* >
            }
        } else {
            quote! {}
        };

        let mut attributes = vec![];
        let mut needs_clone_impl = false;
        let mut needs_default_impl = false;
        let mut needs_debug_impl = false;
        let mut needs_partialeq_impl = false;
        if let Some(comment) = item.comment(ctx) {
            attributes.push(attributes::doc(comment));
        }
        if packed && !is_opaque {
            let n = layout.map_or(1, |l| l.align);
            assert!(ctx.options().rust_features().repr_packed_n || n == 1);
            let packed_repr = if n == 1 {
                "packed".to_string()
            } else {
                format!("packed({})", n)
            };
            attributes.push(attributes::repr_list(&["C", &packed_repr]));
        } else {
            attributes.push(attributes::repr("C"));
        }

        if ctx.options().rust_features().repr_align {
            if let Some(explicit) = explicit_align {
                // Ensure that the struct has the correct alignment even in
                // presence of alignas.
                let explicit = helpers::ast_ty::int_expr(explicit as i64);
                attributes.push(quote! {
                    #[repr(align(#explicit))]
                });
            }
        }

        // Decide which traits can't be derived but should still get manual
        // impls, per the user's options.
        let derivable_traits = derives_of_item(item, ctx);
        if !derivable_traits.contains(DerivableTraits::DEBUG) {
            needs_debug_impl = ctx.options().derive_debug &&
                ctx.options().impl_debug &&
                !ctx.no_debug_by_name(item) &&
                !item.annotations().disallow_debug();
        }

        if !derivable_traits.contains(DerivableTraits::DEFAULT) {
            needs_default_impl = ctx.options().derive_default &&
                !self.is_forward_declaration();
        }

        let all_template_params = item.all_template_params(ctx);

        if derivable_traits.contains(DerivableTraits::COPY) &&
            !derivable_traits.contains(DerivableTraits::CLONE)
        {
            needs_clone_impl = true;
        }

        if !derivable_traits.contains(DerivableTraits::PARTIAL_EQ) {
            needs_partialeq_impl = ctx.options().derive_partialeq &&
                ctx.options().impl_partialeq &&
                ctx.lookup_can_derive_partialeq_or_partialord(item.id()) ==
                    CanDerive::Manually;
        }

        let mut derives: Vec<_> = derivable_traits.into();
        derives.extend(item.annotations().derives().iter().map(String::as_str));

        if !derives.is_empty() {
            attributes.push(attributes::derives(&derives))
        }

        let mut tokens = if is_union && self.can_be_rust_union(ctx) {
            quote! {
                #( #attributes )*
                pub union #canonical_ident
            }
        } else {
            quote! {
                #( #attributes )*
                pub struct #canonical_ident
            }
        };

        tokens.append_all(quote! {
            #generics {
                #( #fields )*
            }
        });
        result.push(tokens);

        // Generate the inner types and all that stuff.
        //
        // TODO: In the future we might want to be smart, and use nested
        // modules, and whatnot.
        for ty in self.inner_types() {
            let child_item = ctx.resolve_item(*ty);
            // assert_eq!(child_item.parent_id(), item.id());
            child_item.codegen(ctx, result, &());
        }

        // NOTE: Some unexposed attributes (like alignment attributes) may
        // affect layout, so we're bad and pray to the gods for avoid sending
        // all the tests to shit when parsing things like max_align_t.
        if self.found_unknown_attr() {
            warn!(
                "Type {} has an unknown attribute that may affect layout",
                canonical_ident
            );
        }

        // Layout tests and member functions only make sense for fully
        // concrete (non-generic) types.
        if all_template_params.is_empty() {
            if !is_opaque {
                for var in self.inner_vars() {
                    ctx.resolve_item(*var).codegen(ctx, result, &());
                }
            }

            if ctx.options().layout_tests && !self.is_forward_declaration() {
                if let Some(layout) = layout {
                    let fn_name =
                        format!("bindgen_test_layout_{}", canonical_ident);
                    let fn_name = ctx.rust_ident_raw(fn_name);
                    let prefix = ctx.trait_prefix();
                    let size_of_expr = quote! {
                        ::#prefix::mem::size_of::<#canonical_ident>()
                    };
                    let align_of_expr = quote! {
                        ::#prefix::mem::align_of::<#canonical_ident>()
                    };
                    let size = layout.size;
                    let align = layout.align;

                    // Skip the alignment assertion when the type is
                    // over-aligned and we couldn't express that via
                    // `repr(align)`.
                    let check_struct_align = if align >
                        ctx.target_pointer_size() &&
                        !ctx.options().rust_features().repr_align
                    {
                        None
                    } else {
                        Some(quote! {
                            assert_eq!(#align_of_expr,
                                   #align,
                                   concat!("Alignment of ", stringify!(#canonical_ident)));
                        })
                    };

                    // FIXME when [issue #465](https://github.com/rust-lang/rust-bindgen/issues/465) ready
                    let too_many_base_vtables = self
                        .base_members()
                        .iter()
                        .filter(|base| base.ty.has_vtable(ctx))
                        .count() >
                        1;

                    let should_skip_field_offset_checks =
                        is_opaque || too_many_base_vtables;

                    // One offset assertion per named data member, derived via
                    // a null-pointer field projection.
                    let check_field_offset = if should_skip_field_offset_checks
                    {
                        vec![]
                    } else {
                        let asserts = self.fields()
                            .iter()
                            .filter_map(|field| match *field {
                                Field::DataMember(ref f) if f.name().is_some() => Some(f),
                                _ => None,
                            })
                            .flat_map(|field| {
                                let name = field.name().unwrap();
                                field.offset().and_then(|offset| {
                                    let field_offset = offset / 8;
                                    let field_name = ctx.rust_ident(name);

                                    Some(quote! {
                                        assert_eq!(
                                            unsafe {
                                                &(*(::#prefix::ptr::null::<#canonical_ident>())).#field_name as *const _ as usize
                                            },
                                            #field_offset,
                                            concat!("Offset of field: ", stringify!(#canonical_ident), "::", stringify!(#field_name))
                                        );
                                    })
                                })
                            })
                            .collect::<Vec<proc_macro2::TokenStream>>();

                        asserts
                    };

                    let item = quote! {
                        #[test]
                        fn #fn_name() {
                            assert_eq!(#size_of_expr,
                                       #size,
                                       concat!("Size of: ", stringify!(#canonical_ident)));

                            #check_struct_align
                            #( #check_field_offset )*
                        }
                    };
                    result.push(item);
                }
            }

            // Member functions, constructors, and destructors, each gated by
            // the corresponding codegen-config switch.
            let mut method_names = Default::default();
            if ctx.options().codegen_config.methods() {
                for method in self.methods() {
                    assert!(method.kind() != MethodKind::Constructor);
                    method.codegen_method(
                        ctx,
                        &mut methods,
                        &mut method_names,
                        result,
                        self,
                    );
                }
            }

            if ctx.options().codegen_config.constructors() {
                for sig in self.constructors() {
                    Method::new(
                        MethodKind::Constructor,
                        *sig,
                        /* const */
                        false,
                    )
                    .codegen_method(
                        ctx,
                        &mut methods,
                        &mut method_names,
                        result,
                        self,
                    );
                }
            }

            if ctx.options().codegen_config.destructors() {
                if let Some((kind, destructor)) = self.destructor() {
                    debug_assert!(kind.is_destructor());
                    Method::new(kind, destructor, false).codegen_method(
                        ctx,
                        &mut methods,
                        &mut method_names,
                        result,
                        self,
                    );
                }
            }
        }

        // NB: We can't use to_rust_ty here since for opaque types this tries to
        // use the specialization knowledge to generate a blob field.
        let ty_for_impl = quote! {
            #canonical_ident #generics
        };

        if needs_clone_impl {
            result.push(quote! {
                impl #generics Clone for #ty_for_impl {
                    fn clone(&self) -> Self {
                        *self
                    }
                }
            });
        }

        if needs_default_impl {
            let prefix = ctx.trait_prefix();
            result.push(quote! {
                impl #generics Default for #ty_for_impl {
                    fn default() -> Self {
                        unsafe { ::#prefix::mem::zeroed() }
                    }
                }
            });
        }

        if needs_debug_impl {
            let impl_ = impl_debug::gen_debug_impl(
                ctx,
                self.fields(),
                item,
                self.kind(),
            );

            let prefix = ctx.trait_prefix();

            result.push(quote! {
                impl #generics ::#prefix::fmt::Debug for #ty_for_impl {
                    #impl_
                }
            });
        }

        if needs_partialeq_impl {
            if let Some(impl_) = impl_partialeq::gen_partialeq_impl(
                ctx,
                self,
                item,
                &ty_for_impl,
            ) {
                // Generic types need `T: PartialEq` bounds on the manual impl.
                let partialeq_bounds = if !generic_param_names.is_empty() {
                    let bounds = generic_param_names.iter().map(|t| {
                        quote! { #t: PartialEq }
                    });
                    quote! {
                        where #( #bounds ),*
                    }
                } else {
                    quote!
                    {}
                };

                let prefix = ctx.trait_prefix();
                result.push(quote! {
                    impl #generics ::#prefix::cmp::PartialEq for #ty_for_impl #partialeq_bounds {
                        #impl_
                    }
                });
            }
        }

        if !methods.is_empty() {
            result.push(quote! {
                impl #generics #ty_for_impl {
                    #( #methods )*
                }
            });
        }
    }
}

/// Code generation for one member function (method, constructor, or
/// destructor) of a compound type.
trait MethodCodegen {
    /// Push the generated wrapper into `methods` (and any supporting items
    /// into `result`), tracking overloads through `method_names`.
    fn codegen_method<'a>(
        &self,
        ctx: &BindgenContext,
        methods: &mut Vec<proc_macro2::TokenStream>,
        method_names: &mut HashMap<String, usize>,
        result: &mut CodegenResult<'a>,
        parent: &CompInfo,
    );
}

impl MethodCodegen for Method {
    fn codegen_method<'a>(
        &self,
        ctx: &BindgenContext,
        methods: &mut Vec<proc_macro2::TokenStream>,
        method_names: &mut HashMap<String, usize>,
        result: &mut CodegenResult<'a>,
        _parent: &CompInfo,
    ) {
        // The caller must only request kinds enabled in the codegen config.
        assert!({
            let cc = &ctx.options().codegen_config;
            match self.kind() {
                MethodKind::Constructor => cc.constructors(),
                MethodKind::Destructor => cc.destructors(),
                MethodKind::VirtualDestructor { .. } => cc.destructors(),
                MethodKind::Static |
                MethodKind::Normal |
                MethodKind::Virtual { .. } => cc.methods(),
            }
        });

        // TODO(emilio): We could generate final stuff at least.
        if self.is_virtual() {
            return; // FIXME
        }

        // First of all, output the actual function.
        let function_item = ctx.resolve_item(self.signature());
        if function_item.is_blacklisted(ctx) {
            // We shouldn't emit a method declaration if the function is blacklisted
            return;
        }
        function_item.codegen(ctx, result, &());

        let function = function_item.expect_function();
        let signature_item = ctx.resolve_item(function.signature());
        let mut name = match self.kind() {
            MethodKind::Constructor => "new".into(),
            MethodKind::Destructor => "destruct".into(),
            _ => function.name().to_owned(),
        };

        let signature = match *signature_item.expect_type().kind() {
            TypeKind::Function(ref sig) => sig,
            _ => panic!("How in the world?"),
        };

        // Skip thiscall methods unless the target Rust version supports the
        // thiscall ABI.
        if let (Abi::ThisCall, false) =
            (signature.abi(), ctx.options().rust_features().thiscall_abi)
        {
            return;
        }

        // Do not generate variadic methods, since rust does not allow
        // implementing them, and we don't do a good job at it anyway.
        if signature.is_variadic() {
            return;
        }

        // Overload counter: the second `foo` becomes `foo1`, and so on.
        let count = {
            let count = method_names.entry(name.clone()).or_insert(0);
            *count += 1;
            *count - 1
        };

        if count != 0 {
            name.push_str(&count.to_string());
        }

        let function_name = ctx.rust_ident(function_item.canonical_name(ctx));
        let mut args = utils::fnsig_arguments(ctx, signature);
        let mut ret = utils::fnsig_return_ty(ctx, signature);

        // Replace the raw `this` pointer argument with `&self`/`&mut self`.
        if !self.is_static() && !self.is_constructor() {
            args[0] = if self.is_const() {
                quote! { &self }
            } else {
                quote! { &mut self }
            };
        }

        // If it's a constructor, we always return `Self`, and we inject the
        // "this" parameter, so there's no need to ask the user for it.
        //
        // Note that constructors in Clang are represented as functions with
        // return-type = void.
        if self.is_constructor() {
            args.remove(0);
            ret = quote! { -> Self };
        }

        let mut exprs =
            helpers::ast_ty::arguments_from_signature(&signature, ctx);

        let mut stmts = vec![];

        // If it's a constructor, we need to insert an extra parameter with a
        // variable called `__bindgen_tmp` we're going to create.
        if self.is_constructor() {
            let prefix = ctx.trait_prefix();
            // Prefer `MaybeUninit` when available; `mem::uninitialized` is
            // the deprecated fallback for older Rust targets.
            let tmp_variable_decl = if ctx
                .options()
                .rust_features()
                .maybe_uninit
            {
                exprs[0] = quote! {
                    __bindgen_tmp.as_mut_ptr()
                };
                quote! {
                    let mut __bindgen_tmp = ::#prefix::mem::MaybeUninit::uninit()
                }
            } else {
                exprs[0] = quote! {
                    &mut __bindgen_tmp
                };
                quote! {
                    let mut __bindgen_tmp = ::#prefix::mem::uninitialized()
                }
            };
            stmts.push(tmp_variable_decl);
        } else if !self.is_static() {
            assert!(!exprs.is_empty());
            exprs[0] = quote! {
                self
            };
        };

        let call = quote! {
            #function_name (#( #exprs ),* )
        };

        stmts.push(call);

        if self.is_constructor() {
            stmts.push(if ctx.options().rust_features().maybe_uninit {
                quote! {
                    __bindgen_tmp.assume_init()
                }
            } else {
                quote! {
                    __bindgen_tmp
                }
            })
        }

        let block = quote! {
            #( #stmts );*
        };

        let mut attrs = vec![];
        attrs.push(attributes::inline());

        if signature.must_use() &&
            ctx.options().rust_features().must_use_function
        {
            attrs.push(attributes::must_use());
        }

        let name = ctx.rust_ident(&name);
        methods.push(quote! {
            #(#attrs)*
            pub unsafe fn #name ( #( #args ),* ) #ret {
                #block
            }
        });
    }
}

/// A helper type that represents different enum variations.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum EnumVariation {
    /// The code for this enum will use a Rust enum. Note that creating this in unsafe code
    /// (including FFI) with an invalid value will invoke undefined behaviour, whether or not
    /// its marked as non_exhaustive.
    Rust {
        /// Indicates whether the generated struct should be `#[non_exhaustive]`
        non_exhaustive: bool,
    },
    /// The code for this enum will use a newtype
    NewType {
        /// Indicates whether the newtype will have bitwise operators
        is_bitfield: bool,
    },
    /// The code for this enum will use consts
    Consts,
    /// The code for this enum will use a module containing consts
    ModuleConsts,
}

impl EnumVariation {
    /// True only for the `Rust` variation.
    fn is_rust(&self) -> bool {
        match *self {
            EnumVariation::Rust { .. } => true,
            _ => false,
        }
    }

    /// Both the `Const` and `ModuleConsts` variants will cause this to return
    /// true.
    fn is_const(&self) -> bool {
        match *self {
            EnumVariation::Consts | EnumVariation::ModuleConsts => true,
            _ => false,
        }
    }
}

impl Default for EnumVariation {
    fn default() -> EnumVariation {
        EnumVariation::Consts
    }
}

impl std::str::FromStr for EnumVariation {
    type Err = std::io::Error;

    /// Create a `EnumVariation` from a string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "rust" => Ok(EnumVariation::Rust {
                non_exhaustive: false,
            }),
            "rust_non_exhaustive" => Ok(EnumVariation::Rust {
                non_exhaustive: true,
            }),
            "bitfield" => Ok(EnumVariation::NewType { is_bitfield: true }),
            "consts" => Ok(EnumVariation::Consts),
            "moduleconsts" => Ok(EnumVariation::ModuleConsts),
            "newtype" => Ok(EnumVariation::NewType { is_bitfield: false }),
            // NOTE(review): the concat!'d message renders as
            // "...'consts','moduleconsts'..." — a space appears to be missing
            // after the comma; consider fixing the message text.
            _ => Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                concat!(
                    "Got an invalid EnumVariation. Accepted values ",
                    "are 'rust', 'rust_non_exhaustive', 'bitfield', 'consts',",
                    "'moduleconsts', and 'newtype'."
                ),
            )),
        }
    }
}

/// A helper type to construct different enum variations.
enum EnumBuilder<'a> {
    Rust {
        codegen_depth: usize,
        attrs: Vec<proc_macro2::TokenStream>,
        ident: Ident,
        tokens: proc_macro2::TokenStream,
        emitted_any_variants: bool,
    },
    NewType {
        codegen_depth: usize,
        canonical_name: &'a str,
        tokens: proc_macro2::TokenStream,
        is_bitfield: bool,
    },
    Consts {
        repr: proc_macro2::TokenStream,
        variants: Vec<proc_macro2::TokenStream>,
        codegen_depth: usize,
    },
    ModuleConsts {
        codegen_depth: usize,
        module_name: &'a str,
        module_items: Vec<proc_macro2::TokenStream>,
    },
}

impl<'a> EnumBuilder<'a> {
    /// Returns the depth of the code generation for a variant of this enum.
    fn codegen_depth(&self) -> usize {
        match *self {
            EnumBuilder::Rust { codegen_depth, .. } |
            EnumBuilder::NewType { codegen_depth, .. } |
            EnumBuilder::ModuleConsts { codegen_depth, .. } |
            EnumBuilder::Consts { codegen_depth, .. } => codegen_depth,
        }
    }

    /// Returns true if the builder is for a rustified enum.
fn is_rust_enum(&self) -> bool { match *self { EnumBuilder::Rust { .. } => true, _ => false, } } /// Create a new enum given an item builder, a canonical name, a name for /// the representation, and which variation it should be generated as. fn new( name: &'a str, attrs: Vec<proc_macro2::TokenStream>, repr: proc_macro2::TokenStream, enum_variation: EnumVariation, enum_codegen_depth: usize, ) -> Self { let ident = Ident::new(name, Span::call_site()); match enum_variation { EnumVariation::NewType { is_bitfield } => EnumBuilder::NewType { codegen_depth: enum_codegen_depth, canonical_name: name, tokens: quote! { #( #attrs )* pub struct #ident (pub #repr); }, is_bitfield, }, EnumVariation::Rust { .. } => { let tokens = quote!(); EnumBuilder::Rust { codegen_depth: enum_codegen_depth + 1, attrs, ident, tokens, emitted_any_variants: false, } } EnumVariation::Consts => { let mut variants = Vec::new(); variants.push(quote! { #( #attrs )* pub type #ident = #repr; }); EnumBuilder::Consts { repr, variants, codegen_depth: enum_codegen_depth, } } EnumVariation::ModuleConsts => { let ident = Ident::new( CONSTIFIED_ENUM_MODULE_REPR_NAME, Span::call_site(), ); let type_definition = quote! { #( #attrs )* pub type #ident = #repr; }; EnumBuilder::ModuleConsts { codegen_depth: enum_codegen_depth + 1, module_name: name, module_items: vec![type_definition], } } } } /// Add a variant to this enum. 
/// Renders the variant according to the builder's variation: a real Rust
/// enum variant, an associated/free constant (newtype), a plain constant,
/// or a module-level constant. Duplicate-value handling is the caller's job.
fn with_variant<'b>(
    self,
    ctx: &BindgenContext,
    variant: &EnumVariant,
    mangling_prefix: Option<&str>,
    rust_ty: proc_macro2::TokenStream,
    result: &mut CodegenResult<'b>,
    is_ty_named: bool,
) -> Self {
    let variant_name = ctx.rust_mangle(variant.name());
    let is_rust_enum = self.is_rust_enum();
    // Render the variant's value as a literal expression. Booleans become
    // integers for rustified enums, `true`/`false` otherwise.
    let expr = match variant.val() {
        EnumVariantValue::Boolean(v) if is_rust_enum => {
            helpers::ast_ty::uint_expr(v as u64)
        }
        EnumVariantValue::Boolean(v) => quote!(#v),
        EnumVariantValue::Signed(v) => helpers::ast_ty::int_expr(v),
        EnumVariantValue::Unsigned(v) => helpers::ast_ty::uint_expr(v),
    };

    // Attach the C/C++ comment as a doc attribute, when enabled.
    let mut doc = quote! {};
    if ctx.options().generate_comments {
        if let Some(raw_comment) = variant.comment() {
            let comment =
                comment::preprocess(raw_comment, self.codegen_depth());
            doc = attributes::doc(comment);
        }
    }

    match self {
        EnumBuilder::Rust {
            attrs,
            ident,
            tokens,
            emitted_any_variants: _,
            codegen_depth,
        } => {
            let name = ctx.rust_ident(variant_name);
            EnumBuilder::Rust {
                attrs,
                ident,
                codegen_depth,
                // Append this variant to the accumulated variant tokens.
                tokens: quote! {
                    #tokens
                    #doc
                    #name = #expr,
                },
                emitted_any_variants: true,
            }
        }

        EnumBuilder::NewType { canonical_name, .. } => {
            // Prefer an associated constant when the target Rust supports
            // them and the enum has a name; otherwise a free constant.
            if ctx.options().rust_features().associated_const && is_ty_named {
                let enum_ident = ctx.rust_ident(canonical_name);
                let variant_ident = ctx.rust_ident(variant_name);

                result.push(quote! {
                    impl #enum_ident {
                        #doc
                        pub const #variant_ident : #rust_ty = #rust_ty ( #expr );
                    }
                });
            } else {
                let ident = ctx.rust_ident(match mangling_prefix {
                    Some(prefix) => {
                        Cow::Owned(format!("{}_{}", prefix, variant_name))
                    }
                    None => variant_name,
                });
                result.push(quote! {
                    #doc
                    pub const #ident : #rust_ty = #rust_ty ( #expr );
                });
            }

            self
        }

        EnumBuilder::Consts { ref repr, .. } => {
            let constant_name = match mangling_prefix {
                Some(prefix) => {
                    Cow::Owned(format!("{}_{}", prefix, variant_name))
                }
                None => variant_name,
            };

            let ty = if is_ty_named { &rust_ty } else { repr };

            let ident = ctx.rust_ident(constant_name);
            result.push(quote! {
                #doc
                pub const #ident : #ty = #expr ;
            });

            self
        }
        EnumBuilder::ModuleConsts {
            codegen_depth,
            module_name,
            mut module_items,
        } => {
            let name = ctx.rust_ident(variant_name);
            let ty = ctx.rust_ident(CONSTIFIED_ENUM_MODULE_REPR_NAME);
            module_items.push(quote! {
                #doc
                pub const #name : #ty = #expr ;
            });

            EnumBuilder::ModuleConsts {
                module_name,
                module_items,
                codegen_depth,
            }
        }
    }
}

// Finish building this enum: push auxiliary items (bitfield operator impls,
// constants) into `result` and return the main definition's tokens.
fn build<'b>(
    self,
    ctx: &BindgenContext,
    rust_ty: proc_macro2::TokenStream,
    result: &mut CodegenResult<'b>,
) -> proc_macro2::TokenStream {
    match self {
        EnumBuilder::Rust {
            attrs,
            ident,
            tokens,
            emitted_any_variants,
            ..
        } => {
            // An empty repr(C) enum is not representable in Rust, so emit a
            // dummy variant for that case.
            let variants = if !emitted_any_variants {
                quote!(__bindgen_cannot_repr_c_on_empty_enum = 0)
            } else {
                tokens
            };

            quote! {
                #( #attrs )*
                pub enum #ident {
                    #variants
                }
            }
        }
        EnumBuilder::NewType {
            canonical_name,
            tokens,
            is_bitfield,
            ..
        } => {
            if !is_bitfield {
                return tokens;
            }

            // Bitfield-style newtypes also get bitwise-operator impls.
            let rust_ty_name = ctx.rust_ident_raw(canonical_name);
            let prefix = ctx.trait_prefix();

            result.push(quote! {
                impl ::#prefix::ops::BitOr<#rust_ty> for #rust_ty {
                    type Output = Self;

                    #[inline]
                    fn bitor(self, other: Self) -> Self {
                        #rust_ty_name(self.0 | other.0)
                    }
                }
            });

            result.push(quote! {
                impl ::#prefix::ops::BitOrAssign for #rust_ty {
                    #[inline]
                    fn bitor_assign(&mut self, rhs: #rust_ty) {
                        self.0 |= rhs.0;
                    }
                }
            });

            result.push(quote! {
                impl ::#prefix::ops::BitAnd<#rust_ty> for #rust_ty {
                    type Output = Self;

                    #[inline]
                    fn bitand(self, other: Self) -> Self {
                        #rust_ty_name(self.0 & other.0)
                    }
                }
            });

            result.push(quote! {
                impl ::#prefix::ops::BitAndAssign for #rust_ty {
                    #[inline]
                    fn bitand_assign(&mut self, rhs: #rust_ty) {
                        self.0 &= rhs.0;
                    }
                }
            });

            tokens
        }
        EnumBuilder::Consts { variants, .. } => quote! { #( #variants )* },
        EnumBuilder::ModuleConsts {
            module_items,
            module_name,
            ..
        } => {
            let ident = ctx.rust_ident(module_name);
            // The module body tokens continue on the next line.
            quote!
{ pub mod #ident { #( #module_items )* } } } } } } impl CodeGenerator for Enum { type Extra = Item; fn codegen<'a>( &self, ctx: &BindgenContext, result: &mut CodegenResult<'a>, item: &Item, ) { debug!("<Enum as CodeGenerator>::codegen: item = {:?}", item); debug_assert!(item.is_enabled_for_codegen(ctx)); let name = item.canonical_name(ctx); let ident = ctx.rust_ident(&name); let enum_ty = item.expect_type(); let layout = enum_ty.layout(ctx); let repr = self.repr().map(|repr| ctx.resolve_type(repr)); let repr = match repr { Some(repr) => match *repr.canonical_type(ctx).kind() { TypeKind::Int(int_kind) => int_kind, _ => panic!("Unexpected type as enum repr"), }, None => { warn!( "Guessing type of enum! Forward declarations of enums \ shouldn't be legal!" ); IntKind::Int } }; let signed = repr.is_signed(); let size = layout .map(|l| l.size) .or_else(|| repr.known_size()) .unwrap_or(0); let repr_name = match (signed, size) { (true, 1) => "i8", (false, 1) => "u8", (true, 2) => "i16", (false, 2) => "u16", (true, 4) => "i32", (false, 4) => "u32", (true, 8) => "i64", (false, 8) => "u64", _ => { warn!("invalid enum decl: signed: {}, size: {}", signed, size); "i32" } }; let mut attrs = vec![]; let variation = self.computed_enum_variation(ctx, item); // TODO(emilio): Delegate this to the builders? match variation { EnumVariation::Rust { non_exhaustive } => { attrs.push(attributes::repr(repr_name)); if non_exhaustive && ctx.options().rust_features().non_exhaustive { attrs.push(attributes::non_exhaustive()); } else if non_exhaustive && !ctx.options().rust_features().non_exhaustive { panic!("The rust target you're using doesn't seem to support non_exhaustive enums"); } } EnumVariation::NewType { .. 
} => { if ctx.options().rust_features.repr_transparent { attrs.push(attributes::repr("transparent")); } else { attrs.push(attributes::repr("C")); } } _ => {} }; if let Some(comment) = item.comment(ctx) { attrs.push(attributes::doc(comment)); } if !variation.is_const() { let mut derives = derives_of_item(item, ctx); // For backwards compat, enums always derive Clone/Eq/PartialEq/Hash, even // if we don't generate those by default. derives.insert( DerivableTraits::CLONE | DerivableTraits::COPY | DerivableTraits::HASH | DerivableTraits::PARTIAL_EQ | DerivableTraits::EQ, ); let derives: Vec<_> = derives.into(); attrs.push(attributes::derives(&derives)); } fn add_constant<'a>( ctx: &BindgenContext, enum_: &Type, // Only to avoid recomputing every time. enum_canonical_name: &Ident, // May be the same as "variant" if it's because the // enum is unnamed and we still haven't seen the // value. variant_name: &Ident, referenced_name: &Ident, enum_rust_ty: proc_macro2::TokenStream, result: &mut CodegenResult<'a>, ) { let constant_name = if enum_.name().is_some() { if ctx.options().prepend_enum_name { format!("{}_{}", enum_canonical_name, variant_name) } else { format!("{}", variant_name) } } else { format!("{}", variant_name) }; let constant_name = ctx.rust_ident(constant_name); result.push(quote! { pub const #constant_name : #enum_rust_ty = #enum_canonical_name :: #referenced_name ; }); } let repr = match self.repr() { Some(ty) => ty.to_rust_ty_or_opaque(ctx, &()), None => { let repr_name = ctx.rust_ident_raw(repr_name); quote! { #repr_name } } }; let mut builder = EnumBuilder::new( &name, attrs, repr, variation, item.codegen_depth(ctx), ); // A map where we keep a value -> variant relation. let mut seen_values = HashMap::<_, Ident>::default(); let enum_rust_ty = item.to_rust_ty_or_opaque(ctx, &()); let is_toplevel = item.is_toplevel(ctx); // Used to mangle the constants we generate in the unnamed-enum case. 
// Parent module name, used to mangle constants generated for unnamed
// (anonymous) enums nested inside another item.
let parent_canonical_name = if is_toplevel {
    None
} else {
    Some(item.parent_id().canonical_name(ctx))
};

let constant_mangling_prefix = if ctx.options().prepend_enum_name {
    if enum_ty.name().is_none() {
        parent_canonical_name.as_ref().map(|n| &**n)
    } else {
        Some(&*name)
    }
} else {
    None
};

// NB: We defer the creation of constified variants, in case we find
// another variant with the same value (which is the common thing to
// do).
let mut constified_variants = VecDeque::new();

let mut iter = self.variants().iter().peekable();
while let Some(variant) =
    iter.next().or_else(|| constified_variants.pop_front())
{
    if variant.hidden() {
        continue;
    }

    if variant.force_constification() && iter.peek().is_some() {
        constified_variants.push_back(variant);
        continue;
    }

    match seen_values.entry(variant.val()) {
        // A variant with this value was already emitted: alias it rather
        // than emitting a duplicate enum variant.
        Entry::Occupied(ref entry) => {
            if variation.is_rust() {
                let variant_name = ctx.rust_mangle(variant.name());
                let mangled_name =
                    if is_toplevel || enum_ty.name().is_some() {
                        variant_name
                    } else {
                        let parent_name =
                            parent_canonical_name.as_ref().unwrap();

                        Cow::Owned(format!(
                            "{}_{}",
                            parent_name, variant_name
                        ))
                    };

                let existing_variant_name = entry.get();
                // Use associated constants for named enums.
                if enum_ty.name().is_some() &&
                    ctx.options().rust_features().associated_const
                {
                    let enum_canonical_name = &ident;
                    let variant_name =
                        ctx.rust_ident_raw(&*mangled_name);
                    result.push(quote! {
                        impl #enum_rust_ty {
                            pub const #variant_name : #enum_rust_ty =
                                #enum_canonical_name :: #existing_variant_name ;
                        }
                    });
                } else {
                    add_constant(
                        ctx,
                        enum_ty,
                        &ident,
                        &Ident::new(&*mangled_name, Span::call_site()),
                        existing_variant_name,
                        enum_rust_ty.clone(),
                        result,
                    );
                }
            } else {
                builder = builder.with_variant(
                    ctx,
                    variant,
                    constant_mangling_prefix,
                    enum_rust_ty.clone(),
                    result,
                    enum_ty.name().is_some(),
                );
            }
        }
        Entry::Vacant(entry) => {
            builder = builder.with_variant(
                ctx,
                variant,
                constant_mangling_prefix,
                enum_rust_ty.clone(),
                result,
                enum_ty.name().is_some(),
            );

            let variant_name = ctx.rust_ident(variant.name());

            // If it's an unnamed enum, or constification is enforced,
            // we also generate a constant so it can be properly
            // accessed.
            if (variation.is_rust() && enum_ty.name().is_none()) ||
                variant.force_constification()
            {
                let mangled_name = if is_toplevel {
                    variant_name.clone()
                } else {
                    let parent_name =
                        parent_canonical_name.as_ref().unwrap();

                    Ident::new(
                        &format!("{}_{}", parent_name, variant_name),
                        Span::call_site(),
                    )
                };

                add_constant(
                    ctx,
                    enum_ty,
                    &ident,
                    &mangled_name,
                    &variant_name,
                    enum_rust_ty.clone(),
                    result,
                );
            }

            entry.insert(variant_name);
        }
    }
}

let item = builder.build(ctx, enum_rust_ty, result);
result.push(item);
}
}

/// Enum for the default type of macro constants.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum MacroTypeVariation {
    /// Use i32 or i64
    Signed,
    /// Use u32 or u64
    Unsigned,
}

impl MacroTypeVariation {
    /// Convert a `MacroTypeVariation` to its str representation.
    pub fn as_str(&self) -> &str {
        match self {
            MacroTypeVariation::Signed => "signed",
            MacroTypeVariation::Unsigned => "unsigned",
        }
    }
}

impl Default for MacroTypeVariation {
    fn default() -> MacroTypeVariation {
        MacroTypeVariation::Unsigned
    }
}

impl std::str::FromStr for MacroTypeVariation {
    type Err = std::io::Error;

    /// Create a `MacroTypeVariation` from a string.
fn from_str(s: &str) -> Result<Self, Self::Err> {
    match s {
        "signed" => Ok(MacroTypeVariation::Signed),
        "unsigned" => Ok(MacroTypeVariation::Unsigned),
        // Anything else is reported as an invalid-input I/O error.
        _ => Err(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            concat!(
                "Got an invalid MacroTypeVariation. Accepted values ",
                "are 'signed' and 'unsigned'"
            ),
        )),
    }
}
}

/// Enum for how aliases should be translated.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AliasVariation {
    /// Convert to regular Rust alias
    TypeAlias,
    /// Create a new type by wrapping the old type in a struct and using #[repr(transparent)]
    NewType,
    /// Same as NewStruct but also impl Deref to be able to use the methods of the wrapped type
    NewTypeDeref,
}

impl AliasVariation {
    /// Convert an `AliasVariation` to its str representation.
    pub fn as_str(&self) -> &str {
        match self {
            AliasVariation::TypeAlias => "type_alias",
            AliasVariation::NewType => "new_type",
            AliasVariation::NewTypeDeref => "new_type_deref",
        }
    }
}

impl Default for AliasVariation {
    fn default() -> AliasVariation {
        AliasVariation::TypeAlias
    }
}

impl std::str::FromStr for AliasVariation {
    type Err = std::io::Error;

    /// Create an `AliasVariation` from a string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "type_alias" => Ok(AliasVariation::TypeAlias),
            "new_type" => Ok(AliasVariation::NewType),
            "new_type_deref" => Ok(AliasVariation::NewTypeDeref),
            _ => Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                concat!(
                    "Got an invalid AliasVariation. Accepted values ",
                    "are 'type_alias', 'new_type', and 'new_type_deref'"
                ),
            )),
        }
    }
}

/// Fallible conversion to an opaque blob.
///
/// Implementors of this trait should provide the `try_get_layout` method to
/// fallibly get this thing's layout, which the provided `try_to_opaque` trait
/// method will use to convert the `Layout` into an opaque blob Rust type.
trait TryToOpaque {
    type Extra;

    /// Get the layout for this thing, if one is available.
fn try_get_layout(
    &self,
    ctx: &BindgenContext,
    extra: &Self::Extra,
) -> error::Result<Layout>;

/// Do not override this provided trait method.
fn try_to_opaque(
    &self,
    ctx: &BindgenContext,
    extra: &Self::Extra,
) -> error::Result<proc_macro2::TokenStream> {
    self.try_get_layout(ctx, extra)
        .map(|layout| helpers::blob(ctx, layout))
}
}

/// Infallible conversion of an IR thing to an opaque blob.
///
/// The resulting layout is best effort, and is unfortunately not guaranteed to
/// be correct. When all else fails, we fall back to a single byte layout as a
/// last resort, because C++ does not permit zero-sized types. See the note in
/// the `ToRustTyOrOpaque` doc comment about fallible versus infallible traits
/// and when each is appropriate.
///
/// Don't implement this directly. Instead implement `TryToOpaque`, and then
/// leverage the blanket impl for this trait.
trait ToOpaque: TryToOpaque {
    fn get_layout(&self, ctx: &BindgenContext, extra: &Self::Extra) -> Layout {
        self.try_get_layout(ctx, extra)
            .unwrap_or_else(|_| Layout::for_size(ctx, 1))
    }

    fn to_opaque(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> proc_macro2::TokenStream {
        let layout = self.get_layout(ctx, extra);
        helpers::blob(ctx, layout)
    }
}

impl<T> ToOpaque for T where T: TryToOpaque {}

/// Fallible conversion from an IR thing to an *equivalent* Rust type.
///
/// If the C/C++ construct represented by the IR thing cannot (currently) be
/// represented in Rust (for example, instantiations of templates with
/// const-value generic parameters) then the impl should return an `Err`. It
/// should *not* attempt to return an opaque blob with the correct size and
/// alignment. That is the responsibility of the `TryToOpaque` trait.
trait TryToRustTy {
    type Extra;

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> error::Result<proc_macro2::TokenStream>;
}

/// Fallible conversion to a Rust type or an opaque blob with the correct size
/// and alignment.
///
/// Don't implement this directly. Instead implement `TryToRustTy` and
/// `TryToOpaque`, and then leverage the blanket impl for this trait below.
trait TryToRustTyOrOpaque: TryToRustTy + TryToOpaque {
    type Extra;

    fn try_to_rust_ty_or_opaque(
        &self,
        ctx: &BindgenContext,
        extra: &<Self as TryToRustTyOrOpaque>::Extra,
    ) -> error::Result<proc_macro2::TokenStream>;
}

impl<E, T> TryToRustTyOrOpaque for T
where
    T: TryToRustTy<Extra = E> + TryToOpaque<Extra = E>,
{
    type Extra = E;

    fn try_to_rust_ty_or_opaque(
        &self,
        ctx: &BindgenContext,
        extra: &E,
    ) -> error::Result<proc_macro2::TokenStream> {
        // Prefer an equivalent Rust type; fall back to an opaque blob of
        // the right size, erroring out only when no layout is known either.
        self.try_to_rust_ty(ctx, extra).or_else(|_| {
            if let Ok(layout) = self.try_get_layout(ctx, extra) {
                Ok(helpers::blob(ctx, layout))
            } else {
                Err(error::Error::NoLayoutForOpaqueBlob)
            }
        })
    }
}

/// Infallible conversion to a Rust type, or an opaque blob with a best effort
/// of correct size and alignment.
///
/// Don't implement this directly. Instead implement `TryToRustTy` and
/// `TryToOpaque`, and then leverage the blanket impl for this trait below.
///
/// ### Fallible vs. Infallible Conversions to Rust Types
///
/// When should one use this infallible `ToRustTyOrOpaque` trait versus the
/// fallible `TryTo{RustTy, Opaque, RustTyOrOpaque}` triats? All fallible trait
/// implementations that need to convert another thing into a Rust type or
/// opaque blob in a nested manner should also use fallible trait methods and
/// propagate failure up the stack. Only infallible functions and methods like
/// CodeGenerator implementations should use the infallible
/// `ToRustTyOrOpaque`. The further out we push error recovery, the more likely
/// we are to get a usable `Layout` even if we can't generate an equivalent Rust
/// type for a C++ construct.
trait ToRustTyOrOpaque: TryToRustTy + ToOpaque {
    type Extra;

    fn to_rust_ty_or_opaque(
        &self,
        ctx: &BindgenContext,
        extra: &<Self as ToRustTyOrOpaque>::Extra,
    ) -> proc_macro2::TokenStream;
}

impl<E, T> ToRustTyOrOpaque for T
where
    T: TryToRustTy<Extra = E> + ToOpaque<Extra = E>,
{
    type Extra = E;

    fn to_rust_ty_or_opaque(
        &self,
        ctx: &BindgenContext,
        extra: &E,
    ) -> proc_macro2::TokenStream {
        self.try_to_rust_ty(ctx, extra)
            .unwrap_or_else(|_| self.to_opaque(ctx, extra))
    }
}

// Anything that can be turned into an ItemId delegates to the resolved item.
impl<T> TryToOpaque for T
where
    T: Copy + Into<ItemId>,
{
    type Extra = ();

    fn try_get_layout(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<Layout> {
        ctx.resolve_item((*self).into()).try_get_layout(ctx, &())
    }
}

impl<T> TryToRustTy for T
where
    T: Copy + Into<ItemId>,
{
    type Extra = ();

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<proc_macro2::TokenStream> {
        ctx.resolve_item((*self).into()).try_to_rust_ty(ctx, &())
    }
}

impl TryToOpaque for Item {
    type Extra = ();

    fn try_get_layout(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<Layout> {
        self.kind().expect_type().try_get_layout(ctx, self)
    }
}

impl TryToRustTy for Item {
    type Extra = ();

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<proc_macro2::TokenStream> {
        self.kind().expect_type().try_to_rust_ty(ctx, self)
    }
}

impl TryToOpaque for Type {
    type Extra = Item;

    fn try_get_layout(
        &self,
        ctx: &BindgenContext,
        _: &Item,
    ) -> error::Result<Layout> {
        self.layout(ctx).ok_or(error::Error::NoLayoutForOpaqueBlob)
    }
}

impl TryToRustTy for Type {
    type Extra = Item;

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> error::Result<proc_macro2::TokenStream> {
        use self::helpers::ast_ty::*;

        match *self.kind() {
            TypeKind::Void => Ok(c_void(ctx)),
            // TODO: we should do something smart with nullptr, or maybe *const
            // c_void is enough?
            TypeKind::NullPtr => Ok(c_void(ctx).to_ptr(true)),
            TypeKind::Int(ik) => {
                match ik {
                    IntKind::Bool => Ok(quote!
// Completes the `Ok(quote!` from the previous line: C's `bool` maps to Rust
// `bool`; the remaining integer kinds map to libc-style raw types or Rust
// primitives.
{ bool }),
                    IntKind::Char { .. } => Ok(raw_type(ctx, "c_char")),
                    IntKind::SChar => Ok(raw_type(ctx, "c_schar")),
                    IntKind::UChar => Ok(raw_type(ctx, "c_uchar")),
                    IntKind::Short => Ok(raw_type(ctx, "c_short")),
                    IntKind::UShort => Ok(raw_type(ctx, "c_ushort")),
                    IntKind::Int => Ok(raw_type(ctx, "c_int")),
                    IntKind::UInt => Ok(raw_type(ctx, "c_uint")),
                    IntKind::Long => Ok(raw_type(ctx, "c_long")),
                    IntKind::ULong => Ok(raw_type(ctx, "c_ulong")),
                    IntKind::LongLong => Ok(raw_type(ctx, "c_longlong")),
                    IntKind::ULongLong => Ok(raw_type(ctx, "c_ulonglong")),
                    IntKind::WChar => {
                        // wchar_t's width is platform dependent: pick the
                        // Rust integer type matching the computed layout.
                        let layout = self
                            .layout(ctx)
                            .expect("Couldn't compute wchar_t's layout?");
                        let ty = Layout::known_type_for_size(ctx, layout.size)
                            .expect("Non-representable wchar_t?");
                        let ident = ctx.rust_ident_raw(ty);
                        Ok(quote! { #ident })
                    }
                    IntKind::I8 => Ok(quote! { i8 }),
                    IntKind::U8 => Ok(quote! { u8 }),
                    IntKind::I16 => Ok(quote! { i16 }),
                    IntKind::U16 => Ok(quote! { u16 }),
                    IntKind::I32 => Ok(quote! { i32 }),
                    IntKind::U32 => Ok(quote! { u32 }),
                    IntKind::I64 => Ok(quote! { i64 }),
                    IntKind::U64 => Ok(quote! { u64 }),
                    IntKind::Custom { name, .. } => {
                        Ok(proc_macro2::TokenStream::from_str(name).unwrap())
                    }
                    IntKind::U128 => {
                        Ok(if ctx.options().rust_features.i128_and_u128 {
                            quote! { u128 }
                        } else {
                            // Best effort thing, but wrong alignment
                            // unfortunately.
                            quote! { [u64; 2] }
                        })
                    }
                    IntKind::I128 => {
                        Ok(if ctx.options().rust_features.i128_and_u128 {
                            quote! { i128 }
                        } else {
                            quote! { [u64; 2] }
                        })
                    }
                }
            }
            TypeKind::Float(fk) => {
                Ok(float_kind_rust_type(ctx, fk, self.layout(ctx)))
            }
            TypeKind::Complex(fk) => {
                let float_path =
                    float_kind_rust_type(ctx, fk, self.layout(ctx));

                ctx.generated_bindgen_complex();
                Ok(if ctx.options().enable_cxx_namespaces {
                    quote! {
                        root::__BindgenComplex<#float_path>
                    }
                } else {
                    quote! {
                        __BindgenComplex<#float_path>
                    }
                })
            }
            TypeKind::Function(ref fs) => {
                // We can't rely on the sizeof(Option<NonZero<_>>) ==
                // sizeof(NonZero<_>) optimization with opaque blobs (because
                // they aren't NonZero), so don't *ever* use an or_opaque
                // variant here.
                let ty = fs.try_to_rust_ty(ctx, &())?;

                let prefix = ctx.trait_prefix();
                Ok(quote! {
                    ::#prefix::option::Option<#ty>
                })
            }
            TypeKind::Array(item, len) | TypeKind::Vector(item, len) => {
                let ty = item.try_to_rust_ty(ctx, &())?;
                Ok(quote! {
                    [ #ty ; #len ]
                })
            }
            TypeKind::Enum(..) => {
                let path = item.namespace_aware_canonical_path(ctx);
                let path =
                    proc_macro2::TokenStream::from_str(&path.join("::"))
                        .unwrap();
                Ok(quote!(#path))
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                inst.try_to_rust_ty(ctx, item)
            }
            TypeKind::ResolvedTypeRef(inner) => inner.try_to_rust_ty(ctx, &()),
            TypeKind::TemplateAlias(..) |
            TypeKind::Alias(..) |
            TypeKind::BlockPointer(..) => {
                if self.is_block_pointer() && !ctx.options().generate_block {
                    let void = c_void(ctx);
                    return Ok(void.to_ptr(/* is_const = */ false));
                }
                let template_params = item
                    .used_template_params(ctx)
                    .into_iter()
                    .filter(|param| param.is_template_param(ctx, &()))
                    .collect::<Vec<_>>();

                if item.is_opaque(ctx, &()) && !template_params.is_empty() {
                    self.try_to_opaque(ctx, item)
                } else if let Some(ty) = self
                    .name()
                    .and_then(|name| utils::type_from_named(ctx, name))
                {
                    Ok(ty)
                } else {
                    utils::build_path(item, ctx)
                }
            }
            TypeKind::Comp(ref info) => {
                let template_params = item.all_template_params(ctx);
                if info.has_non_type_template_params() ||
                    (item.is_opaque(ctx, &()) && !template_params.is_empty())
                {
                    return self.try_to_opaque(ctx, item);
                }

                utils::build_path(item, ctx)
            }
            TypeKind::Opaque => self.try_to_opaque(ctx, item),
            TypeKind::Pointer(inner) | TypeKind::Reference(inner) => {
                let is_const = ctx.resolve_type(inner).is_const();

                let inner =
                    inner.into_resolver().through_type_refs().resolve(ctx);
                let inner_ty = inner.expect_type();

                let is_objc_pointer = match inner_ty.kind() {
                    TypeKind::ObjCInterface(..) => true,
                    _ => false,
                };

                // Regardless if we can properly represent the inner type, we
                // should always generate a proper pointer here, so use
                // infallible conversion of the inner type.
                let mut ty = inner.to_rust_ty_or_opaque(ctx, &());
                ty.append_implicit_template_params(ctx, inner);

                // Avoid the first function pointer level, since it's already
                // represented in Rust.
                if inner_ty.canonical_type(ctx).is_function() ||
                    is_objc_pointer
                {
                    Ok(ty)
                } else {
                    Ok(ty.to_ptr(is_const))
                }
            }
            TypeKind::TypeParam => {
                let name = item.canonical_name(ctx);
                let ident = ctx.rust_ident(&name);
                Ok(quote! {
                    #ident
                })
            }
            TypeKind::ObjCSel => Ok(quote! {
                objc::runtime::Sel
            }),
            TypeKind::ObjCId => Ok(quote! {
                id
            }),
            TypeKind::ObjCInterface(ref interface) => {
                let name = ctx.rust_ident(interface.name());
                Ok(quote! {
                    #name
                })
            }
            ref u @ TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("Should have been resolved after parsing {:?}!", u)
            }
        }
    }
}

impl TryToOpaque for TemplateInstantiation {
    type Extra = Item;

    fn try_get_layout(
        &self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> error::Result<Layout> {
        item.expect_type()
            .layout(ctx)
            .ok_or(error::Error::NoLayoutForOpaqueBlob)
    }
}

impl TryToRustTy for TemplateInstantiation {
    type Extra = Item;

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> error::Result<proc_macro2::TokenStream> {
        if self.is_opaque(ctx, item) {
            return Err(error::Error::InstantiationOfOpaqueType);
        }

        let def = self
            .template_definition()
            .into_resolver()
            .through_type_refs()
            .resolve(ctx);

        // Build the fully-qualified path of the template definition.
        let mut ty = quote! {};
        let def_path = def.namespace_aware_canonical_path(ctx);
        ty.append_separated(
            def_path.into_iter().map(|p| ctx.rust_ident(p)),
            quote!(::),
        );

        let def_params = def.self_template_params(ctx);
        if def_params.is_empty() {
            // This can happen if we generated an opaque type for a partial
            // template specialization, and we've hit an instantiation of
            // that partial specialization.
// Continues the branch begun on the previous line: an instantiation of a
// partially specialized template that we could only make opaque.
extra_assert!(def.is_opaque(ctx, &()));
            return Err(error::Error::InstantiationOfOpaqueType);
        }

        // TODO: If the definition type is a template class/struct
        // definition's member template definition, it could rely on
        // generic template parameters from its outer template
        // class/struct. When we emit bindings for it, it could require
        // *more* type arguments than we have here, and we will need to
        // reconstruct them somehow. We don't have any means of doing
        // that reconstruction at this time.
        let template_args = self
            .template_arguments()
            .iter()
            .zip(def_params.iter())
            // Only pass type arguments for the type parameters that
            // the def uses.
            .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param))
            .map(|(arg, _)| {
                let arg = arg.into_resolver().through_type_refs().resolve(ctx);
                let mut ty = arg.try_to_rust_ty(ctx, &())?;
                ty.append_implicit_template_params(ctx, arg);
                Ok(ty)
            })
            .collect::<error::Result<Vec<_>>>()?;

        if template_args.is_empty() {
            return Ok(ty);
        }

        Ok(quote! {
            #ty < #( #template_args ),* >
        })
    }
}

impl TryToRustTy for FunctionSig {
    type Extra = ();

    fn try_to_rust_ty(
        &self,
        ctx: &BindgenContext,
        _: &(),
    ) -> error::Result<proc_macro2::TokenStream> {
        // TODO: we might want to consider ignoring the reference return value.
        let ret = utils::fnsig_return_ty(ctx, &self);
        let arguments = utils::fnsig_arguments(ctx, &self);
        let abi = self.abi();

        match abi {
            Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => {
                warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target");
                Ok(proc_macro2::TokenStream::new())
            }
            // A bare `unsafe extern "abi" fn(...) -> ...` pointer type.
            _ => Ok(quote!
            {
                unsafe extern #abi fn ( #( #arguments ),* ) #ret
            }),
        }
    }
}

impl CodeGenerator for Function {
    type Extra = Item;

    fn codegen<'a>(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'a>,
        item: &Item,
    ) {
        debug!("<Function as CodeGenerator>::codegen: item = {:?}", item);
        debug_assert!(item.is_enabled_for_codegen(ctx));

        // We can't currently do anything with Internal functions so just
        // avoid generating anything for them.
        match self.linkage() {
            Linkage::Internal => return,
            Linkage::External => {}
        }

        // Pure virtual methods have no actual symbol, so we can't generate
        // something meaningful for them.
        match self.kind() {
            FunctionKind::Method(ref method_kind)
                if method_kind.is_pure_virtual() =>
            {
                return;
            }
            _ => {}
        }

        // Similar to static member variables in a class template, we can't
        // generate bindings to template functions, because the set of
        // instantiations is open ended and we have no way of knowing which
        // monomorphizations actually exist.
        if !item.all_template_params(ctx).is_empty() {
            return;
        }

        let name = self.name();
        let mut canonical_name = item.canonical_name(ctx);
        let mangled_name = self.mangled_name();

        {
            let seen_symbol_name = mangled_name.unwrap_or(&canonical_name);

            // TODO: Maybe warn here if there's a type/argument mismatch, or
            // something?
// Deduplicate by symbol name: skip functions whose symbol has already
// been emitted (continues the scope opened on the previous line).
if result.seen_function(seen_symbol_name) {
                return;
            }
            result.saw_function(seen_symbol_name);
        }

        let signature_item = ctx.resolve_item(self.signature());
        let signature =
            signature_item.kind().expect_type().canonical_type(ctx);
        let signature = match *signature.kind() {
            TypeKind::Function(ref sig) => sig,
            _ => panic!("Signature kind is not a Function: {:?}", signature),
        };

        let args = utils::fnsig_arguments(ctx, signature);
        let ret = utils::fnsig_return_ty(ctx, signature);

        let mut attributes = vec![];

        if signature.must_use() &&
            ctx.options().rust_features().must_use_function
        {
            attributes.push(attributes::must_use());
        }

        if let Some(comment) = item.comment(ctx) {
            attributes.push(attributes::doc(comment));
        }

        // Handle overloaded functions by giving each overload its own unique
        // suffix.
        let times_seen = result.overload_number(&canonical_name);
        if times_seen > 0 {
            write!(&mut canonical_name, "{}", times_seen).unwrap();
        }

        // Reject ABIs we can't express for the configured Rust target.
        let abi = match signature.abi() {
            Abi::ThisCall if !ctx.options().rust_features().thiscall_abi => {
                warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target");
                return;
            }
            Abi::Win64 if signature.is_variadic() => {
                warn!("Skipping variadic function with Win64 ABI that isn't supported");
                return;
            }
            Abi::Unknown(unknown_abi) => {
                panic!(
                    "Invalid or unknown abi {:?} for function {:?} ({:?})",
                    unknown_abi, canonical_name, self
                );
            }
            abi => abi,
        };

        // Only add #[link_name] when the canonical name won't already mangle
        // to the right symbol.
        let link_name = mangled_name.unwrap_or(name);
        if !utils::names_will_be_identical_after_mangling(
            &canonical_name,
            link_name,
            Some(abi),
        ) {
            attributes.push(attributes::link_name(link_name));
        }

        // Unfortunately this can't piggyback on the `attributes` list because
        // the #[link(wasm_import_module)] needs to happen before the `extern
        // "C"` block. It doesn't get picked up properly otherwise
        let wasm_link_attribute =
            ctx.options().wasm_import_module_name.as_ref().map(|name| {
                quote!
                {
                    #[link(wasm_import_module = #name)]
                }
            });

        let ident = ctx.rust_ident(canonical_name);
        let tokens = quote! {
            #wasm_link_attribute
            extern #abi {
                #(#attributes)*
                pub fn #ident ( #( #args ),* ) #ret;
            }
        };
        result.push(tokens);
    }
}

/// Generate the tokens for one Objective-C method (class or instance) as a
/// default trait method that forwards to `msg_send!`.
fn objc_method_codegen(
    ctx: &BindgenContext,
    method: &ObjCMethod,
    class_name: Option<&str>,
    prefix: &str,
) -> proc_macro2::TokenStream {
    let signature = method.signature();
    let fn_args = utils::fnsig_arguments(ctx, signature);
    let fn_ret = utils::fnsig_return_ty(ctx, signature);

    // Instance methods additionally take `&self`.
    let sig = if method.is_class_method() {
        let fn_args = fn_args.clone();
        quote! {
            ( #( #fn_args ),* ) #fn_ret
        }
    } else {
        let fn_args = fn_args.clone();
        let args = iter::once(quote! { &self }).chain(fn_args.into_iter());
        quote! {
            ( #( #args ),* ) #fn_ret
        }
    };

    let methods_and_args = method.format_method_call(&fn_args);

    let body = if method.is_class_method() {
        let class_name = ctx.rust_ident(
            class_name
                .expect("Generating a class method without class name?")
                .to_owned(),
        );
        quote! {
            msg_send!(class!(#class_name), #methods_and_args)
        }
    } else {
        quote! {
            msg_send!(*self, #methods_and_args)
        }
    };

    let method_name =
        ctx.rust_ident(format!("{}{}", prefix, method.rust_name()));

    // The method body tokens continue on the next line.
    quote!
// Completes the `quote!` begun at the end of `objc_method_codegen` on the
// previous line: the default trait method forwarding to `msg_send!`.
{
    unsafe fn #method_name #sig
        where <Self as std::ops::Deref>::Target: objc::Message + Sized {
        #body
    }
}
}

impl CodeGenerator for ObjCInterface {
    type Extra = Item;

    fn codegen<'a>(
        &self,
        ctx: &BindgenContext,
        result: &mut CodegenResult<'a>,
        item: &Item,
    ) {
        debug_assert!(item.is_enabled_for_codegen(ctx));

        let mut impl_items = vec![];

        // Instance methods become trait methods with no prefix.
        for method in self.methods() {
            let impl_item = objc_method_codegen(ctx, method, None, "");
            impl_items.push(impl_item);
        }

        let instance_method_names: Vec<_> =
            self.methods().iter().map(|m| m.rust_name()).collect();

        // Class methods get a "class_" prefix when their name collides with
        // an instance method.
        for class_method in self.class_methods() {
            let ambiquity =
                instance_method_names.contains(&class_method.rust_name());
            let prefix = if ambiquity { "class_" } else { "" };
            let impl_item = objc_method_codegen(
                ctx,
                class_method,
                Some(self.name()),
                prefix,
            );
            impl_items.push(impl_item);
        }

        let trait_name = ctx.rust_ident(self.rust_name());
        let trait_constraints = quote! {
            Sized + std::ops::Deref
        };
        let trait_block = if self.is_template() {
            let template_names: Vec<Ident> = self
                .template_names
                .iter()
                .map(|g| ctx.rust_ident(g))
                .collect();

            quote! {
                pub trait #trait_name <#(#template_names),*> : #trait_constraints {
                    #( #impl_items )*
                }
            }
        } else {
            quote! {
                pub trait #trait_name : #trait_constraints {
                    #( #impl_items )*
                }
            }
        };

        let class_name = ctx.rust_ident(self.name());
        if !self.is_category() && !self.is_protocol() {
            // Newtype wrapper over the raw `id`, Deref-ing to the runtime
            // Object so the trait methods can be invoked on it.
            let struct_block = quote!
            {
                #[repr(transparent)]
                #[derive(Clone)]
                pub struct #class_name(pub id);
                impl std::ops::Deref for #class_name {
                    type Target = objc::runtime::Object;
                    fn deref(&self) -> &Self::Target {
                        unsafe {
                            &*self.0
                        }
                    }
                }
                unsafe impl objc::Message for #class_name { }
                impl #class_name {
                    pub fn alloc() -> Self {
                        Self(unsafe {
                            msg_send!(objc::class!(#class_name), alloc)
                        })
                    }
                }
            };
            result.push(struct_block);

            // Emit empty protocol impls for everything this interface
            // conforms to, deduplicated across the whole parent chain.
            let mut protocol_set: HashSet<ItemId> = Default::default();
            for protocol_id in self.conforms_to.iter() {
                protocol_set.insert(*protocol_id);
                let protocol_name = ctx.rust_ident(
                    ctx.resolve_type(protocol_id.expect_type_id(ctx))
                        .name()
                        .unwrap(),
                );
                let impl_trait = quote! {
                    impl #protocol_name for #class_name { }
                };
                result.push(impl_trait);
            }

            // Walk the superclass chain, implementing each parent's trait
            // (and its protocols) for this class.
            let mut parent_class = self.parent_class;
            while let Some(parent_id) = parent_class {
                let parent = parent_id
                    .expect_type_id(ctx)
                    .into_resolver()
                    .through_type_refs()
                    .resolve(ctx)
                    .expect_type()
                    .kind();

                let parent = match parent {
                    TypeKind::ObjCInterface(ref parent) => parent,
                    _ => break,
                };
                parent_class = parent.parent_class;

                let parent_name = ctx.rust_ident(parent.rust_name());
                let impl_trait = if parent.is_template() {
                    let template_names: Vec<Ident> = parent
                        .template_names
                        .iter()
                        .map(|g| ctx.rust_ident(g))
                        .collect();
                    quote! {
                        impl <#(#template_names :'static),*> #parent_name <#(#template_names),*> for #class_name { }
                    }
                } else {
                    quote! {
                        impl #parent_name for #class_name { }
                    }
                };
                result.push(impl_trait);
                for protocol_id in parent.conforms_to.iter() {
                    if protocol_set.insert(*protocol_id) {
                        let protocol_name = ctx.rust_ident(
                            ctx.resolve_type(protocol_id.expect_type_id(ctx))
                                .name()
                                .unwrap(),
                        );
                        let impl_trait = quote! {
                            impl #protocol_name for #class_name { }
                        };
                        result.push(impl_trait);
                    }
                }

                if !parent.is_template() {
                    let parent_struct_name = parent.name();
                    let child_struct_name = self.name();
                    let parent_struct = ctx.rust_ident(parent_struct_name);
                    // Upcast: converting a child into its parent wrapper is
                    // always valid.
                    let from_block = quote!
                    {
                        impl From<#class_name> for #parent_struct {
                            fn from(child: #class_name) -> #parent_struct {
                                #parent_struct(child.0)
                            }
                        }
                    };
                    result.push(from_block);

                    let error_msg = format!(
                        "This {} cannot be downcasted to {}",
                        parent_struct_name, child_struct_name
                    );
                    // Downcast: checked at runtime with isKindOfClass:.
                    let try_into_block = quote! {
                        impl std::convert::TryFrom<#parent_struct> for #class_name {
                            type Error = &'static str;
                            fn try_from(parent: #parent_struct) -> Result<#class_name, Self::Error> {
                                let is_kind_of : bool = unsafe { msg_send!(parent, isKindOfClass:class!(#class_name))};
                                if is_kind_of {
                                    Ok(#class_name(parent.0))
                                } else {
                                    Err(#error_msg)
                                }
                            }
                        }
                    };
                    result.push(try_into_block);
                }
            }
        }

        if !self.is_protocol() {
            let impl_block = if self.is_template() {
                let template_names: Vec<Ident> = self
                    .template_names
                    .iter()
                    .map(|g| ctx.rust_ident(g))
                    .collect();
                quote! {
                    impl <#(#template_names :'static),*> #trait_name <#(#template_names),*> for #class_name { }
                }
            } else {
                quote! {
                    impl #trait_name for #class_name { }
                }
            };
            result.push(impl_block);
        }

        result.push(trait_block);
        result.saw_objc();
    }
}

/// Entry point of code generation: walks every enabled item starting from
/// the root module and collects the generated token streams.
pub(crate) fn codegen(
    context: BindgenContext,
) -> (Vec<proc_macro2::TokenStream>, BindgenOptions) {
    context.gen(|context| {
        let _t = context.timer("codegen");
        let counter = Cell::new(0);
        let mut result = CodegenResult::new(&counter);

        debug!("codegen: {:?}", context.options());

        // Optional debugging aid: dump the IR of every item selected for
        // code generation.
        if context.options().emit_ir {
            let codegen_items = context.codegen_items();
            for (id, item) in context.items() {
                if codegen_items.contains(&id) {
                    println!("ir: {:?} = {:#?}", id, item);
                }
            }
        }

        if let Some(path) = context.options().emit_ir_graphviz.as_ref() {
            match dot::write_dot_file(context, path) {
                Ok(()) => info!(
                    "Your dot file was generated successfully into: {}",
                    path
                ),
                Err(e) => warn!("{}", e),
            }
        }

        context.resolve_item(context.root_module()).codegen(
            context,
            &mut result,
            &(),
        );

        result.items
    })
}

mod utils {
    use super::{error, ToRustTyOrOpaque};
    use crate::ir::context::BindgenContext;
    use crate::ir::function::{Abi, FunctionSig};
    use
crate::ir::item::{Item, ItemCanonicalPath}; use crate::ir::ty::TypeKind; use proc_macro2; use std::borrow::Cow; use std::mem; use std::str::FromStr; pub fn prepend_bitfield_unit_type( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let bitfield_unit_src = include_str!("./bitfield_unit.rs"); let bitfield_unit_src = if ctx.options().rust_features().min_const_fn { Cow::Borrowed(bitfield_unit_src) } else { Cow::Owned(bitfield_unit_src.replace("const fn ", "fn ")) }; let bitfield_unit_type = proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap(); let bitfield_unit_type = quote!(#bitfield_unit_type); let items = vec![bitfield_unit_type]; let old_items = mem::replace(result, items); result.extend(old_items); } pub fn prepend_objc_header( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let use_objc = if ctx.options().objc_extern_crate { quote! { #[macro_use] extern crate objc; } } else { quote! { use objc; } }; let id_type = quote! { #[allow(non_camel_case_types)] pub type id = *mut objc::runtime::Object; }; let items = vec![use_objc, id_type]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_block_header( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let use_block = if ctx.options().block_extern_crate { quote! { extern crate block; } } else { quote! { use block; } }; let items = vec![use_block]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_union_types( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let prefix = ctx.trait_prefix(); // If the target supports `const fn`, declare eligible functions // as `const fn` else just `fn`. let const_fn = if ctx.options().rust_features().min_const_fn { quote! { const fn } } else { quote! { fn } }; // TODO(emilio): The fmt::Debug impl could be way nicer with // std::intrinsics::type_name, but... let union_field_decl = quote! 
{ #[repr(C)] pub struct __BindgenUnionField<T>(::#prefix::marker::PhantomData<T>); }; let union_field_impl = quote! { impl<T> __BindgenUnionField<T> { #[inline] pub #const_fn new() -> Self { __BindgenUnionField(::#prefix::marker::PhantomData) } #[inline] pub unsafe fn as_ref(&self) -> &T { ::#prefix::mem::transmute(self) } #[inline] pub unsafe fn as_mut(&mut self) -> &mut T { ::#prefix::mem::transmute(self) } } }; let union_field_default_impl = quote! { impl<T> ::#prefix::default::Default for __BindgenUnionField<T> { #[inline] fn default() -> Self { Self::new() } } }; let union_field_clone_impl = quote! { impl<T> ::#prefix::clone::Clone for __BindgenUnionField<T> { #[inline] fn clone(&self) -> Self { Self::new() } } }; let union_field_copy_impl = quote! { impl<T> ::#prefix::marker::Copy for __BindgenUnionField<T> {} }; let union_field_debug_impl = quote! { impl<T> ::#prefix::fmt::Debug for __BindgenUnionField<T> { fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix::fmt::Result { fmt.write_str("__BindgenUnionField") } } }; // The actual memory of the filed will be hashed, so that's why these // field doesn't do anything with the hash. let union_field_hash_impl = quote! { impl<T> ::#prefix::hash::Hash for __BindgenUnionField<T> { fn hash<H: ::#prefix::hash::Hasher>(&self, _state: &mut H) { } } }; let union_field_partialeq_impl = quote! { impl<T> ::#prefix::cmp::PartialEq for __BindgenUnionField<T> { fn eq(&self, _other: &__BindgenUnionField<T>) -> bool { true } } }; let union_field_eq_impl = quote! 
{ impl<T> ::#prefix::cmp::Eq for __BindgenUnionField<T> { } }; let items = vec![ union_field_decl, union_field_impl, union_field_default_impl, union_field_clone_impl, union_field_copy_impl, union_field_debug_impl, union_field_hash_impl, union_field_partialeq_impl, union_field_eq_impl, ]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_incomplete_array_types( ctx: &BindgenContext, result: &mut Vec<proc_macro2::TokenStream>, ) { let prefix = ctx.trait_prefix(); // If the target supports `const fn`, declare eligible functions // as `const fn` else just `fn`. let const_fn = if ctx.options().rust_features().min_const_fn { quote! { const fn } } else { quote! { fn } }; let incomplete_array_decl = quote! { #[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField<T>( ::#prefix::marker::PhantomData<T>, [T; 0]); }; let incomplete_array_impl = quote! { impl<T> __IncompleteArrayField<T> { #[inline] pub #const_fn new() -> Self { __IncompleteArrayField(::#prefix::marker::PhantomData, []) } #[inline] pub fn as_ptr(&self) -> *const T { self as *const _ as *const T } #[inline] pub fn as_mut_ptr(&mut self) -> *mut T { self as *mut _ as *mut T } #[inline] pub unsafe fn as_slice(&self, len: usize) -> &[T] { ::#prefix::slice::from_raw_parts(self.as_ptr(), len) } #[inline] pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { ::#prefix::slice::from_raw_parts_mut(self.as_mut_ptr(), len) } } }; let incomplete_array_debug_impl = quote! { impl<T> ::#prefix::fmt::Debug for __IncompleteArrayField<T> { fn fmt(&self, fmt: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix::fmt::Result { fmt.write_str("__IncompleteArrayField") } } }; let items = vec![ incomplete_array_decl, incomplete_array_impl, incomplete_array_debug_impl, ]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn prepend_complex_type(result: &mut Vec<proc_macro2::TokenStream>) { let complex_type = quote! 
{ #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)] #[repr(C)] pub struct __BindgenComplex<T> { pub re: T, pub im: T } }; let items = vec![complex_type]; let old_items = mem::replace(result, items); result.extend(old_items.into_iter()); } pub fn build_path( item: &Item, ctx: &BindgenContext, ) -> error::Result<proc_macro2::TokenStream> { let path = item.namespace_aware_canonical_path(ctx); let tokens = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap(); Ok(tokens) } fn primitive_ty( ctx: &BindgenContext, name: &str, ) -> proc_macro2::TokenStream { let ident = ctx.rust_ident_raw(name); quote! { #ident } } pub fn type_from_named( ctx: &BindgenContext, name: &str, ) -> Option<proc_macro2::TokenStream> { // FIXME: We could use the inner item to check this is really a // primitive type but, who the heck overrides these anyway? Some(match name { "int8_t" => primitive_ty(ctx, "i8"), "uint8_t" => primitive_ty(ctx, "u8"), "int16_t" => primitive_ty(ctx, "i16"), "uint16_t" => primitive_ty(ctx, "u16"), "int32_t" => primitive_ty(ctx, "i32"), "uint32_t" => primitive_ty(ctx, "u32"), "int64_t" => primitive_ty(ctx, "i64"), "uint64_t" => primitive_ty(ctx, "u64"), "size_t" if ctx.options().size_t_is_usize => { primitive_ty(ctx, "usize") } "uintptr_t" => primitive_ty(ctx, "usize"), "ssize_t" if ctx.options().size_t_is_usize => { primitive_ty(ctx, "isize") } "intptr_t" | "ptrdiff_t" => primitive_ty(ctx, "isize"), _ => return None, }) } pub fn fnsig_return_ty( ctx: &BindgenContext, sig: &FunctionSig, ) -> proc_macro2::TokenStream { let return_item = ctx.resolve_item(sig.return_type()); if let TypeKind::Void = *return_item.kind().expect_type().kind() { quote! {} } else { let ret_ty = return_item.to_rust_ty_or_opaque(ctx, &()); quote! 
{ -> #ret_ty } } } pub fn fnsig_arguments( ctx: &BindgenContext, sig: &FunctionSig, ) -> Vec<proc_macro2::TokenStream> { use super::ToPtr; let mut unnamed_arguments = 0; let mut args = sig .argument_types() .iter() .map(|&(ref name, ty)| { let arg_item = ctx.resolve_item(ty); let arg_ty = arg_item.kind().expect_type(); // From the C90 standard[1]: // // A declaration of a parameter as "array of type" shall be // adjusted to "qualified pointer to type", where the type // qualifiers (if any) are those specified within the [ and ] of // the array type derivation. // // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html let arg_ty = match *arg_ty.canonical_type(ctx).kind() { TypeKind::Array(t, _) => { let stream = if ctx.options().array_pointers_in_arguments { arg_ty.to_rust_ty_or_opaque(ctx, &arg_item) } else { t.to_rust_ty_or_opaque(ctx, &()) }; stream.to_ptr(ctx.resolve_type(t).is_const()) } TypeKind::Pointer(inner) => { let inner = ctx.resolve_item(inner); let inner_ty = inner.expect_type(); if let TypeKind::ObjCInterface(ref interface) = *inner_ty.canonical_type(ctx).kind() { let name = ctx.rust_ident(interface.name()); quote! { #name } } else { arg_item.to_rust_ty_or_opaque(ctx, &()) } } _ => arg_item.to_rust_ty_or_opaque(ctx, &()), }; let arg_name = match *name { Some(ref name) => ctx.rust_mangle(name).into_owned(), None => { unnamed_arguments += 1; format!("arg{}", unnamed_arguments) } }; assert!(!arg_name.is_empty()); let arg_name = ctx.rust_ident(arg_name); quote! { #arg_name : #arg_ty } }) .collect::<Vec<_>>(); if sig.is_variadic() { args.push(quote! { ... }) } args } pub fn fnsig_block( ctx: &BindgenContext, sig: &FunctionSig, ) -> proc_macro2::TokenStream { let args = sig.argument_types().iter().map(|&(_, ty)| { let arg_item = ctx.resolve_item(ty); arg_item.to_rust_ty_or_opaque(ctx, &()) }); let return_item = ctx.resolve_item(sig.return_type()); let ret_ty = if let TypeKind::Void = *return_item.kind().expect_type().kind() { quote! 
{ () } } else { return_item.to_rust_ty_or_opaque(ctx, &()) }; quote! { *const ::block::Block<(#(#args,)*), #ret_ty> } } // Returns true if `canonical_name` will end up as `mangled_name` at the // machine code level, i.e. after LLVM has applied any target specific // mangling. pub fn names_will_be_identical_after_mangling( canonical_name: &str, mangled_name: &str, call_conv: Option<Abi>, ) -> bool { // If the mangled name and the canonical name are the same then no // mangling can have happened between the two versions. if canonical_name == mangled_name { return true; } // Working with &[u8] makes indexing simpler than with &str let canonical_name = canonical_name.as_bytes(); let mangled_name = mangled_name.as_bytes(); let (mangling_prefix, expect_suffix) = match call_conv { Some(Abi::C) | // None is the case for global variables None => { (b'_', false) } Some(Abi::Stdcall) => (b'_', true), Some(Abi::Fastcall) => (b'@', true), // This is something we don't recognize, stay on the safe side // by emitting the `#[link_name]` attribute Some(_) => return false, }; // Check that the mangled name is long enough to at least contain the // canonical name plus the expected prefix. if mangled_name.len() < canonical_name.len() + 1 { return false; } // Return if the mangled name does not start with the prefix expected // for the given calling convention. if mangled_name[0] != mangling_prefix { return false; } // Check that the mangled name contains the canonical name after the // prefix if &mangled_name[1..canonical_name.len() + 1] != canonical_name { return false; } // If the given calling convention also prescribes a suffix, check that // it exists too if expect_suffix { let suffix = &mangled_name[canonical_name.len() + 1..]; // The shortest suffix is "@0" if suffix.len() < 2 { return false; } // Check that the suffix starts with '@' and is all ASCII decimals // after that. 
if suffix[0] != b'@' || !suffix[1..].iter().all(u8::is_ascii_digit) { return false; } } else if mangled_name.len() != canonical_name.len() + 1 { // If we don't expect a prefix but there is one, we need the // #[link_name] attribute return false; } true } }
33.547874
137
0.478127
0e271aac1d543038c294a374f65c98530de0e23c
623
use seed::{prelude::*, *}; use super::{solid_trait_private::SolidPrivate, Solid}; pub struct LockOpen; impl SolidPrivate for LockOpen { fn base<T>(classes: impl ToClasses) -> Node<T> { svg![ C![classes], attrs!( At::from("fill") => "currentColor", At::from("viewBox") => "0 0 20 20", ), path![attrs!( At::from("d") => "M10 2a5 5 0 00-5 5v2a2 2 0 00-2 2v5a2 2 0 002 2h10a2 2 0 002-2v-5a2 2 0 00-2-2H7V7a3 3 0 015.905-.75 1 1 0 001.937-.5A5.002 5.002 0 0010 2z", ),], ] } } impl Solid for LockOpen {}
27.086957
171
0.516854
33a9c9b67d46e70a068438181e5dd15b37cb91fd
4,718
use arrow::array::ArrayDataBuilder; use arrow::array::StringArray; use arrow::buffer::Buffer; use num_traits::{AsPrimitive, FromPrimitive, Zero}; use std::fmt::Debug; /// A packed string array that stores start and end indexes into /// a contiguous string slice. /// /// The type parameter K alters the type used to store the offsets #[derive(Debug)] pub struct PackedStringArray<K> { /// The start and end offsets of strings stored in storage offsets: Vec<K>, /// A contiguous array of string data storage: String, } impl<K: Zero> Default for PackedStringArray<K> { fn default() -> Self { Self { offsets: vec![K::zero()], storage: String::new(), } } } impl<K: AsPrimitive<usize> + FromPrimitive + Zero> PackedStringArray<K> { pub fn new() -> Self { Self::default() } pub fn new_empty(len: usize) -> Self { Self { offsets: vec![K::zero(); len + 1], storage: String::new(), } } pub fn with_capacity(keys: usize, values: usize) -> Self { let mut offsets = Vec::with_capacity(keys + 1); offsets.push(K::zero()); Self { offsets, storage: String::with_capacity(values), } } /// Append a value /// /// Returns the index of the appended data pub fn append(&mut self, data: &str) -> usize { let id = self.offsets.len() - 1; let offset = self.storage.len() + data.len(); let offset = K::from_usize(offset).expect("failed to fit into offset type"); self.offsets.push(offset); self.storage.push_str(data); id } /// Get the value at a given index pub fn get(&self, index: usize) -> Option<&str> { let start_offset = self.offsets.get(index)?.as_(); let end_offset = self.offsets.get(index + 1)?.as_(); Some(&self.storage[start_offset..end_offset]) } /// Pads with empty strings to reach length pub fn extend(&mut self, len: usize) { let offset = K::from_usize(self.storage.len()).expect("failed to fit into offset type"); self.offsets.resize(self.offsets.len() + len, offset); } pub fn iter(&self) -> PackedStringIterator<'_, K> { PackedStringIterator { array: self, index: 0, } } pub fn len(&self) -> usize { 
self.offsets.len() - 1 } pub fn is_empty(&self) -> bool { self.offsets.len() == 1 } /// Return the amount of memory in bytes taken up by this array pub fn size(&self) -> usize { self.storage.len() + self.offsets.len() * std::mem::size_of::<K>() } pub fn into_inner(self) -> (Vec<K>, String) { (self.offsets, self.storage) } } impl PackedStringArray<i32> { /// Convert to an arrow representation pub fn to_arrow(&self) -> StringArray { let len = self.offsets.len() - 1; let offsets = Buffer::from_slice_ref(&self.offsets); let values = Buffer::from(self.storage.as_bytes()); let data = ArrayDataBuilder::new(arrow::datatypes::DataType::Utf8) .len(len) .add_buffer(offsets) .add_buffer(values) .build(); StringArray::from(data) } } pub struct PackedStringIterator<'a, K> { array: &'a PackedStringArray<K>, index: usize, } impl<'a, K: AsPrimitive<usize> + FromPrimitive + Zero> Iterator for PackedStringIterator<'a, K> { type Item = &'a str; fn next(&mut self) -> Option<Self::Item> { let item = self.array.get(self.index)?; self.index += 1; Some(item) } fn size_hint(&self) -> (usize, Option<usize>) { let len = self.array.len() - self.index; (len, Some(len)) } } #[cfg(test)] mod tests { use crate::string::PackedStringArray; #[test] fn test_storage() { let mut array = PackedStringArray::<i32>::new(); array.append("hello"); array.append("world"); array.append("cupcake"); assert_eq!(array.get(0).unwrap(), "hello"); assert_eq!(array.get(1).unwrap(), "world"); assert_eq!(array.get(2).unwrap(), "cupcake"); assert!(array.get(-1_i32 as usize).is_none()); assert!(array.get(3).is_none()); array.extend(2); assert_eq!(array.get(3).unwrap(), ""); assert_eq!(array.get(4).unwrap(), ""); assert!(array.get(5).is_none()); } #[test] fn test_empty() { let array = PackedStringArray::<u8>::new_empty(20); assert_eq!(array.get(12).unwrap(), ""); assert_eq!(array.get(9).unwrap(), ""); assert_eq!(array.get(3).unwrap(), ""); } }
27.114943
97
0.570369
90c17a94a576f0b6c57937717fc9a3f22e488110
611
//! Macros for NuttX
//! Based on https://github.com/no1wudi/nuttx.rs/blob/main/src/macros.rs

/// Print a formatted message to the serial console.
///
/// Accepts the same call shapes as the standard `println!` macro and routes
/// all output through `$crate::puts_format`.
///
/// NOTE: unlike `std::println!`, no trailing newline is appended by this
/// macro — if a newline is wanted it must be part of the format string.
#[macro_export]
macro_rules! println {
    // No arguments: print an empty string.
    () => {
        $crate::puts_format(format_args!(""))
    };

    // A single format string with no interpolated arguments.
    ($s:expr) => {
        $crate::puts_format(format_args!($s))
    };

    // A format string followed by one or more arguments.
    ($s:expr, $($tt:tt)*) => {
        $crate::puts_format(format_args!($s, $($tt)*))
    };
}
32.157895
82
0.605565
ab6259088f150fea724691fad4e6502a50df3112
8,699
#![allow(dead_code)] #![allow(non_snake_case)] #![deny(unused_must_use)] #[cfg(feature = "mcu")] use rustBoot::constants::{BOOT_PARTITION_ADDRESS, PARTITION_SIZE, UPDATE_PARTITION_ADDRESS}; use std::{env, path::PathBuf}; // use std::path::Path; use xshell::cmd; #[rustfmt::skip] fn main() -> Result<(), anyhow::Error> { let args = env::args().skip(1).collect::<Vec<_>>(); let args = args.iter().map(|s| &**s).collect::<Vec<_>>(); match &args[..] { ["test", "rustBoot"] => test_rustBoot(), [board, "build", "pkgs-for",] => build_rustBoot(board), [board, "sign" , "pkgs-for",] => sign_packages(board), #[cfg(feature = "mcu")] [board, "flash", "signed-pkg",] => flash_signed_fwimages(board), [board, "flash", "rustBoot",] => flash_rustBoot(board), [board, "build", "rustBoot-only",] => build_rustBoot_only(board), #[cfg(feature = "mcu")] [board, "build-sign-flash", "rustBoot",] => full_image_flash(board), #[cfg(feature = "mcu")] [board, "erase-and-flash-trailer-magic",] => erase_and_flash_trailer_magic(board), _ => { println!("USAGE: cargo xtask test rustBoot"); println!("OR"); println!("USAGE: cargo xtask [build|sign|flash] [pkgs-for|signed-pkg] [board]"); println!("OR"); println!("USAGE: cargo xtask [build-sign-flash] [rustBoot] [board]"); Ok(()) } } } fn test_rustBoot() -> Result<(), anyhow::Error> { let _p = xshell::pushd(root_dir())?; cmd!("cargo test --workspace").run()?; Ok(()) } fn build_rustBoot_only(target: &&str) -> Result<(), anyhow::Error> { let _p = xshell::pushd(root_dir().join("boards/bootloaders").join(target))?; match target { &"rpi4" => { cmd!("cargo build --release").run()?; // ` // if Path::new("kernel8.img").exists() { // cmd!("powershell -command \"del kernel8.img\"").run()?; // } #[cfg(feature = "windows")] cmd!("rust-objcopy --strip-all -O binary ..\\..\\target\\aarch64-unknown-none-softfloat\\release\\kernel rustBoot.bin").run()?; #[cfg(not(feature = "windows"))] cmd!("rust-objcopy --strip-all -O binary 
../../target/aarch64-unknown-none-softfloat/release/kernel rustBoot.bin").run()?; } &"nrf52840" => { cmd!("cargo build --release").run()?; } &"stm32f411" => { cmd!("cargo build --release").run()?; } &"stm32f446" => { cmd!("cargo build --release").run()?; } _ => { println!("board not supported"); } } Ok(()) } fn build_rustBoot(target: &&str) -> Result<(), anyhow::Error> { let _p = xshell::pushd( root_dir() .join("boards/firmware") .join(target) .join("boot_fw_blinky_green"), )?; cmd!("cargo build --release").run()?; let _p = xshell::pushd( root_dir() .join("boards/firmware") .join(target) .join("updt_fw_blinky_red"), )?; cmd!("cargo build --release").run()?; build_rustBoot_only(target)?; Ok(()) } fn sign_packages(target: &&str) -> Result<(), anyhow::Error> { match *target { "nrf52840" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; cmd!("py convert2bin.py").run()?; // python script has a linux dependency - `wolfcrypt` cmd!("wsl python3 signer.py").run()?; Ok(()) } "stm32f411" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; // cmd!("python3 --version").run()?; cmd!("python3 convert2bin.py").run()?; // python script has a linux dependency - `wolfcrypt` cmd!("python3 signer.py").run()?; Ok(()) } "stm32f446" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; // cmd!("python3 --version").run()?; cmd!("python3 convert2bin.py").run()?; // python script has a linux dependency - `wolfcrypt` // cmd!("python3 signer.py").run()?; Ok(()) } _ => todo!(), } } #[cfg(feature = "mcu")] fn flash_signed_fwimages(target: &&str) -> Result<(), anyhow::Error> { match *target { "nrf52840" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; let boot_part_addr = format!("0x{:x}", BOOT_PARTITION_ADDRESS); cmd!("pyocd flash -t nrf52840 --base-address {boot_part_addr} nrf52840_bootfw_v1234_signed.bin").run()?; let updt_part_addr = format!("0x{:x}", UPDATE_PARTITION_ADDRESS); 
cmd!("pyocd flash -t nrf52840 --base-address {updt_part_addr} nrf52840_updtfw_v1235_signed.bin").run()?; Ok(()) } "stm32f411" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; let boot_part_addr = format!("0x{:x}", BOOT_PARTITION_ADDRESS); cmd!("pyocd flash --base-address {boot_part_addr} stm32f411_bootfw_v1235_signed.bin") .run()?; let updt_part_addr = format!("0x{:x}", UPDATE_PARTITION_ADDRESS); cmd!("pyocd flash -t stm32f411 --base-address {updt_part_addr} stm32f411_updtfw_v1235_signed.bin").run()?; Ok(()) } _ => todo!(), } } fn flash_rustBoot(target: &&str) -> Result<(), anyhow::Error> { match *target { "nrf52840" => { let _p = xshell::pushd(root_dir().join("boards/bootloaders").join(target))?; cmd!("cargo flash --chip nRF52840_xxAA --release").run()?; Ok(()) } "stm32f411" => { let _p = xshell::pushd(root_dir().join("boards/bootloaders").join(target))?; cmd!("cargo flash --chip stm32f411vetx --release").run()?; Ok(()) } _ => todo!(), } } #[cfg(feature = "mcu")] fn full_image_flash(target: &&str) -> Result<(), anyhow::Error> { build_rustBoot(target)?; sign_packages(target)?; cmd!("pyocd erase -t nrf52 --mass-erase").run()?; flash_signed_fwimages(target)?; flash_rustBoot(target)?; Ok(()) } fn root_dir() -> PathBuf { let mut xtask_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); xtask_dir.pop(); xtask_dir } #[cfg(feature = "mcu")] /// to be used ONLY for testing. 
fn erase_and_flash_trailer_magic(target: &&str) -> Result<(), anyhow::Error> { match *target { "nrf52840" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; // just to ensure that an existing bootloader doesnt start to boot automatically - during a test cmd!("pyocd erase -t nrf52840 -s 0x0").run()?; let boot_trailer_magic = format!("0x{:x}", BOOT_PARTITION_ADDRESS + PARTITION_SIZE - 4); cmd!("pyocd erase -t nrf52840 -s {boot_trailer_magic}").run()?; cmd!("pyocd flash -t nrf52840 --base-address {boot_trailer_magic} trailer_magic.bin") .run()?; let updt_trailer_magic = format!("0x{:x}", UPDATE_PARTITION_ADDRESS + PARTITION_SIZE - 4); cmd!("pyocd erase -t nrf52840 -s {updt_trailer_magic}").run()?; cmd!("pyocd flash -t nrf52840 --base-address {updt_trailer_magic} trailer_magic.bin") .run()?; Ok(()) } "stm32f411" => { let _p = xshell::pushd(root_dir().join("boards/rbSigner/signed_images"))?; // just to ensure that an existing bootloader doesnt start to boot automatically - during a test cmd!("pyocd erase -t stm32f411 -s 0x0").run()?; let boot_trailer_magic = format!("0x{:x}", BOOT_PARTITION_ADDRESS + PARTITION_SIZE - 4); cmd!("pyocd erase -t stm32f411 -s {boot_trailer_magic}").run()?; cmd!("pyocd flash -t stm32f411 --base-address {boot_trailer_magic} trailer_magic.bin") .run()?; let updt_trailer_magic = format!("0x{:x}", UPDATE_PARTITION_ADDRESS + PARTITION_SIZE - 4); cmd!("pyocd erase -t stm32f411 -s {updt_trailer_magic}").run()?; cmd!("pyocd flash -t stm32f411 --base-address {updt_trailer_magic} trailer_magic.bin") .run()?; Ok(()) } _ => todo!(), } }
39.184685
139
0.540867
1ef5e22bce236c426d8f4b544a1e28f3f623ff14
3,543
use futures_util::{Stream, StreamExt as _}; use opentelemetry::global; use opentelemetry::global::shutdown_tracer_provider; use opentelemetry::sdk::trace::Config; use opentelemetry::sdk::{metrics::PushController, trace as sdktrace, Resource}; use opentelemetry::trace::TraceError; use opentelemetry::{ baggage::BaggageExt, metrics::ObserverResult, trace::{TraceContextExt, Tracer}, Context, Key, KeyValue, }; use std::error::Error; use std::time::Duration; fn init_tracer() -> Result<sdktrace::Tracer, TraceError> { opentelemetry_jaeger::new_agent_pipeline() .with_service_name("trace-demo") .with_trace_config(Config::default().with_resource(Resource::new(vec![ KeyValue::new("service.name", "new_service"), KeyValue::new("exporter", "otlp-jaeger"), ]))) .install_batch(opentelemetry::runtime::Tokio) } // Skip first immediate tick from tokio, not needed for async_std. fn delayed_interval(duration: Duration) -> impl Stream<Item = tokio::time::Instant> { opentelemetry::sdk::util::tokio_interval_stream(duration).skip(1) } fn init_meter() -> PushController { opentelemetry::sdk::export::metrics::stdout(tokio::spawn, delayed_interval).init() } const FOO_KEY: Key = Key::from_static_str("ex.com/foo"); const BAR_KEY: Key = Key::from_static_str("ex.com/bar"); const LEMONS_KEY: Key = Key::from_static_str("ex.com/lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); lazy_static::lazy_static! { static ref COMMON_ATTRIBUTES: [KeyValue; 4] = [ LEMONS_KEY.i64(10), KeyValue::new("A", "1"), KeyValue::new("B", "2"), KeyValue::new("C", "3"), ]; } #[tokio::main] async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> { // By binding the result to an unused variable, the lifetime of the variable // matches the containing block, reporting traces and metrics during the whole // execution. 
let _tracer = init_tracer()?; let _started = init_meter(); let tracer = global::tracer("ex.com/basic"); let meter = global::meter("ex.com/basic"); let one_metric_callback = |res: ObserverResult<f64>| res.observe(1.0, COMMON_ATTRIBUTES.as_ref()); let _ = meter .f64_value_observer("ex.com.one", one_metric_callback) .with_description("A ValueObserver set to 1.0") .init(); let histogram_two = meter.f64_histogram("ex.com.two").init(); let _baggage = Context::current_with_baggage(vec![FOO_KEY.string("foo1"), BAR_KEY.string("bar1")]) .attach(); let histogram = histogram_two.bind(COMMON_ATTRIBUTES.as_ref()); tracer.in_span("operation", |cx| { let span = cx.span(); span.add_event( "Nice operation!".to_string(), vec![Key::new("bogons").i64(100)], ); span.set_attribute(ANOTHER_KEY.string("yes")); meter.record_batch_with_context( // Note: call-site variables added as context Entries: &Context::current_with_baggage(vec![ANOTHER_KEY.string("xyz")]), COMMON_ATTRIBUTES.as_ref(), vec![histogram_two.measurement(2.0)], ); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); span.set_attribute(LEMONS_KEY.string("five")); span.add_event("Sub span event".to_string(), vec![]); histogram.record(1.3); }); }); shutdown_tracer_provider(); // sending remaining spans. Ok(()) }
34.067308
91
0.646345
db9d22f14cbfb5fc1a71b1d502cfe23a29b8e768
1,883
// Copyright 2021 Datafuse Labs. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod aggregate_arg_min_max; mod aggregate_avg; mod aggregate_combinator_distinct; mod aggregate_combinator_if; mod aggregate_count; mod aggregate_function; mod aggregate_function_factory; mod aggregate_function_state; mod aggregate_min_max; mod aggregate_window_funnel; // mod aggregate_min_max; mod aggregate_covariance; mod aggregate_stddev_pop; mod aggregate_sum; mod aggregator; mod aggregator_common; #[macro_use] mod macros; pub use aggregate_arg_min_max::AggregateArgMinMaxFunction; pub use aggregate_avg::AggregateAvgFunction; pub use aggregate_combinator_distinct::AggregateDistinctCombinator; pub use aggregate_combinator_if::AggregateIfCombinator; pub use aggregate_count::AggregateCountFunction; pub use aggregate_covariance::AggregateCovarianceFunction; pub use aggregate_function::AggregateFunction; pub use aggregate_function::AggregateFunctionRef; pub use aggregate_function_factory::AggregateFunctionFactory; pub use aggregate_function_state::get_layout_offsets; pub use aggregate_function_state::StateAddr; pub use aggregate_function_state::StateAddrs; pub use aggregate_min_max::AggregateMinMaxFunction; pub use aggregate_stddev_pop::AggregateStddevPopFunction; pub use aggregate_sum::AggregateSumFunction; pub use aggregator::Aggregators; pub use aggregator_common::*;
35.528302
75
0.832714
7595360a1399cfeb354f8c6ef0a2d3f2b95215f9
516
use iced::Subscription;
use pyo3::prelude::*;

use super::ToSubscription;
use crate::app::Interop;
use crate::common::{GCProtocol, Message};

/// Placeholder subscription that never yields any messages.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct NoSubscription;

impl GCProtocol for NoSubscription {}

impl ToSubscription for NoSubscription {
    /// Always produces [`Subscription::none`]; `_interop` is ignored.
    fn to_subscription(&self, _interop: &Interop) -> Subscription<Message> {
        Subscription::none()
    }
}

/// Python module initialization hook; currently registers nothing.
pub(crate) fn init_mod(_py: Python, _m: &PyModule) -> PyResult<()> {
    Ok(())
}
23.454545
76
0.697674
50d2efcb9955286b1be223940eb9c158480d8b7f
1,750
// Update of https://github.com/ajungren/crc32_digest to work with digest v0.9 #![forbid(unsafe_code)] use crc32fast::Hasher as Crc32Hasher; use std::hash::Hasher as HasherTrait; use std::convert::TryInto; use digest::{impl_write, FixedOutput, Update, Reset}; use generic_array::typenum::U4; use generic_array::GenericArray; pub use digest::Digest; #[derive(Clone, Default)] /// Wraps a [`Hasher`] and provides it with [`Digest`] and [`DynDigest`] implementations. /// /// [`Digest`]: ../digest/trait.Digest.html /// [`DynDigest`]: ../digest/trait.DynDigest.html /// [`Hasher`]: ../crc32fast/struct.Hasher.html pub struct Crc32(Crc32Hasher); /*impl Crc32 { /// Creates a new `Crc32`. #[inline] pub fn new() -> Self { Self(Hasher::new()) } /// Creates a new `Crc32` initialized with the given state. #[inline] pub fn from_state(state: u32) -> Self { Self(Hasher::new_with_initial(state)) } }*/ impl FixedOutput for Crc32 { type OutputSize = U4; #[inline] fn finalize_into(self, out: &mut GenericArray<u8, Self::OutputSize>) { let result = self.0.finalize(); out.copy_from_slice(&result.to_be_bytes()); } fn finalize_into_reset(&mut self, out: &mut GenericArray<u8, Self::OutputSize>) { // Finish of crc32 was upcast from u32 to u64, so downcast is fine let result: u32 = self.0.finish().try_into().unwrap(); out.copy_from_slice(&result.to_be_bytes()); self.0.reset(); } } impl Update for Crc32 { #[inline] fn update(&mut self, data: impl AsRef<[u8]>) { self.0.update(data.as_ref()); } } impl Reset for Crc32 { #[inline] fn reset(&mut self) { self.0.reset(); } } impl_write!(Crc32);
26.923077
89
0.637714
16b6f48ce77a64321004c28731e8109dab525b0a
4,756
// Copyright (c) 2015 Alcatel-Lucent, (c) 2016 Nokia // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the copyright holder nor the names of its contributors // may be used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
use bambou::{Error, RestEntity, Session}; use reqwest::Response; use std::collections::BTreeMap; use serde_json; pub use metadata::Metadata; pub use globalmetadata::GlobalMetadata; #[derive(Serialize, Deserialize, Default, Debug)] pub struct DSCPForwardingClassMapping<'a> { #[serde(skip_serializing)] #[serde(skip_deserializing)] _session: Option<&'a Session>, #[serde(rename="ID")] id: Option<String>, #[serde(rename="parentID")] parent_id: Option<String>, #[serde(rename="parentType")] parent_type: Option<String>, owner: Option<String>, #[serde(rename="DSCP")] pub dscp: Option<String>, #[serde(rename="lastUpdatedBy")] pub last_updated_by: Option<String>, #[serde(rename="entityScope")] pub entity_scope: Option<String>, #[serde(rename="forwardingClass")] pub forwarding_class: Option<String>, #[serde(rename="externalID")] pub external_id: Option<String>, } impl<'a> RestEntity<'a> for DSCPForwardingClassMapping<'a> { fn fetch(&mut self) -> Result<Response, Error> { match self._session { Some(session) => session.fetch_entity(self), None => Err(Error::NoSession), } } fn save(&mut self) -> Result<Response, Error> { match self._session { Some(session) => session.save(self), None => Err(Error::NoSession), } } fn delete(self) -> Result<Response, Error> { match self._session { Some(session) => session.delete(self), None => Err(Error::NoSession), } } fn create_child<C>(&self, child: &mut C) -> Result<Response, Error> where C: RestEntity<'a> { match self._session { Some(session) => session.create_child(self, child), None => Err(Error::NoSession), } } fn path() -> &'static str { "dscpforwardingclassmapping" } fn group_path() -> &'static str { "dscpforwardingclassmappings" } fn is_root(&self) -> bool { false } fn id(&self) -> Option<&str> { self.id.as_ref().and_then(|id| Some(id.as_str())) } fn fetch_children<R>(&self, children: &mut Vec<R>) -> Result<Response, Error> where R: RestEntity<'a> { match self._session { Some(session) => session.fetch_children(self, children), None => 
Err(Error::NoSession), } } fn get_session(&self) -> Option<&Session> { self._session } fn set_session(&mut self, session: &'a Session) { self._session = Some(session); } } impl<'a> DSCPForwardingClassMapping<'a> { pub fn fetch_metadatas(&self) -> Result<Vec<Metadata>, Error> { let mut metadatas = Vec::<Metadata>::new(); let _ = self.fetch_children(&mut metadatas)?; Ok(metadatas) } pub fn fetch_globalmetadatas(&self) -> Result<Vec<GlobalMetadata>, Error> { let mut globalmetadatas = Vec::<GlobalMetadata>::new(); let _ = self.fetch_children(&mut globalmetadatas)?; Ok(globalmetadatas) } }
31.496689
87
0.648024
918e43d040cb595084a8f28da327e5721224813e
1,397
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /* # ICE when returning struct with borrowed pointer to trait A function which takes a borrowed pointer to a trait and returns a struct with that borrowed pointer results in an ICE. This does not occur with concrete types, only with borrowed pointers to traits. */ // original trait Inner { fn print(&self); } impl Inner for int { fn print(&self) { print(format!("Inner: {}\n", *self)); } } struct Outer<'self> { inner: &'self Inner } impl<'self> Outer<'self> { fn new<'r>(inner: &'r Inner) -> Outer<'r> { Outer { inner: inner } } } pub fn main() { let inner = 5; let outer = Outer::new(&inner as &Inner); outer.inner.print(); } // minimal trait MyTrait<T> { } pub struct MyContainer<'self, T> { foos: ~[&'self MyTrait<T>], } impl<'self, T> MyContainer<'self, T> { pub fn add (&mut self, foo: &'self MyTrait<T>) { self.foos.push(foo); } }
22.901639
68
0.651396
e9da8beb1fec87dfa657fef2caed44bf51b6cfc0
893
use gaiku_common::{prelude::*, Result}; use image::load_from_memory; /// Converts a `png` file to 2d chunk data. pub struct PNGReader; impl FileFormat for PNGReader { type Value = (u8, u8); fn load<C, T>(bytes: Vec<u8>) -> Result<(Vec<C>, Option<TextureAtlas2d<T>>)> where C: Chunkify<Self::Value> + ChunkifyMut<Self::Value> + Boxify, T: Texturify2d, { let mut result = vec![]; let img = load_from_memory(&bytes)?.into_luma8(); assert!(img.width() <= u16::MAX as u32); assert!(img.height() <= u16::MAX as u32); let mut chunk = C::new([0.0, 0.0, 0.0], img.width() as u16, img.height() as u16, 1); for x in 0..img.width() as u32 { for y in 0..img.height() as u32 { let color = img.get_pixel(x, y).0[0]; chunk.set(x as usize, y as usize, 0, (color, color)); } } result.push(chunk); Ok((result, None)) } }
24.805556
88
0.590146
618cce8187ff3ff109e1e2d5accf13ad08f95059
3,011
use std::collections::VecDeque; use common::{math, proto::Position}; /// Predicts the result of motion inputs in-flight to the server /// /// When sending input to the server, call `push` to record the input in a local queue of in-flight /// inputs, and to obtaining a generation tag to send alongside the input. The server echos the /// highest tag it's received alongside every state update, which we then use in `reconcile` to /// determine which inputs have been integrated into the server's state and no longer need to be /// predicted. pub struct PredictedMotion { log: VecDeque<Input>, generation: u16, predicted: Position, } impl PredictedMotion { pub fn new(initial: Position) -> Self { Self { log: VecDeque::new(), generation: 0, predicted: initial, } } /// Update for input about to be sent to the server, returning the generation it should be /// tagged with pub fn push(&mut self, direction: &na::Unit<na::Vector3<f32>>, distance: f32) -> u16 { let transform = math::translate_along(direction, distance); self.predicted.local *= transform; self.log.push_back(Input { transform }); self.generation = self.generation.wrapping_add(1); self.generation } /// Update with the latest state received from the server and the generation it was based on pub fn reconcile(&mut self, generation: u16, position: Position) { let first_gen = self.generation.wrapping_sub(self.log.len() as u16); let obsolete = usize::from(generation.wrapping_sub(first_gen)); if obsolete > self.log.len() || obsolete == 0 { // We've already processed a state incorporating equal or more recent input return; } self.log.drain(..obsolete); self.predicted.node = position.node; self.predicted.local = self .log .iter() .fold(position.local, |acc, x| acc * x.transform); } /// Latest estimate of the server's state after receiving all `push`ed inputs. 
pub fn predicted(&self) -> &Position { &self.predicted } } struct Input { transform: na::Matrix4<f32>, } #[cfg(test)] mod tests { use super::*; /// An arbitrary position fn pos() -> Position { Position { node: common::graph::NodeId::ROOT, local: na::one(), } } #[test] fn wraparound() { let mut pred = PredictedMotion::new(pos()); pred.generation = u16::max_value() - 1; assert_eq!(pred.push(&na::Vector3::x_axis(), 1.0), u16::max_value()); assert_eq!(pred.push(&na::Vector3::x_axis(), 1.0), 0); assert_eq!(pred.log.len(), 2); pred.reconcile(u16::max_value() - 1, pos()); assert_eq!(pred.log.len(), 2); pred.reconcile(u16::max_value(), pos()); assert_eq!(pred.log.len(), 1); pred.reconcile(0, pos()); assert_eq!(pred.log.len(), 0); } }
33.087912
99
0.610428
4ba127e38d5dcabbeeccef735c0a67de43098216
11,603
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::{ packet_logs::{append_pcap, write_pcap_header}, *, }, async_utils::PollExt, fidl::{endpoints::RequestStream, Error as FidlError}, fidl_fuchsia_bluetooth_snoop::{PacketType, SnoopMarker, SnoopProxy, SnoopRequestStream}, fuchsia_async::{Channel, Executor}, fuchsia_inspect::{assert_inspect_tree, Inspector}, fuchsia_zircon as zx, futures::pin_mut, std::task::Poll, }; fn setup() -> ( Executor, ConcurrentSnooperPacketFutures, PacketLogs, SubscriptionManager, ConcurrentClientRequestFutures, Inspector, ) { let inspect = Inspector::new(); ( fasync::Executor::new().unwrap(), ConcurrentSnooperPacketFutures::new(), PacketLogs::new( 10, 10, 10, Duration::new(10, 0), inspect.root().create_child("packet_log"), ), SubscriptionManager::new(), ConcurrentClientRequestFutures::new(), inspect, ) } #[test] fn test_id_generator() { let mut id_gen = IdGenerator::new(); assert_eq!(id_gen.next(), ClientId(0)); assert_eq!(id_gen.next(), ClientId(1)); } #[test] fn test_register_new_client() { let (_exec, _snoopers, _logs, _subscribers, mut requests, _inspect) = setup(); assert_eq!(requests.len(), 0); let (_tx, rx) = zx::Channel::create().unwrap(); let stream = SnoopRequestStream::from_channel(Channel::from_channel(rx).unwrap()); register_new_client(stream, &mut requests, ClientId(0)); assert_eq!(requests.len(), 1); } fn fidl_endpoints() -> (SnoopProxy, SnoopRequestStream) { let (proxy, server) = fidl::endpoints::create_proxy::<SnoopMarker>().unwrap(); let request_stream = server.into_stream().unwrap(); (proxy, request_stream) } fn unwrap_request<T, E>(request: Poll<Option<Result<T, E>>>) -> T { if let Poll::Ready(Some(Ok(request))) = request { return request; } panic!("Failed to receive request"); } fn unwrap_response<T, E>(response: Poll<Result<T, E>>) -> T { if let Poll::Ready(Ok(response)) = response { return 
response; } panic!("Failed to receive response"); } #[test] fn test_snoop_default_command_line_args() { let args = Args::from_args(&["bt-snoop.cmx"], &[]).expect("Args created from empty args"); assert_eq!(args.log_size_soft_kib, 32); assert_eq!(args.log_size_hard_kib, 256); assert_eq!(args.log_time_seconds, 60); assert_eq!(args.max_device_count, 8); assert_eq!(args.truncate_payload, None); assert_eq!(args.verbosity, 0); } #[test] fn test_snoop_command_line_args() { let log_size_kib = 1; let log_time_seconds = 2; let max_device_count = 3; let truncate_payload = 4; let verbosity = 2; let raw_args = &[ "--log-size-soft-kib", &log_size_kib.to_string(), "--log-size-hard-kib", &log_size_kib.to_string(), "--log-time-seconds", &log_time_seconds.to_string(), "--max-device-count", &max_device_count.to_string(), "--truncate-payload", &truncate_payload.to_string(), "-v", "-v", ]; let args = Args::from_args(&["bt-snoop.cmx"], raw_args).expect("Args created from args"); assert_eq!(args.log_size_soft_kib, log_size_kib); assert_eq!(args.log_size_hard_kib, log_size_kib); assert_eq!(args.log_time_seconds, log_time_seconds); assert_eq!(args.max_device_count, max_device_count); assert_eq!(args.truncate_payload, Some(truncate_payload)); assert_eq!(args.verbosity, verbosity); } #[fasync::run_until_stalled(test)] async fn test_packet_logs_inspect() { // This is a test that basic inspect data is plumbed through from the inspect root. 
// More comprehensive testing of possible permutations of packet log inspect data // is found in bounded_queue.rs let inspect = Inspector::new(); let runtime_metrics_node = inspect.root().create_child("runtime_metrics"); let mut packet_logs = PacketLogs::new(2, 256, 256, Duration::from_secs(60), runtime_metrics_node); assert_inspect_tree!(inspect, root: { runtime_metrics: { logging_active_for_devices: "", } }); let id_1 = String::from("001"); packet_logs.add_device(id_1.clone()); let mut expected_data = vec![]; write_pcap_header(&mut expected_data).expect("write to succeed"); assert_inspect_tree!(inspect, root: { runtime_metrics: { logging_active_for_devices: "\"001\"", device_0: { hci_device_name: "001", byte_len: 0u64, number_of_items: 0u64, data: expected_data, }, } }); let ts = zx::Time::from_nanos(123 * 1_000_000_000); let packet = snooper::SnoopPacket::new(false, PacketType::Data, ts, vec![3, 2, 1]); // write pcap header and packet data to expected_data buffer let mut expected_data = vec![]; write_pcap_header(&mut expected_data).expect("write to succeed"); append_pcap(&mut expected_data, &packet, None).expect("write to succeed"); packet_logs.log_packet(&id_1, packet).await; assert_inspect_tree!(inspect, root: { runtime_metrics: { logging_active_for_devices: "\"001\"", device_0: { hci_device_name: "001", byte_len: 51u64, number_of_items: 1u64, data: expected_data, }, } }); drop(packet_logs); } #[test] fn test_snoop_config_inspect() { let args = Args { log_size_soft_kib: 1, log_size_hard_kib: 1, log_time_seconds: 2, max_device_count: 3, truncate_payload: Some(4), verbosity: 5, }; let inspect = Inspector::new(); let snoop_config_node = inspect.root().create_child("configuration"); let config = SnoopConfig::from_args(args, snoop_config_node); assert_inspect_tree!(inspect, root: { configuration: { log_size_soft_max_bytes: 1024u64, log_size_hard_max_bytes: "1024", log_time: 2u64, max_device_count: 3u64, truncate_payload: "4 bytes", hci_dir: HCI_DEVICE_CLASS_PATH, } 
}); drop(config); } // Helper that pumps the request stream to get back a single request, panicking if the stream // stalls before a request is returned. fn pump_request_stream( exec: &mut fasync::Executor, mut request_stream: SnoopRequestStream, id: ClientId, ) -> ClientRequest { let request = unwrap_request(exec.run_until_stalled(&mut request_stream.next())); (id, (Some(Ok(request)), request_stream)) } // Helper that pumps the the handle_client_request until stalled, panicking if the future // stalls in a pending state or returns an error. fn pump_handle_client_request( exec: &mut fasync::Executor, request: ClientRequest, client_requests: &mut ConcurrentClientRequestFutures, subscribers: &mut SubscriptionManager, packet_logs: &PacketLogs, ) { let handler = handle_client_request(request, client_requests, subscribers, packet_logs); pin_mut!(handler); exec.run_until_stalled(&mut handler) .expect("Handler future to complete") .expect("Client channel to accept response"); } #[test] fn test_handle_client_request() { let (mut exec, mut _snoopers, mut logs, mut subscribers, mut requests, _inspect) = setup(); // unrecognized device returns an error to the client let (proxy, request_stream) = fidl_endpoints(); let mut client_fut = proxy.start(true, Some("")); let _ = exec.run_until_stalled(&mut client_fut); let request = pump_request_stream(&mut exec, request_stream, ClientId(0)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); let response = unwrap_response(exec.run_until_stalled(&mut client_fut)); assert!(response.error.is_some()); assert_eq!(subscribers.number_of_subscribers(), 0); // valid device returns no errors to a client subscribed to that device let (proxy, request_stream) = fidl_endpoints(); logs.add_device(String::new()); let mut client_fut = proxy.start(true, Some("")); let _ = exec.run_until_stalled(&mut client_fut); let request = pump_request_stream(&mut exec, request_stream, ClientId(1)); 
pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); let response = unwrap_response(exec.run_until_stalled(&mut client_fut)); assert!(response.error.is_none()); assert_eq!(subscribers.number_of_subscribers(), 1); // valid device returns no errors to a client subscribed globally let (proxy, request_stream) = fidl_endpoints(); let mut client_fut = proxy.start(true, None); let _ = exec.run_until_stalled(&mut client_fut); let request = pump_request_stream(&mut exec, request_stream, ClientId(2)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); let response = unwrap_response(exec.run_until_stalled(&mut client_fut)); assert!(response.error.is_none()); assert_eq!(subscribers.number_of_subscribers(), 2); // second request by the same client returns an error let (proxy, request_stream) = fidl_endpoints(); let mut client_fut = proxy.start(true, None); let _ = exec.run_until_stalled(&mut client_fut); let request = pump_request_stream(&mut exec, request_stream, ClientId(2)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); let response = unwrap_response(exec.run_until_stalled(&mut client_fut)); assert!(response.error.is_some()); assert_eq!(subscribers.number_of_subscribers(), 2); // valid device returns no errors to a client requesting a dump let (proxy, request_stream) = fidl_endpoints(); let mut client_fut = proxy.start(false, None); let _ = exec.run_until_stalled(&mut client_fut); let request = pump_request_stream(&mut exec, request_stream, ClientId(3)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); let response = unwrap_response(exec.run_until_stalled(&mut client_fut)); assert!(response.error.is_none()); assert_eq!(subscribers.number_of_subscribers(), 2); } #[test] fn test_handle_bad_client_request() { let (mut exec, mut _snoopers, mut logs, mut subscribers, mut requests, _inspect) = setup(); let id = 
ClientId(0); let err = Some(Err(FidlError::Invalid)); let (_proxy, req_stream) = fidl_endpoints(); let handle = req_stream.control_handle(); let request = (id, (err, req_stream)); subscribers.register(id, handle, None, None).unwrap(); assert!(subscribers.is_registered(&id)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); assert!(!subscribers.is_registered(&id)); let id = ClientId(1); let err = Some(Err(FidlError::Invalid)); let (_proxy, req_stream) = fidl_endpoints(); let handle = req_stream.control_handle(); let request = (id, (err, req_stream)); subscribers.register(id, handle, None, None).unwrap(); assert!(subscribers.is_registered(&id)); pump_handle_client_request(&mut exec, request, &mut requests, &mut subscribers, &mut logs); assert!(!subscribers.is_registered(&id)); }
36.602524
95
0.670602
e56a4359d1822890283ef4d39d37274ca567862c
1,431
use clap::{App, Arg}; use std::fs; use std::io::{self, Write}; use rlox::vm::compiler; use rlox::vm::vm; fn main() { let matches = App::new("My Super Program") .arg( Arg::with_name("trace") .long("trace") .help("Trace execution"), ) .arg(Arg::with_name("INPUT").help("Lox script to execute")) .get_matches(); let trace = matches.is_present("trace"); if matches.is_present("INPUT") { run_file(matches.value_of("INPUT").unwrap(), trace); } else { run_prompt(trace); } } fn run_file(file: &str, trace: bool) { let contents = fs::read_to_string(file).expect("Something went wrong reading the file"); let result = interpret(&contents, trace); match result { compiler::InterpretResult::CompileError => std::process::exit(65), compiler::InterpretResult::RuntimeError => std::process::exit(70), _ => (), } } fn run_prompt(trace: bool) { loop { print!("> "); io::stdout().flush().unwrap(); let mut line = String::new(); io::stdin() .read_line(&mut line) .expect("Failed to read line"); interpret(&line, trace); } } fn interpret(source: &str, trace: bool) -> compiler::InterpretResult { match compiler::compile(source, trace) { Ok(chunk) => vm::interpret(&chunk, trace), Err(e) => e, } }
27
92
0.559748
bf99759b192cc00cbba1083140390ddd8f3fb266
357
#![cfg_attr(not(with_main), no_std)] fn f (x : u32) -> u32 { let mut k = 0; loop { if k == x { break; } k = k+1; } return k; } const ARG :u32 = 2; #[cfg(with_main)] pub fn main() { println!("{:?}", f(ARG)); } #[cfg(not(with_main))] #[cfg_attr(crux, crux_test)] fn crux_test() -> u32 { f(ARG) }
17
84
0.462185
0aeebae639e91811cfcda603e03fb34b96f42521
757
#![feature(type_alias_impl_trait)] trait IterBits { type BitsIter: Iterator<Item = u8>; fn iter_bits(self, n: u8) -> Self::BitsIter; } type IterBitsIter<T, E, I> = impl std::iter::Iterator<Item = I>; impl<T: Copy, E> IterBits for T where T: std::ops::Shr<Output = T> + std::ops::BitAnd<T, Output = T> + std::convert::From<u8> + std::convert::TryInto<u8, Error = E>, E: std::fmt::Debug, { type BitsIter = IterBitsIter<T, E, u8>; fn iter_bits(self, n: u8) -> Self::BitsIter { (0u8..n).rev().map(move |shift| ((self >> T::from(shift)) & T::from(1)).try_into().unwrap()) //~^ ERROR non-defining opaque type use in defining scope //~| ERROR type mismatch resolving } } fn main() {}
28.037037
100
0.586526
62c992e10ed0dfd34147d1f453322285b264e6fa
1,341
use rgx::math::Point2; use rgx::rect::Rect; pub fn clamp(p: &mut Point2<i32>, rect: Rect<i32>) { if p.x < rect.x1 { p.x = rect.x1; } if p.y < rect.y1 { p.y = rect.y1; } if p.x > rect.x2 { p.x = rect.x2; } if p.y > rect.y2 { p.y = rect.y2; } } pub fn stitch_frames<T: Clone>(mut frames: Vec<Vec<T>>, fw: usize, fh: usize, val: T) -> Vec<T> { let nframes = frames.len(); let width = fw * nframes; if nframes == 0 { return Vec::with_capacity(0); } else if nframes == 1 { return frames.remove(0); } let mut buffer: Vec<T> = vec![val; fw * fh * nframes]; for (i, frame) in frames.iter().enumerate() { for y in 0..fh { let offset = i * fw + y * width; buffer.splice( offset..offset + fw, frame[fw * y..fw * y + fw].iter().cloned(), ); } } buffer } pub fn align_u8<T>(data: &[T]) -> &[u8] { let (head, body, tail) = unsafe { data.align_to::<u8>() }; assert!(head.is_empty()); assert!(tail.is_empty()); body } #[macro_export] macro_rules! hashmap { ($( $key: expr => $val: expr ),*) => {{ let mut map = ::std::collections::HashMap::new(); $( map.insert($key.to_owned(), $val); )* map }} }
22.35
97
0.480239
e54b11858cd1c7e6d00f42fb3e631c6e2bb367e6
3,066
#[doc = "Register `TASKS_START` writer"] pub struct W(crate::W<TASKS_START_SPEC>); impl core::ops::Deref for W { type Target = crate::W<TASKS_START_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<TASKS_START_SPEC>> for W { fn from(writer: crate::W<TASKS_START_SPEC>) -> Self { W(writer) } } #[doc = "Starts continuous I2S transfer. Also starts MCK generator when this is enabled\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum TASKS_START_AW { #[doc = "1: Trigger task"] TRIGGER = 1, } impl From<TASKS_START_AW> for bool { #[inline(always)] fn from(variant: TASKS_START_AW) -> Self { variant as u8 != 0 } } #[doc = "Field `TASKS_START` writer - Starts continuous I2S transfer. Also starts MCK generator when this is enabled"] pub struct TASKS_START_W<'a> { w: &'a mut W, } impl<'a> TASKS_START_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: TASKS_START_AW) -> &'a mut W { self.bit(variant.into()) } #[doc = "Trigger task"] #[inline(always)] pub fn trigger(self) -> &'a mut W { self.variant(TASKS_START_AW::TRIGGER) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01); self.w } } impl W { #[doc = "Bit 0 - Starts continuous I2S transfer. 
Also starts MCK generator when this is enabled"] #[inline(always)] pub fn tasks_start(&mut self) -> TASKS_START_W { TASKS_START_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Starts continuous I2S transfer. Also starts MCK generator when this is enabled\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [tasks_start](index.html) module"] pub struct TASKS_START_SPEC; impl crate::RegisterSpec for TASKS_START_SPEC { type Ux = u32; } #[doc = "`write(|w| ..)` method takes [tasks_start::W](W) writer structure"] impl crate::Writable for TASKS_START_SPEC { type Writer = W; } #[doc = "`reset()` method sets TASKS_START to value 0"] impl crate::Resettable for TASKS_START_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0 } }
32.967742
392
0.620678
f566c6ad49d0580890a1c4bd39230e09cd698d5d
59,165
use crate::parser::filter::{MatchTarget, ModifierMap}; use graph_core::resource::ResourceIdentity; pub fn get_target_map_modifier(resource_identity: ResourceIdentity) -> ModifierMap { let mut modify_target = ModifierMap::default(); match resource_identity { ResourceIdentity::Activities => { modify_target.map.insert( MatchTarget::OperationId("me.ListActivities".to_string()), vec![ MatchTarget::OperationMap("activities".to_string()), MatchTarget::OperationId("activities.ListActivities".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.GetActivities".to_string()), vec![ MatchTarget::OperationMap("activities".to_string()), MatchTarget::OperationId("activities.GetActivities".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateActivities".to_string()), vec![ MatchTarget::OperationMap("activities".to_string()), MatchTarget::OperationId("activities.UpdateActivities".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.CreateActivities".to_string()), vec![ MatchTarget::OperationMap("activities".to_string()), MatchTarget::OperationId("activities.CreateActivities".to_string()), ], ); } ResourceIdentity::AuditLogs => { // auditLogs.auditLogRoot.GetAuditLogRoot modify_target.operation_map("auditLogs.auditLogRoot", "auditLogs"); modify_target.map.insert( MatchTarget::OperationId("auditLogs.auditLogRoot.GetAuditLogRoot".to_string()), vec![ MatchTarget::OperationMap("auditLogs".to_string()), MatchTarget::OperationId("auditLogs.GetAuditLogRoot".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("auditLogs.auditLogRoot.UpdateAuditLogRoot".to_string()), vec![ MatchTarget::OperationMap("auditLogs".to_string()), MatchTarget::OperationId("auditLogs.UpdateAuditLogRoot".to_string()), ], ); } ResourceIdentity::Attachments => { modify_target.map.insert( MatchTarget::OperationId("groups.calendar.events.ListAttachments".to_string()), vec![ MatchTarget::OperationMap("attachments".to_string()), 
MatchTarget::OperationId("attachments.ListAttachments".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.calendar.events.GetAttachments".to_string()), vec![ MatchTarget::OperationMap("attachments".to_string()), MatchTarget::OperationId("attachments.GetAttachments".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.calendar.events.CreateAttachments".to_string()), vec![ MatchTarget::OperationMap("attachments".to_string()), MatchTarget::OperationId("attachments.CreateAttachment".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.calendar.events.UpdateAttachments".to_string()), vec![ MatchTarget::OperationMap("attachments".to_string()), MatchTarget::OperationId("attachments.UpdateAttachments".to_string()), ], ); } ResourceIdentity::Buckets => { modify_target.map.insert( MatchTarget::OperationMap("planner.buckets".to_string()), vec![MatchTarget::OperationMap("buckets".to_string())], ); modify_target.map.insert( MatchTarget::OperationId("planner.buckets.GetBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.GetBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.CreateBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.CreateBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.buckets.UpdateBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.UpdateBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.UpdateBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.UpdateBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.buckets.ListBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), 
MatchTarget::OperationId("buckets.ListBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.ListBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.ListBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.DeleteBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.DeleteBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.plans.GetBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.GetBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.GetBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.GetBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.plans.ListBuckets".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.ListBuckets".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.buckets.CreateTasks".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.CreateTasks".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.buckets.ListTasks".to_string()), vec![ MatchTarget::OperationMap("buckets".to_string()), MatchTarget::OperationId("buckets.ListTasks".to_string()), ], ); } ResourceIdentity::Calendar => { modify_target.map.insert( MatchTarget::OperationId("users.ListCalendars".to_string()), vec![ MatchTarget::OperationId("users.calendar.ListCalendars".to_string()), MatchTarget::OperationMap("users.calendar".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.CreateCalendar".to_string()), vec![ MatchTarget::OperationId("users.calendar.CreateCalendar".to_string()), 
MatchTarget::OperationMap("users.calendar".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.CreateCalendars".to_string()), vec![ MatchTarget::OperationId("users.calendars.CreateCalendar".to_string()), MatchTarget::OperationMap("users.calendars".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.DeleteCalendar".to_string()), vec![ MatchTarget::OperationId("users.calendar.DeleteCalendar".to_string()), MatchTarget::OperationMap("users.calendar".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.DeleteCalendars".to_string()), vec![ MatchTarget::OperationId("users.calendars.DeleteCalendars".to_string()), MatchTarget::OperationMap("users.calendars".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.GetCalendar".to_string()), vec![ MatchTarget::OperationId("users.calendar.GetCalendar".to_string()), MatchTarget::OperationMap("users.calendar".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.GetCalendars".to_string()), vec![ MatchTarget::OperationId("users.calendars.GetCalendars".to_string()), MatchTarget::OperationMap("users.calendars".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.UpdateCalendar".to_string()), vec![ MatchTarget::OperationId("users.calendar.UpdateCalendar".to_string()), MatchTarget::OperationMap("users.calendar".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.UpdateCalendars".to_string()), vec![ MatchTarget::OperationId("users.calendars.UpdateCalendars".to_string()), MatchTarget::OperationMap("users.calendars".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId( "users.calendar.calendarView.calendar.getSchedule".to_string(), ), vec![ MatchTarget::OperationId("users.calendar.calendarView.getSchedule".to_string()), MatchTarget::OperationMap("users.calendar.calendarView".to_string()), ], ); modify_target.map.insert( 
MatchTarget::OperationId("users.calendar.events.calendar.getSchedule".to_string()), vec![ MatchTarget::OperationId("users.calendar.events.getSchedule".to_string()), MatchTarget::OperationMap("users.calendar.events".to_string()), ], ); } ResourceIdentity::CalendarGroups => { modify_target.map.insert( MatchTarget::OperationId("users.GetCalendarGroups".to_string()), vec![ MatchTarget::OperationId("users.calendarGroups.GetCalendarGroups".to_string()), MatchTarget::OperationMap("users.calendarGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.UpdateCalendarGroups".to_string()), vec![ MatchTarget::OperationId( "users.calendarGroups.UpdateCalendarGroups".to_string(), ), MatchTarget::OperationMap("users.calendarGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.ListCalendarGroups".to_string()), vec![ MatchTarget::OperationId("users.calendarGroups.ListCalendarGroups".to_string()), MatchTarget::OperationMap("users.calendarGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.DeleteCalendarGroups".to_string()), vec![ MatchTarget::OperationId( "users.calendarGroups.DeleteCalendarGroups".to_string(), ), MatchTarget::OperationMap("users.calendarGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId( "users.calendarGroups.calendars.events.calendar.getSchedule".to_string(), ), vec![ MatchTarget::OperationId( "users.calendarGroups.calendars.events.getSchedule".to_string(), ), MatchTarget::OperationMap("users.calendarGroups.calendars.events".to_string()), ], ); } ResourceIdentity::CalendarView => { modify_target.map.insert( MatchTarget::OperationId("me.ListCalendarView".to_string()), vec![ MatchTarget::OperationMap("calendarViews".to_string()), MatchTarget::OperationId("calendarViews.ListCalendarView".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.GetCalendarView".to_string()), vec![ 
MatchTarget::OperationMap("calendarView".to_string()), MatchTarget::OperationId("calendarView.GetCalendarView".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateCalendarView".to_string()), vec![ MatchTarget::OperationMap("calendarView".to_string()), MatchTarget::OperationId("calendarView.UpdateCalendarView".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.CreateCalendarView".to_string()), vec![ MatchTarget::OperationMap("calendarViews".to_string()), MatchTarget::OperationId("calendarViews.CreateCalendarView".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.calendarView.delta.fa14".to_string()), vec![ MatchTarget::OperationMap("calendarViews".to_string()), MatchTarget::OperationId("calendarViews.delta".to_string()), ], ); } ResourceIdentity::Calls => { modify_target.map.insert( MatchTarget::OperationId("communications.GetCalls".to_string()), vec![ MatchTarget::OperationMap("calls".to_string()), MatchTarget::OperationId("calls.GetCalls".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.UpdateCalls".to_string()), vec![ MatchTarget::OperationMap("calls".to_string()), MatchTarget::OperationId("calls.UpdateCalls".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.ListCalls".to_string()), vec![ MatchTarget::OperationMap("calls".to_string()), MatchTarget::OperationId("calls.ListCalls".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.CreateCalls".to_string()), vec![ MatchTarget::OperationMap("calls".to_string()), MatchTarget::OperationId("calls.CreateCalls".to_string()), ], ); } ResourceIdentity::CallRecords => { modify_target.map.insert( MatchTarget::OperationId("communications.GetCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.GetCallRecords".to_string()), ], ); modify_target.map.insert( 
MatchTarget::OperationId("communications.UpdateCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.UpdateCallRecords".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.calls.GetCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.GetCallRecords".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.calls.UpdateCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.UpdateCallRecords".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.ListCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.ListCallRecords".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.CreateCallRecords".to_string()), vec![ MatchTarget::OperationMap("callRecords".to_string()), MatchTarget::OperationId("callRecords.CreateCallRecords".to_string()), ], ); } ResourceIdentity::ContactFolders => { modify_target.map.insert( MatchTarget::OperationId("me.GetContactFolders".to_string()), vec![ MatchTarget::OperationMap("contactFolders".to_string()), MatchTarget::OperationId("contactFolders.GetContactFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateContactFolders".to_string()), vec![ MatchTarget::OperationMap("contactFolders".to_string()), MatchTarget::OperationId("contactFolders.UpdateContactFolders".to_string()), ], ); } ResourceIdentity::Contacts => { modify_target.map.insert( MatchTarget::OperationId("me.GetContacts".to_string()), vec![ MatchTarget::OperationMap("contacts".to_string()), MatchTarget::OperationId("contacts.GetContacts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateContacts".to_string()), 
vec![ MatchTarget::OperationMap("contacts".to_string()), MatchTarget::OperationId("contacts.UpdateContacts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.ListContacts".to_string()), vec![ MatchTarget::OperationMap("contacts".to_string()), MatchTarget::OperationId("contacts.ListContacts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.CreateContacts".to_string()), vec![ MatchTarget::OperationMap("contacts".to_string()), MatchTarget::OperationId("contacts.CreateContacts".to_string()), ], ); } ResourceIdentity::ContentTypes => { modify_target.map.insert( MatchTarget::OperationId("sites.ListContentTypes".to_string()), vec![ MatchTarget::OperationMap("sites.contentTypes".to_string()), MatchTarget::OperationId("sites.contentTypes.ListContentTypes".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.GetContentTypes".to_string()), vec![ MatchTarget::OperationMap("sites.contentTypes".to_string()), MatchTarget::OperationId("sites.contentTypes.GetContentTypes".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.UpdateContentTypes".to_string()), vec![ MatchTarget::OperationMap("sites.contentTypes".to_string()), MatchTarget::OperationId("sites.contentTypes.UpdateContentTypes".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.CreateContentTypes".to_string()), vec![ MatchTarget::OperationMap("sites.contentTypes".to_string()), MatchTarget::OperationId("sites.contentTypes.CreateContentTypes".to_string()), ], ); } ResourceIdentity::Conversations => { modify_target.map.insert( MatchTarget::OperationId("groups.ListConversations".to_string()), vec![ MatchTarget::OperationMap("groups.conversations".to_string()), MatchTarget::OperationId("groups.conversations.ListConversations".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.CreateConversations".to_string()), vec![ 
MatchTarget::OperationMap("groups.conversations".to_string()), MatchTarget::OperationId( "groups.conversations.CreateConversations".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.GetConversations".to_string()), vec![ MatchTarget::OperationMap("groups.conversations".to_string()), MatchTarget::OperationId("groups.conversations.GetConversations".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.UpdateConversations".to_string()), vec![ MatchTarget::OperationMap("groups.conversations".to_string()), MatchTarget::OperationId( "groups.conversations.UpdateConversations".to_string(), ), ], ); } ResourceIdentity::ChildFolders => { modify_target.map.insert( MatchTarget::OperationId("me.mailFolders.UpdateChildFolders".to_string()), vec![ MatchTarget::OperationMap("childFolders".to_string()), MatchTarget::OperationId("childFolders.UpdateChildFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.mailFolders.CreateChildFolders".to_string()), vec![ MatchTarget::OperationMap("childFolders".to_string()), MatchTarget::OperationId("childFolders.CreateChildFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.mailFolders.ListChildFolders".to_string()), vec![ MatchTarget::OperationMap("childFolders".to_string()), MatchTarget::OperationId("childFolders.ListChildFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.mailFolders.GetChildFolders".to_string()), vec![ MatchTarget::OperationMap("childFolders".to_string()), MatchTarget::OperationId("childFolders.GetChildFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.mailFolders.childFolders.move".to_string()), vec![ MatchTarget::OperationMap("childFolders".to_string()), MatchTarget::OperationId("childFolders.MoveChildFolders".to_string()), ], ); } ResourceIdentity::DeviceManagement => { modify_target.operation_map( 
"deviceManagement.detectedApps.managedDevices", "deviceManagement.detectedApps.appManagedDevices", ); } ResourceIdentity::Directory => { modify_target.operation_map( "directoryObjects.microsoft.graph.administrativeUnit", "directoryObjects.administrativeUnits", ); modify_target.map.insert( MatchTarget::OperationId("directory.administrativeUnits.delta.fa14".to_string()), vec![ MatchTarget::OperationId( "directoryObjects.administrativeUnits.delta".to_string(), ), MatchTarget::OperationMap("directoryObjects.administrativeUnits".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("directoryRoles.delta.fa14".to_string()), vec![ MatchTarget::OperationId("directoryRoles.delta".to_string()), MatchTarget::OperationMap("directoryRoles".to_string()), ], ); } ResourceIdentity::Events => { modify_target.map.insert( MatchTarget::OperationId("calendar.events.UpdateInstances".to_string()), vec![MatchTarget::OperationMap( "calendar.events.instances".to_string(), )], ); modify_target.map.insert( MatchTarget::OperationId("calendar.events.GetInstances".to_string()), vec![ MatchTarget::OperationMap("calendar.events.instances".to_string()), MatchTarget::OperationId("calendar.events.instances.GetInstances".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("calendar.events.ListInstances".to_string()), vec![ MatchTarget::OperationMap("calendar.events.instances".to_string()), MatchTarget::OperationId("calendar.events.instances.ListInstances".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("calendar.events.CreateInstances".to_string()), vec![ MatchTarget::OperationMap("calendar.events.instances".to_string()), MatchTarget::OperationId( "calendar.events.instances.CreateInstances".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId("users.ListEvents".to_string()), vec![ MatchTarget::OperationMap("users.event".to_string()), MatchTarget::OperationId("users.event.ListEvents".to_string()), ], ); 
modify_target.map.insert( MatchTarget::OperationId("users.CreateEvents".to_string()), vec![ MatchTarget::OperationMap("users.event".to_string()), MatchTarget::OperationId("users.event.CreateEvents".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.UpdateEvents".to_string()), vec![ MatchTarget::OperationMap("users.events".to_string()), MatchTarget::OperationId("users.events.UpdateEvents".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.GetEvents".to_string()), vec![ MatchTarget::OperationMap("users.events".to_string()), MatchTarget::OperationId("users.events.GetEvents".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("users.events.delta.fa14".to_string()), vec![ MatchTarget::OperationMap("users.event".to_string()), MatchTarget::OperationId("users.event.delta.fa14".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationMap("users.events.calendar.events".to_string()), vec![MatchTarget::OperationMap( "users.events.calendar".to_string(), )], ); } ResourceIdentity::GroupLifecyclePolicies => { modify_target.map.insert( MatchTarget::OperationMap( "groupLifecyclePolicies.groupLifecyclePolicy".to_string(), ), vec![MatchTarget::OperationMap( "groupLifecyclePolicies".to_string(), )], ); } ResourceIdentity::Instances => { modify_target.map.insert( MatchTarget::OperationId("me.calendarView.ListInstances".to_string()), vec![ MatchTarget::OperationMap("instances".to_string()), MatchTarget::OperationId("instances.ListInstances".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.calendarView.CreateInstances".to_string()), vec![ MatchTarget::OperationMap("instances".to_string()), MatchTarget::OperationId("instances.CreateInstances".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.calendarView.GetInstances".to_string()), vec![ MatchTarget::OperationMap("instances".to_string()), MatchTarget::OperationId("instances.GetInstances".to_string()), ], 
); modify_target.map.insert( MatchTarget::OperationId("me.calendarView.UpdateInstances".to_string()), vec![ MatchTarget::OperationMap("instances".to_string()), MatchTarget::OperationId("instances.UpdateInstances".to_string()), ], ); } ResourceIdentity::InferenceClassification => { modify_target.map.insert( MatchTarget::OperationId("me.GetInferenceClassification".to_string()), vec![ MatchTarget::OperationMap("inferenceClassification".to_string()), MatchTarget::OperationId( "inferenceClassification.GetInferenceClassification".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateInferenceClassification".to_string()), vec![ MatchTarget::OperationMap("inferenceClassification".to_string()), MatchTarget::OperationId( "inferenceClassification.UpdateInferenceClassification".to_string(), ), ], ); } ResourceIdentity::Insights => { modify_target.map.insert( MatchTarget::OperationId("me.GetInsights".to_string()), vec![ MatchTarget::OperationMap("insights".to_string()), MatchTarget::OperationId("insights.GetInsights".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateInsights".to_string()), vec![ MatchTarget::OperationMap("insights".to_string()), MatchTarget::OperationId("insights.UpdateInsights".to_string()), ], ); } ResourceIdentity::Items => { modify_target.map.insert( MatchTarget::OperationId("sites.lists.ListItems".to_string()), vec![ MatchTarget::OperationMap("items".to_string()), MatchTarget::OperationId("items.ListItems ".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.lists.CreateItems".to_string()), vec![ MatchTarget::OperationMap("items".to_string()), MatchTarget::OperationId("items.CreateItems".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.lists.GetItems".to_string()), vec![ MatchTarget::OperationMap("items".to_string()), MatchTarget::OperationId("items.GetItems".to_string()), ], ); modify_target.map.insert( 
MatchTarget::OperationId("sites.lists.UpdateItems".to_string()), vec![ MatchTarget::OperationMap("items".to_string()), MatchTarget::OperationId("items.UpdateItems".to_string()), ], ); } ResourceIdentity::Lists => { modify_target.map.insert( MatchTarget::OperationId("sites.GetLists".to_string()), vec![ MatchTarget::OperationMap("lists".to_string()), MatchTarget::OperationId("lists.GetLists".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("sites.UpdateLists".to_string()), vec![ MatchTarget::OperationMap("lists".to_string()), MatchTarget::OperationId("lists.UpdateLists".to_string()), ], ); } ResourceIdentity::ManagedDevices => { modify_target.map.insert( MatchTarget::OperationId("me.GetManagedDevices".to_string()), vec![ MatchTarget::OperationMap("managedDevices".to_string()), MatchTarget::OperationId("managedDevices.GetManagedDevices".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateManagedDevices".to_string()), vec![ MatchTarget::OperationMap("managedDevices".to_string()), MatchTarget::OperationId("managedDevices.UpdateManagedDevices".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.GetManagedDevices".to_string()), vec![ MatchTarget::OperationMap("managedDevices".to_string()), MatchTarget::OperationId("managedDevices.GetManagedDevices".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateManagedDevices".to_string()), vec![ MatchTarget::OperationMap("managedDevices".to_string()), MatchTarget::OperationId("managedDevices.UpdateManagedDevices".to_string()), ], ); } ResourceIdentity::Messages => { modify_target.map.insert( MatchTarget::OperationId("me.ListMessages".to_string()), vec![ MatchTarget::OperationMap("messages".to_string()), MatchTarget::OperationId("messages.ListMessages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.GetMessages".to_string()), vec![ MatchTarget::OperationMap("messages".to_string()), 
MatchTarget::OperationId("messages.GetMessages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.CreateMessages".to_string()), vec![ MatchTarget::OperationMap("messages".to_string()), MatchTarget::OperationId("messages.CreateMessages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateMessages".to_string()), vec![ MatchTarget::OperationMap("messages".to_string()), MatchTarget::OperationId("messages.UpdateMessages".to_string()), ], ); } ResourceIdentity::MailFolders => { modify_target.map.insert( MatchTarget::OperationId("me.ListMailFolders".to_string()), vec![ MatchTarget::OperationMap("mailFolders".to_string()), MatchTarget::OperationId("mailFolders.ListMailFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.GetMailFolders".to_string()), vec![ MatchTarget::OperationMap("mailFolders".to_string()), MatchTarget::OperationId("mailFolders.GetMailFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.CreateMailFolders".to_string()), vec![ MatchTarget::OperationMap("mailFolders".to_string()), MatchTarget::OperationId("mailFolders.CreateMailFolders".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateMailFolders".to_string()), vec![ MatchTarget::OperationMap("mailFolders".to_string()), MatchTarget::OperationId("mailFolders.UpdateMailFolders".to_string()), ], ); } ResourceIdentity::Me => { // me.user.GetUser modify_target.map.insert( MatchTarget::OperationId("me.user.GetUser".to_string()), vec![ MatchTarget::OperationMap("me".to_string()), MatchTarget::OperationId("me.GetUser".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.user.UpdateUser".to_string()), vec![ MatchTarget::OperationMap("me".to_string()), MatchTarget::OperationId("me.UpdateUser".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationMap("me.user".to_string()), vec![MatchTarget::OperationMap("me".to_string())], ); } 
ResourceIdentity::Notebooks => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.ListNotebooks".to_string()), vec![ MatchTarget::OperationMap("notebooks".to_string()), MatchTarget::OperationId("notebooks.ListNotebooks".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.GetNotebooks".to_string()), vec![ MatchTarget::OperationMap("notebooks".to_string()), MatchTarget::OperationId("notebooks.GetNotebooks".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.CreateNotebooks".to_string()), vec![ MatchTarget::OperationMap("notebooks".to_string()), MatchTarget::OperationId("notebooks.CreateNotebooks".to_string()), ], ); } ResourceIdentity::Onenote => { modify_target.map.insert( MatchTarget::OperationId("me.GetOnenote".to_string()), vec![ MatchTarget::OperationMap("onenote".to_string()), MatchTarget::OperationId("onenote.GetOnenote".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateOnenote".to_string()), vec![ MatchTarget::OperationMap("onenote".to_string()), MatchTarget::OperationId("onenote.UpdateOnenote".to_string()), ], ); } ResourceIdentity::SectionGroups => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.CreateSectionGroups".to_string()), vec![ MatchTarget::OperationMap("sectionGroups".to_string()), MatchTarget::OperationId("sectionGroups.CreateSectionGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.GetSectionGroups".to_string()), vec![ MatchTarget::OperationMap("sectionGroups".to_string()), MatchTarget::OperationId("sectionGroups.GetSectionGroup".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.ListSectionGroups".to_string()), vec![ MatchTarget::OperationMap("sectionGroups".to_string()), MatchTarget::OperationId("sectionGroups.ListSectionGroups".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.UpdateSectionGroups".to_string()), vec![ 
MatchTarget::OperationMap("sectionGroups".to_string()), MatchTarget::OperationId("sectionGroups.UpdateSectionGroup".to_string()), ], ); } ResourceIdentity::Pages => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.GetPages".to_string()), vec![ MatchTarget::OperationMap("pages".to_string()), MatchTarget::OperationId("pages.GetPages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.UpdatePages".to_string()), vec![ MatchTarget::OperationMap("pages".to_string()), MatchTarget::OperationId("pages.UpdatePages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.ListPages".to_string()), vec![ MatchTarget::OperationMap("pages".to_string()), MatchTarget::OperationId("pages.ListPages".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.CreatePages".to_string()), vec![ MatchTarget::OperationMap("pages".to_string()), MatchTarget::OperationId("pages.CreatePages".to_string()), ], ); } ResourceIdentity::Sections => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.ListSections".to_string()), vec![ MatchTarget::OperationMap("sections".to_string()), MatchTarget::OperationId("sections.ListSections".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.CreateSections".to_string()), vec![ MatchTarget::OperationMap("sections".to_string()), MatchTarget::OperationId("sections.CreateSections".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.UpdateSections".to_string()), vec![ MatchTarget::OperationMap("sections".to_string()), MatchTarget::OperationId("sections.UpdateSections".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.GetSections".to_string()), vec![ MatchTarget::OperationMap("sections".to_string()), MatchTarget::OperationId("sections.GetSections".to_string()), ], ); } ResourceIdentity::ParentSection => { modify_target.map.insert( 
MatchTarget::OperationId("me.onenote.pages.GetParentSection".to_string()), vec![ MatchTarget::OperationMap("me.onenote.pages.parentSection".to_string()), MatchTarget::OperationId( "me.onenote.pages.parentSection.GetParentNotebook".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.pages.UpdateParentSection".to_string()), vec![ MatchTarget::OperationMap("me.onenote.pages.parentSection".to_string()), MatchTarget::OperationId( "me.onenote.pages.parentSection.UpdateParentNotebook".to_string(), ), ], ); } ResourceIdentity::ParentNotebook => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.sections.GetParentNotebook".to_string()), vec![ MatchTarget::OperationMap("me.onenote.sections.parentNotebook".to_string()), MatchTarget::OperationId( "me.onenote.sections.parentNotebook.GetParentNotebook".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId("me.onenote.sections.UpdateParentNotebook".to_string()), vec![ MatchTarget::OperationMap("me.onenote.sections.parentNotebook".to_string()), MatchTarget::OperationId( "me.onenote.sections.parentNotebook.UpdateParentNotebook".to_string(), ), ], ); } ResourceIdentity::ParentSectionGroup => { modify_target.map.insert( MatchTarget::OperationId("me.onenote.sections.GetParentSectionGroup".to_string()), vec![ MatchTarget::OperationMap("me.onenote.sections.parentSectionGroup".to_string()), MatchTarget::OperationId( "me.onenote.sections.parentSectionGroup.GetParentSectionGroup".to_string(), ), ], ); modify_target.map.insert( MatchTarget::OperationId( "me.onenote.sections.UpdateParentSectionGroup".to_string(), ), vec![ MatchTarget::OperationMap("me.onenote.sections.parentSectionGroup".to_string()), MatchTarget::OperationId( "me.onenote.sections.parentSectionGroup.UpdateParentSectionGroup" .to_string(), ), ], ); } ResourceIdentity::Outlook => { modify_target.map.insert( MatchTarget::OperationId("me.GetOutlook".to_string()), vec![ 
MatchTarget::OperationMap("outlook".to_string()), MatchTarget::OperationId("outlook.GetOutlook".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateOutlook".to_string()), vec![ MatchTarget::OperationMap("outlook".to_string()), MatchTarget::OperationId("outlook.UpdateOutlook".to_string()), ], ); } ResourceIdentity::Planner => { modify_target.map.insert( MatchTarget::OperationMap("users.planner.plans.tasks".to_string()), vec![MatchTarget::OperationMap( "users.planner.plans.plannerTasks".to_string(), )], ); modify_target.map.insert( MatchTarget::OperationMap("users.planner.plans.buckets.tasks".to_string()), vec![MatchTarget::OperationMap( "users.planner.plans.buckets.bucketTasks".to_string(), )], ); } ResourceIdentity::Policies => { modify_target.operation_map("policies.policyRoot", "policies"); } ResourceIdentity::Posts => { modify_target.map.insert( MatchTarget::OperationId("groups.threads.UpdatePosts".to_string()), vec![ MatchTarget::OperationMap("groups.threads.posts".to_string()), MatchTarget::OperationId("groups.threads.posts.UpdatePosts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.threads.GetPosts".to_string()), vec![ MatchTarget::OperationMap("groups.threads.posts".to_string()), MatchTarget::OperationId("groups.threads.posts.GetPosts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.threads.ListPosts".to_string()), vec![ MatchTarget::OperationMap("groups.threads.posts".to_string()), MatchTarget::OperationId("groups.threads.posts.ListPosts".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.threads.CreatePosts".to_string()), vec![ MatchTarget::OperationMap("groups.threads.posts".to_string()), MatchTarget::OperationId("groups.threads.posts.CreatePosts".to_string()), ], ); } ResourceIdentity::Plans => { modify_target.map.insert( MatchTarget::OperationId("planner.GetPlans".to_string()), vec![ 
MatchTarget::OperationMap("planner.plans".to_string()), MatchTarget::OperationId("planner.plans.GetPlans".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.UpdatePlans".to_string()), vec![ MatchTarget::OperationMap("planner.plans".to_string()), MatchTarget::OperationId("planner.plans.UpdatePlans".to_string()), ], ); } ResourceIdentity::Reports => { modify_target.map.insert( MatchTarget::OperationId("reports.reportRoot.GetReportRoot".to_string()), vec![ MatchTarget::OperationMap("reports".to_string()), MatchTarget::OperationId("reports.GetReportRoot".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("reports.reportRoot.UpdateReportRoot".to_string()), vec![ MatchTarget::OperationMap("reports".to_string()), MatchTarget::OperationId("reports.UpdateReportRoot".to_string()), ], ); } ResourceIdentity::Settings => { modify_target.map.insert( MatchTarget::OperationId("me.GetSettings".to_string()), vec![ MatchTarget::OperationMap("settings".to_string()), MatchTarget::OperationId("settings.GetSettings".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("me.UpdateSettings".to_string()), vec![ MatchTarget::OperationMap("settings".to_string()), MatchTarget::OperationId("settings.UpdateSettings".to_string()), ], ); } ResourceIdentity::Sessions => { modify_target.map.insert( MatchTarget::OperationId("communications.callRecords.GetSessions".to_string()), vec![ MatchTarget::OperationMap("sessions".to_string()), MatchTarget::OperationId("sessions.GetSessions".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.callRecords.ListSessions".to_string()), vec![ MatchTarget::OperationMap("sessions".to_string()), MatchTarget::OperationId("sessions.ListSessions".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.callRecords.UpdateSessions".to_string()), vec![ MatchTarget::OperationMap("sessions".to_string()), 
MatchTarget::OperationId("sessions.UpdateSessions".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("communications.callRecords.CreateSessions".to_string()), vec![ MatchTarget::OperationMap("sessions".to_string()), MatchTarget::OperationId("sessions.CreateSessions".to_string()), ], ); } ResourceIdentity::Teams => { modify_target.map.insert( MatchTarget::OperationMap("teams.primaryChannel.messages".to_string()), vec![MatchTarget::OperationMap( "teams.primaryChannel.primaryChannelMessages".to_string(), )], ); modify_target.map.insert( MatchTarget::OperationMap("teams.primaryChannel.tabs".to_string()), vec![MatchTarget::OperationMap( "teams.primaryChannel.primaryChannelTabs".to_string(), )], ); } ResourceIdentity::Tasks => { modify_target.map.insert( MatchTarget::OperationId("planner.ListTasks".to_string()), vec![ MatchTarget::OperationMap("planner.tasks".to_string()), MatchTarget::OperationId("planner.tasks.ListTasks".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.UpdateTasks".to_string()), vec![ MatchTarget::OperationMap("planner.tasks".to_string()), MatchTarget::OperationId("planner.tasks.UpdateTasks".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("planner.GetTasks".to_string()), vec![ MatchTarget::OperationMap("planner.tasks".to_string()), MatchTarget::OperationId("planner.tasks.GetTasks".to_string()), ], ); } ResourceIdentity::Threads => { modify_target.map.insert( MatchTarget::OperationId("groups.UpdateThreads".to_string()), vec![ MatchTarget::OperationMap("groups.threads".to_string()), MatchTarget::OperationId("groups.threads.UpdateThreads".to_string()), ], ); modify_target.map.insert( MatchTarget::OperationId("groups.GetThreads".to_string()), vec![ MatchTarget::OperationMap("groups.threads".to_string()), MatchTarget::OperationId("groups.threads.GetThreads".to_string()), ], ); } _ => {} } modify_target }
46.114575
100
0.520274
9ce92c08fde253a2bf8b1dd4d9a01222af650d2d
1,246
/// Version of the RTMP handshake schema detected from the peer's digest
/// placement (schema 0 vs. schema 1), or `Unknown` before detection.
#[derive(Debug)]
pub enum SchemaVersion {
    Schema0,
    Schema1,
    Unknown,
}

/// Client-side handshake state machine: C0+C1 out, S0/S1/S2 in, C2 out.
#[derive(Debug, PartialEq)]
pub enum ClientHandshakeState {
    WriteC0C1,
    ReadS0S1S2,
    WriteC2,
    Finish,
}

/// Server-side handshake state machine: C0+C1 in, S0/S1/S2 out, C2 in.
#[derive(Debug, Copy, Clone)]
pub enum ServerHandshakeState {
    ReadC0C1,
    WriteS0S1S2,
    ReadC2,
    Finish,
}

/// RTMP protocol version byte sent in C0/S0.
pub const RTMP_VERSION: usize = 3;
/// Size in bytes of each of the C1/C2/S1/S2 handshake packets.
pub const RTMP_HANDSHAKE_SIZE: usize = 1536;
/// Version field embedded in S1 when using the digest handshake.
pub const RTMP_SERVER_VERSION: [u8; 4] = [0x0D, 0x0E, 0x0A, 0x0D];
/// Version field embedded in C1 when using the digest handshake.
pub const RTMP_CLIENT_VERSION: [u8; 4] = [0x0C, 0x00, 0x0D, 0x0E];
/// Length in bytes of the HMAC-SHA256 digest used by the complex handshake.
pub const RTMP_DIGEST_LENGTH: usize = 32;

// `'static` is implied for `const` string slices; spelling it out triggers
// clippy's `redundant_static_lifetimes` lint.
/// ASCII prefix of the server HMAC key (first 36 bytes of `RTMP_SERVER_KEY`).
pub const RTMP_SERVER_KEY_FIRST_HALF: &str = "Genuine Adobe Flash Media Server 001";
/// ASCII prefix of the client HMAC key.
pub const RTMP_CLIENT_KEY_FIRST_HALF: &str = "Genuine Adobe Flash Player 001";

/// Full 68-byte server key: the ASCII prefix above followed by 32 fixed bytes.
pub const RTMP_SERVER_KEY: [u8; 68] = [
    0x47, 0x65, 0x6e, 0x75, 0x69, 0x6e, 0x65, 0x20, 0x41, 0x64, 0x6f, 0x62,
    0x65, 0x20, 0x46, 0x6c, 0x61, 0x73, 0x68, 0x20, 0x4d, 0x65, 0x64, 0x69,
    0x61, 0x20, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x30, 0x30, 0x31,
    // "Genuine Adobe Flash Media Server 001" ends here (36 bytes).
    0xf0, 0xee, 0xc2, 0x4a, 0x80, 0x68, 0xbe, 0xe8, 0x2e, 0x00, 0xd0, 0xd1,
    0x02, 0x9e, 0x7e, 0x57, 0x6e, 0xec, 0x5d, 0x2d, 0x29, 0x80, 0x6f, 0xab,
    0x93, 0xb8, 0xe6, 0x36, 0xcf, 0xeb, 0x31, 0xae,
]; // 68
32.789474
99
0.682183
160e7002fe940117f889fd74d0569c5c7b61193b
1,795
/// A simple 3-component point used to demonstrate Rust's borrowing rules.
struct Point { x: i32, y: i32, z: i32 }

pub fn main() {
    let mut point = Point { x: 0, y: 0, z: 0 };

    {
        // Any number of shared (immutable) borrows may coexist.
        let first_ref = &point;
        let second_ref = &point;

        // Reads may go through either reference or the owner itself.
        println!("Point has coordinates: ({}, {}, {})",
                 first_ref.x, second_ref.y, point.z);

        // A mutable borrow here would be rejected by the compiler while the
        // shared references are alive:
        //let exclusive = &mut point;
        // TODO ^ Try uncommenting this line

        // Shared references end at the closing brace.
    }

    {
        // With no shared borrows alive, one exclusive borrow is allowed.
        let exclusive = &mut point;

        // Mutation must go through the exclusive reference.
        exclusive.x = 5;
        exclusive.y = 2;
        exclusive.z = 1;

        // While `exclusive` lives, `&point` is rejected:
        //let shared_again = &point;

        // ...and so is borrowing a single field:
        //let y = &point.y;
        // TODO ^ Try uncommenting this line

        // Even `println!` cannot take its implicit shared borrow of `point`:
        //println!("Point Z coordinate is {}", point.z);
        // TODO ^ Try uncommenting this line

        // The mutable reference itself may be read as if it were immutable.
        println!("Point has coordinates: ({}, {}, {})",
                 exclusive.x, exclusive.y, exclusive.z);

        // Exclusive borrow ends here.
    }

    // The owner can be shared again once the mutable borrow is gone.
    let final_view = &point;
    println!("Point now has coordinates: ({}, {}, {})",
             final_view.x, final_view.y, final_view.z);
}
33.240741
78
0.588301
874d36e83a716e9abca8b9b4e7f38aa247c27637
289
// Copyright 2020 justjavac. All rights reserved. MIT license. use anyhow::Result; use crate::version::get_local_versions; pub fn exec() -> Result<()> { let mut versions = get_local_versions(); versions.sort(); versions.reverse(); println!("{}", versions.join("\n")); Ok(()) }
22.230769
62
0.66782
9bd48352b070505d5f6bfd027051e58fb3db41de
827
#![feature(proc_macro_hygiene)]

// Rocket example: a hit counter kept in managed application state.

#[macro_use] extern crate rocket;

#[cfg(test)] mod tests;

use std::sync::atomic::{AtomicUsize, Ordering};

use rocket::State;
use rocket::response::content;

// Shared visit counter; the atomic allows lock-free updates from
// concurrently handled requests.
struct HitCount(AtomicUsize);

// GET / — records one visit, then reports the running total as HTML.
#[get("/")]
fn index(hit_count: State<'_, HitCount>) -> content::Html<String> {
    // Relaxed ordering suffices: the counter carries no other state.
    hit_count.0.fetch_add(1, Ordering::Relaxed);
    let msg = "Your visit has been recorded!";
    // Reuses the `count` handler below as a plain function.
    let count = format!("Visits: {}", count(hit_count));
    content::Html(format!("{}<br /><br />{}", msg, count))
}

// GET /count — reads the counter without incrementing it.
#[get("/count")]
fn count(hit_count: State<'_, HitCount>) -> String {
    hit_count.0.load(Ordering::Relaxed).to_string()
}

// Builds the Rocket instance: both routes plus the managed counter,
// starting from zero.
fn rocket() -> rocket::Rocket {
    rocket::ignite()
        .mount("/", routes![index, count])
        .manage(HitCount(AtomicUsize::new(0)))
}

fn main() {
    rocket().launch();
}
22.972222
67
0.633615
9c8708bbe5e055d3439a479aa47f99247eb00195
608
use crate::{wl_split_timer::WlSplitTimer, TimerDisplay};
use std::{
    error::Error,
    sync::{Arc, Mutex},
};

/// Timer front-end that owns a shared, thread-safe handle to the timer.
pub struct App {
    timer: Arc<Mutex<WlSplitTimer>>,
}

impl App {
    /// Wrap the given timer in an `Arc<Mutex<…>>` so it can be shared.
    pub fn new(timer: WlSplitTimer) -> Self {
        let timer = Arc::new(Mutex::new(timer));
        Self { timer }
    }
}

impl TimerDisplay for App {
    /// Perform one display step; returns `Ok(true)` exactly when the timer
    /// has requested exit, `Ok(false)` otherwise.
    fn run(&mut self) -> Result<bool, Box<dyn Error>> {
        let guard = self.timer.lock().unwrap();
        Ok(guard.exit)
    }

    /// Borrow the shared timer handle.
    fn timer(&self) -> &Arc<Mutex<WlSplitTimer>> {
        &self.timer
    }
}
19.612903
56
0.537829
ff9dfa71db7628ab6bc7c7ef8ea6b968dfa88104
151,678
// Copyright 2019 The vault713 Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /// Swap API trait pub mod api; /// Library that support bitcoin operations pub mod bitcoin; /// Swap crate errors pub mod error; /// Messages that Buyer and Seller are exchanging during the swap process pub mod message; /// schnorr signature routine pub mod multisig; /// Finite State Machine that handle swap workflow pub mod fsm; /// Swap buyer API (selling MWC for BTC) pub mod buyer; /// Swap Seller API (selling BTC for MWC) pub mod seller; /// Swap state object that is used by both byer abd seller pub mod swap; /// Swap trade sessions catalog pub mod trades; /// Serialization adapters pub mod ser; /// Types used by swap library pub mod types; pub use self::error::ErrorKind; pub use self::swap::Swap; pub use self::types::Context; //pub use self::types::BtcSellerContext; pub(crate) use self::api::SwapApi; pub(crate) use self::buyer::BuyApi; pub(crate) use self::seller::SellApi; pub use grin_keychain::Keychain; #[cfg(test)] use serial_test::serial; #[cfg(test)] use std::sync::atomic::{AtomicBool, Ordering}; const CURRENT_VERSION: u8 = 1; #[cfg(test)] lazy_static! 
{ /// Flag to set test mode static ref TEST_MODE: AtomicBool = AtomicBool::new(false); static ref ACTIVATE_TEST_RESPONSE: AtomicBool = AtomicBool::new(true); } #[cfg(test)] /// Set the test mode pub fn set_test_mode(mode: bool) { TEST_MODE.store(mode, Ordering::Relaxed); } #[cfg(test)] /// Check if we are in test mode pub fn is_test_mode() -> bool { TEST_MODE.load(Ordering::Relaxed) } #[cfg(test)] /// Set the test mode pub fn activate_test_response(mode: bool) { ACTIVATE_TEST_RESPONSE.store(mode, Ordering::Relaxed); } #[cfg(test)] /// Check if we are in test mode pub fn is_test_response() -> bool { ACTIVATE_TEST_RESPONSE.load(Ordering::Relaxed) } #[cfg(test)] mod tests { use crate::grin_util::{Mutex, RwLock}; use crate::{NodeClient, Slate, SlateVersion, VersionedSlate}; use bitcoin_lib::network::constants::Network as BtcNetwork; use bitcoin_lib::util::key::PublicKey as BtcPublicKey; use bitcoin_lib::{Address, Transaction as BtcTransaction, TxOut}; use grin_core::core::transaction::Weighting; use grin_core::core::verifier_cache::LruVerifierCache; use grin_core::core::{Inputs, KernelFeatures, Transaction, TxKernel}; use grin_keychain::{ExtKeychain, Identifier, Keychain, SwitchCommitmentType}; use grin_util::secp::key::{PublicKey, SecretKey}; use grin_util::secp::pedersen::{Commitment, RangeProof}; use grin_util::to_hex; use std::collections::HashMap; #[cfg(not(target_os = "windows"))] use std::fs::{read_to_string, write}; use std::mem; #[cfg(not(target_os = "windows"))] use std::str::FromStr; use std::sync::Arc; use super::bitcoin::*; use super::message::Message; use super::types::*; use super::*; use crate::swap::fsm::machine::StateMachine; use crate::swap::fsm::state::{Input, StateId, StateProcessRespond}; use crate::swap::message::{SecondaryUpdate, Update}; use grin_core::global; use grin_core::global::ChainTypes; use grin_wallet_util::grin_core::core::Committed; const GRIN_UNIT: u64 = 1_000_000_000; fn keychain(idx: u8) -> ExtKeychain { let seed_sell: String = 
format!("fixed0rng0for0testing0purposes0{}", idx % 10); let seed_sell = crate::blake2::blake2b::blake2b(32, &[], seed_sell.as_bytes()); ExtKeychain::from_seed(seed_sell.as_bytes(), false).unwrap() } fn context_sell(kc: &ExtKeychain) -> Context { Context { multisig_key: key_id(0, 0), multisig_nonce: key(kc, 1, 0), lock_nonce: key(kc, 1, 1), refund_nonce: key(kc, 1, 2), redeem_nonce: key(kc, 1, 3), role_context: RoleContext::Seller(SellerContext { parent_key_id: key_id(0, 0), inputs: vec![ (key_id(0, 1), None, 60 * GRIN_UNIT), (key_id(0, 2), None, 60 * GRIN_UNIT), ], change_output: key_id(0, 3), change_amount: 20 * GRIN_UNIT, // selling 100 coins, so 20 will be left refund_output: key_id(0, 4), secondary_context: SecondarySellerContext::Btc(BtcSellerContext { cosign: key_id(0, 5), }), }), } } fn context_buy(kc: &ExtKeychain) -> Context { Context { multisig_key: key_id(0, 0), multisig_nonce: key(kc, 1, 0), lock_nonce: key(kc, 1, 1), refund_nonce: key(kc, 1, 2), redeem_nonce: key(kc, 1, 3), role_context: RoleContext::Buyer(BuyerContext { parent_key_id: key_id(0, 0), output: key_id(0, 1), redeem: key_id(0, 2), secondary_context: SecondaryBuyerContext::Btc(BtcBuyerContext { refund: key_id(0, 3), }), }), } } fn key_id(d1: u32, d2: u32) -> Identifier { ExtKeychain::derive_key_id(2, d1, d2, 0, 0) } fn key(kc: &ExtKeychain, d1: u32, d2: u32) -> SecretKey { kc.derive_key(0, &key_id(d1, d2), SwitchCommitmentType::None) .unwrap() } fn btc_address(kc: &ExtKeychain) -> String { let key = PublicKey::from_secret_key(kc.secp(), &key(kc, 2, 0)).unwrap(); let address = Address::p2pkh( &BtcPublicKey { compressed: true, key, }, BtcNetwork::Testnet, ); format!("{}", address) } #[derive(Debug, Clone)] struct TestNodeClientState { pub height: u64, pub pending: Vec<Transaction>, pub outputs: HashMap<Commitment, u64>, pub kernels: HashMap<Commitment, (TxKernel, u64)>, } #[derive(Debug, Clone)] struct TestNodeClient { pub state: Arc<Mutex<TestNodeClientState>>, } impl TestNodeClient { pub fn 
new(height: u64) -> Self { let state = TestNodeClientState { height, pending: Vec::new(), outputs: HashMap::new(), kernels: HashMap::new(), }; Self { state: Arc::new(Mutex::new(state)), } } pub fn push_output(&self, commit: Commitment) { let mut state = self.state.lock(); let height = state.height; state.outputs.insert(commit, height); } pub fn mine_block(&self) { let mut state = self.state.lock(); state.height += 1; let height = state.height; let pending = mem::replace(&mut state.pending, Vec::new()); for tx in pending { for input in tx.inputs_committed() { state.outputs.remove(&input); } for output in tx.outputs_committed() { state.outputs.insert(output, height); } for kernel in tx.body.kernels { state .kernels .insert(kernel.excess.clone(), (kernel, height)); } } } pub fn mine_blocks(&self, count: u64) { if count > 0 { self.mine_block(); if count > 1 { let mut state = self.state.lock(); state.height += count - 1; } } } /// Get a current state for the test chain pub fn get_state(&self) -> TestNodeClientState { self.state.lock().clone() } /// Set a state for the test chain pub fn set_state(&self, chain_state: &TestNodeClientState) { let mut state = self.state.lock(); *state = chain_state.clone(); } // Clean the data, not height. 
Reorg attack pub fn clean(&self) { let mut state = self.state.lock(); state.pending.clear(); state.outputs.clear(); state.kernels.clear(); } } impl NodeClient for TestNodeClient { fn node_url(&self) -> &str { "test_node_url" } fn set_node_url(&mut self, _node_url: &str) { unimplemented!() } fn node_api_secret(&self) -> Option<String> { unimplemented!() } fn set_node_api_secret(&mut self, _node_api_secret: Option<String>) { unimplemented!() } fn get_chain_tip(&self) -> Result<(u64, String, u64), crate::Error> { let res = (self.state.lock().height, "testnodehash".to_string(), 123455); Ok(res) } fn get_header_info(&self, _height: u64) -> Result<crate::HeaderInfo, crate::Error> { unimplemented!() } fn get_connected_peer_info( &self, ) -> Result<Vec<grin_p2p::types::PeerInfoDisplayLegacy>, crate::Error> { unimplemented!() } fn height_range_to_pmmr_indices( &self, _start_height: u64, _end_height: Option<u64>, ) -> Result<(u64, u64), crate::Error> { unimplemented!() } fn get_blocks_by_height( &self, _start_height: u64, _end_height: u64, _threads_number: usize, ) -> Result<Vec<grin_api::BlockPrintable>, crate::Error> { unimplemented!() } fn reset_cache(&self) { unimplemented!() } fn post_tx(&self, tx: &Transaction, _fluff: bool) -> Result<(), crate::Error> { tx.validate( Weighting::AsTransaction, Arc::new(RwLock::new(LruVerifierCache::new())), ) .map_err(|e| crate::ErrorKind::Node(format!("Node failure, {}", e)))?; let mut state = self.state.lock(); for input in tx.inputs_committed() { // Output not unspent if !state.outputs.contains_key(&input) { return Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } // Double spend attempt for tx_pending in state.pending.iter() { for in_pending in tx_pending.inputs_committed() { if in_pending == input { return Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } } } } // Check for duplicate output for output in tx.outputs_committed() { if state.outputs.contains_key(&output) { return 
Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } for tx_pending in state.pending.iter() { for out_pending in tx_pending.outputs_committed() { if out_pending == output { return Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } } } } // Check for duplicate kernel for kernel in tx.kernels() { // Duplicate kernel if state.kernels.contains_key(&kernel.excess) { return Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } for tx_pending in state.pending.iter() { for kernel_pending in tx_pending.kernels() { if kernel_pending.excess == kernel.excess { return Err(crate::ErrorKind::Node("Node failure".to_string()).into()); } } } } state.pending.push(tx.clone()); Ok(()) } fn get_version_info(&mut self) -> Option<crate::NodeVersionInfo> { unimplemented!() } fn get_outputs_from_node( &self, wallet_outputs: &Vec<Commitment>, ) -> Result<HashMap<Commitment, (String, u64, u64)>, crate::Error> { let mut map = HashMap::new(); let state = self.state.lock(); for output in wallet_outputs { if let Some(height) = state.outputs.get(&output) { map.insert(output.clone(), (to_hex(&output.0), *height, 0)); } } Ok(map) } fn get_outputs_by_pmmr_index( &self, _start_height: u64, _end_height: Option<u64>, _max_outputs: u64, ) -> Result<(u64, u64, Vec<(Commitment, RangeProof, bool, u64, u64)>), crate::Error> { unimplemented!() } fn get_kernel( &self, excess: &Commitment, _min_height: Option<u64>, _max_height: Option<u64>, ) -> Result<Option<(TxKernel, u64, u64)>, crate::Error> { let state = self.state.lock(); let res = state .kernels .get(excess) .map(|(kernel, height)| (kernel.clone(), *height, 0)); Ok(res) } } #[test] #[serial] fn test_refund_tx_lock() { set_test_mode(true); global::set_local_chain_type(global::ChainTypes::Floonet); swap::set_testing_cur_time(1567632152); let kc_sell = keychain(1); let ctx_sell = context_sell(&kc_sell); let secondary_redeem_address = btc_address(&kc_sell); let height = 100_000; let mut api_sell = BtcSwapApi::new_test( 
Arc::new(TestNodeClient::new(height)), Arc::new(Mutex::new(TestBtcNodeClient::new(1))), ); let mut swap = api_sell .create_swap_offer( &kc_sell, &ctx_sell, 100 * GRIN_UNIT, 3_000_000, Currency::Btc, secondary_redeem_address, true, // mwc should be publisher first 30, 3, 3600, 3600, "file".to_string(), "/tmp/del.me".to_string(), None, None, ) .unwrap(); let mut fsm_sell = api_sell.get_fsm(&kc_sell, &swap); let tx_state = api_sell .request_tx_confirmations(&kc_sell, &mut swap) .unwrap(); let message = match fsm_sell .process(Input::Check, &mut swap, &ctx_sell, &tx_state) .unwrap() .action .unwrap() { Action::SellerSendOfferMessage(message) => message, _ => panic!("Unexpected action"), }; // Simulate short refund lock time by passing height+4h let kc_buy = keychain(2); let ctx_buy = context_buy(&kc_buy); let nc = TestNodeClient::new(height + 12 * 60); let (id, offer, secondary_update) = message.unwrap_offer().unwrap(); let res = BuyApi::accept_swap_offer(&kc_buy, &ctx_buy, id, offer, secondary_update, &nc); assert_eq!( res.err().unwrap(), ErrorKind::InvalidMessageData( "Lock Slate inputs are not found at the chain".to_string() ) ); // Swap cannot be accepted } // Because of gonden output new line symbol we skipping Windows. 
// Skipped on Windows: the golden files under swap_test/ are stored with Unix
// ("\n") line endings, so the byte-for-byte comparisons below would fail there.
#[cfg(not(target_os = "windows"))]
#[test]
#[serial]
fn test_btc_swap() {
	// Full happy-path BTC/MWC atomic swap: both the Seller and the Buyer state
	// machines are driven step by step through offer, lock, redeem and
	// completion, and every intermediate serialized state is compared against
	// golden JSON files under swap_test/.
	set_test_mode(true);
	swap::set_testing_cur_time(1567632152);
	global::set_local_chain_type(ChainTypes::Floonet);
	// Flip to true to (re)generate the golden JSON files instead of checking
	// against them. The final assert guarantees it is never committed as true.
	let write_json = false;
	let kc_sell = keychain(1);
	let ctx_sell = context_sell(&kc_sell);
	let secondary_redeem_address = btc_address(&kc_sell);
	let nc = TestNodeClient::new(300_000);
	let btc_nc = TestBtcNodeClient::new(500_000);
	let amount = 100 * GRIN_UNIT;
	let btc_amount_1 = 2_000_000;
	let btc_amount_2 = 1_000_000;
	let btc_amount = btc_amount_1 + btc_amount_2;
	// When test was stored: Utc.ymd(2019, 9, 4).and_hms_micro(21, 22, 32, 581245)

	// Seller: create swap offer
	let mut api_sell =
		BtcSwapApi::new_test(Arc::new(nc.clone()), Arc::new(Mutex::new(btc_nc.clone())));
	let mut swap_sell = api_sell
		.create_swap_offer(
			&kc_sell,
			&ctx_sell,
			amount,
			btc_amount,
			Currency::Btc,
			secondary_redeem_address,
			true, // lock MWC first
			30,
			6,
			3600,
			3600,
			"file".to_string(),
			"/tmp/del.me".to_string(),
			None,
			None,
		)
		.unwrap();
	let mut fsm_sell = api_sell.get_fsm(&kc_sell, &swap_sell);
	let tx_conf = &api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerSendingOffer);
	let message_1: Message = match sell_resp.action.unwrap() {
		Action::SellerSendOfferMessage(message) => message,
		_ => panic!("Unexpected action"),
	};
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Execute, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerWaitForOfferMessage"
	);
	assert_eq!(swap_sell.state, StateId::SellerWaitingForAcceptanceMessage);
	if write_json {
		write(
			"swap_test/swap_sell_1.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/message_1.json",
			serde_json::to_string_pretty(&message_1).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/context_sell.json",
			serde_json::to_string_pretty(&ctx_sell).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_1.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/message_1.json").unwrap(),
			serde_json::to_string_pretty(&message_1).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/context_sell.json").unwrap(),
			serde_json::to_string_pretty(&ctx_sell).unwrap()
		);
	}

	// Add inputs to utxo set
	nc.mine_blocks(2);
	for input in swap_sell.lock_slate.tx.inputs_committed() {
		nc.push_output(input);
	}

	let kc_buy = keychain(2);
	let ctx_buy = context_buy(&kc_buy);

	// Buyer: accept swap offer
	let api_buy =
		BtcSwapApi::new_test(Arc::new(nc.clone()), Arc::new(Mutex::new(btc_nc.clone())));
	let (id, offer, secondary_update) = message_1.unwrap_offer().unwrap();
	let mut swap_buy =
		BuyApi::accept_swap_offer(&kc_buy, &ctx_buy, id, offer, secondary_update, &nc).unwrap();
	let mut fsm_buy = api_buy.get_fsm(&kc_buy, &swap_buy);
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerSendingAcceptOfferMessage);
	let message_2 = match buy_resp.action.unwrap() {
		Action::BuyerSendAcceptOfferMessage(message) => message,
		_ => panic!("Unexpected action"),
	};
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Execute, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForSellerToLock);
	// Expected to wait for the Seller to deposit MWC and wait for 1 block
	match buy_resp.action.unwrap() {
		Action::WaitForMwcConfirmations {
			name: _,
			required,
			actual,
		} => {
			assert_eq!(required, 1);
			assert_eq!(actual, 0);
		}
		_ => panic!("Invalid action"),
	}

	// !!!!!!!!!!!!!!!!!!!!!!
	// Here we are changing lock order because we want to keep tests original. Waiting case is covered, can go normally
	swap_buy.seller_lock_first = false;
	swap_sell.seller_lock_first = true;

	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(
		swap_buy.state,
		StateId::BuyerPostingSecondaryToMultisigAccount
	);

	// Buyer: should deposit bitcoin
	let address = match buy_resp.action.unwrap() {
		Action::DepositSecondary {
			currency: _,
			amount,
			address,
		} => {
			assert_eq!(amount, btc_amount);
			address
		}
		_ => panic!("Invalid action"),
	};
	let address = Address::from_str(&address).unwrap();

	// Buyer: first deposit (intentionally only part of the required amount)
	let tx_1 = BtcTransaction {
		version: 2,
		lock_time: 0,
		input: vec![],
		output: vec![TxOut {
			value: btc_amount_1,
			script_pubkey: address.script_pubkey(),
		}],
	};
	let txid_1 = tx_1.txid();
	btc_nc.push_transaction(&tx_1);
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(
		swap_buy.state,
		StateId::BuyerPostingSecondaryToMultisigAccount
	);
	// Still short by btc_amount_2, so the FSM keeps asking for a deposit.
	match buy_resp.action.unwrap() {
		Action::DepositSecondary {
			currency: _,
			amount,
			address: _,
		} => assert_eq!(amount, btc_amount_2),
		_ => panic!("Invalid action"),
	};

	// Buyer: second deposit
	btc_nc.mine_blocks(2);
	let tx_2 = BtcTransaction {
		version: 2,
		lock_time: 0,
		input: vec![],
		output: vec![TxOut {
			value: btc_amount_2,
			script_pubkey: address.script_pubkey(),
		}],
	};
	let txid_2 = tx_2.txid();
	btc_nc.push_transaction(&tx_2);
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForLockConfirmations);
	match buy_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: _,
			mwc_actual: _,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: _,
			sec_actual: actual,
		} => assert_eq!(actual, Some(1)),
		_ => panic!("Invalid action"),
	};
	btc_nc.mine_blocks(5);

	// Buyer: wait for Grin confirmations
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForLockConfirmations);
	match buy_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: _,
			mwc_actual: actual,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: _,
			sec_actual: _,
		} => assert_eq!(actual, 0),
		_ => panic!("Invalid action"),
	};

	// Check if buyer has correct confirmed outputs
	{
		let script = api_buy.script(&swap_buy).unwrap();
		let (pending_amount, confirmed_amount, _, conf_outputs) =
			api_buy.btc_balance(&swap_buy, &script, 1).unwrap();
		assert_eq!(pending_amount, 0);
		assert_eq!(confirmed_amount, btc_amount_1 + btc_amount_2);
		assert_eq!(conf_outputs.len(), 2);
		// Each deposit txid must appear exactly once among confirmed outputs.
		let mut match_1 = 0;
		let mut match_2 = 0;
		for output in &conf_outputs {
			if output.out_point.txid == txid_1 {
				match_1 += 1;
			}
			if output.out_point.txid == txid_2 {
				match_2 += 1;
			}
		}
		assert_eq!(match_1, 1);
		assert_eq!(match_2, 1);
	}

	if write_json {
		write(
			"swap_test/swap_buy_1.json",
			serde_json::to_string_pretty(&swap_buy).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/message_2.json",
			serde_json::to_string_pretty(&message_2).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/context_buy.json",
			serde_json::to_string_pretty(&ctx_buy).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_buy_1.json").unwrap(),
			serde_json::to_string_pretty(&swap_buy).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/message_2.json").unwrap(),
			serde_json::to_string_pretty(&message_2).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/context_buy.json").unwrap(),
			serde_json::to_string_pretty(&ctx_buy).unwrap()
		);
	}

	// Seller: receive accepted offer
	assert_eq!(swap_sell.state, StateId::SellerWaitingForAcceptanceMessage);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(
			Input::IncomeMessage(message_2),
			&mut swap_sell,
			&ctx_sell,
			&tx_conf,
		)
		.unwrap();
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerPublishMwcLockTx"
	);
	assert_eq!(swap_sell.state, StateId::SellerPostingLockMwcSlate);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Execute, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerWaitingForLockConfirmations);
	match sell_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: required,
			mwc_actual: actual,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: _,
			sec_actual: _,
		} => {
			assert_eq!(required, 30);
			assert_eq!(actual, 0)
		}
		_ => panic!("Invalid action"),
	}

	if write_json {
		write(
			"swap_test/swap_sell_2.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_2.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
	}

	// Seller: wait for Grin confirmations
	nc.mine_blocks(10);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerWaitingForLockConfirmations);
	match sell_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: required,
			mwc_actual: actual,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: _,
			sec_actual: _,
		} => {
			assert_eq!(required, 30);
			assert_eq!(actual, 10)
		}
		_ => panic!("Invalid action"),
	}
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForLockConfirmations);
	match buy_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: required,
			mwc_actual: actual,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: _,
			sec_actual: _,
		} => {
			assert_eq!(required, 30);
			assert_eq!(actual, 10)
		}
		_ => panic!("Invalid action"),
	}

	// Undo a BTC block to test seller
	{
		let mut state = btc_nc.state.lock();
		state.height -= 1;
	}

	// Seller: wait BTC confirmations
	nc.mine_blocks(20);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerWaitingForLockConfirmations);
	match sell_resp.action.unwrap() {
		Action::WaitForLockConfirmations {
			mwc_required: _,
			mwc_actual: _,
			currency: _,
			sec_expected_to_be_posted: _,
			sec_required: required,
			sec_actual: actual,
		} => {
			assert_eq!(required, 6);
			assert_eq!(actual, Some(5))
		}
		_ => panic!("Invalid action"),
	}
	btc_nc.mine_block();

	if write_json {
		write(
			"swap_test/swap_sell_3.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_3.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
	}

	// Checking if both seller & Buyer are moved to the redeem message exchange step
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerWaitingForInitRedeemMessage);
	assert_eq!(swap_buy.state, StateId::BuyerSendingInitRedeemMessage);
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerWaitingForInitRedeemMessage"
	);
	let message_3 = match buy_resp.action.unwrap() {
		Action::BuyerSendInitRedeemMessage(message) => message,
		_ => panic!("Unexpected action"),
	};
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	fsm_buy
		.process(Input::Execute, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForRespondRedeemMessage);

	if write_json {
		write(
			"swap_test/swap_buy_2.json",
			serde_json::to_string_pretty(&swap_buy).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/message_3.json",
			serde_json::to_string_pretty(&message_3).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_buy_2.json").unwrap(),
			serde_json::to_string_pretty(&swap_buy).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/message_3.json").unwrap(),
			serde_json::to_string_pretty(&message_3).unwrap()
		);
	}

	// Seller: sign redeem
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerWaitingForInitRedeemMessage);
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerWaitingForInitRedeemMessage"
	);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(
			Input::IncomeMessage(message_3),
			&mut swap_sell,
			&ctx_sell,
			&tx_conf,
		)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerSendingInitRedeemMessage);
	let message_4 = match sell_resp.action.unwrap() {
		Action::SellerSendRedeemMessage(message) => message,
		_ => panic!("Unexpected action"),
	};
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Execute, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	// Seller: wait for buyer's on-chain redeem tx
	assert_eq!(swap_sell.state, StateId::SellerWaitingForBuyerToRedeemMwc);
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerWaitForBuyerRedeemPublish"
	);

	if write_json {
		write(
			"swap_test/swap_sell_4.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
		write(
			"swap_test/message_4.json",
			serde_json::to_string_pretty(&message_4).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_4.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
		assert_eq!(
			read_to_string("swap_test/message_4.json").unwrap(),
			serde_json::to_string_pretty(&message_4).unwrap()
		);
	}

	// Buyer: redeem
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitingForRespondRedeemMessage);
	assert_eq!(
		buy_resp.action.unwrap().get_id_str(),
		"BuyerWaitingForRedeemMessage"
	);
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(
			Input::IncomeMessage(message_4),
			&mut swap_buy,
			&ctx_buy,
			&tx_conf,
		)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerRedeemMwc);
	assert_eq!(
		buy_resp.action.unwrap().get_id_str(),
		"BuyerPublishMwcRedeemTx"
	);
	let tx_conf = &api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Execute, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitForRedeemMwcConfirmations);
	assert_eq!(
		buy_resp.action.unwrap().get_id_str(),
		"WaitForMwcConfirmations"
	);

	// Buyer: almost done, just need to wait for confirmations
	nc.mine_block();
	let tx_conf = api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	assert_eq!(swap_buy.state, StateId::BuyerWaitForRedeemMwcConfirmations);
	match buy_resp.action.unwrap() {
		Action::WaitForMwcConfirmations {
			name: _,
			required,
			actual,
		} => {
			assert_eq!(actual, 1);
			assert_eq!(required, 30);
		}
		_ => panic!("Invalid action"),
	}

	// At this point, buyer would add Grin to their outputs
	// Now seller can redeem BTC
	if write_json {
		write(
			"swap_test/swap_buy_3.json",
			serde_json::to_string_pretty(&swap_buy).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_buy_3.json").unwrap(),
			serde_json::to_string_pretty(&swap_buy).unwrap()
		);
	}

	// Seller: publish BTC tx
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(swap_sell.state, StateId::SellerRedeemSecondaryCurrency);
	assert_eq!(
		sell_resp.action.unwrap().get_id_str(),
		"SellerPublishTxSecondaryRedeem"
	);

	if write_json {
		write(
			"swap_test/swap_sell_5.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_5.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
	}

	// Seller: publishing and wait for BTC confirmations
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Execute, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(
		swap_sell.state,
		StateId::SellerWaitingForRedeemConfirmations
	);
	match sell_resp.action.unwrap() {
		Action::WaitForSecondaryConfirmations {
			name: _,
			expected_to_be_posted: _,
			currency: _,
			required,
			actual,
		} => {
			assert_eq!(required, 6);
			assert_eq!(actual, 0)
		}
		_ => panic!("Invalid action"),
	}
	btc_nc.mine_block();

	// still waiting
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	assert_eq!(
		swap_sell.state,
		StateId::SellerWaitingForRedeemConfirmations
	);
	match sell_resp.action.unwrap() {
		Action::WaitForSecondaryConfirmations {
			name: _,
			expected_to_be_posted: _,
			currency: _,
			required,
			actual,
		} => {
			assert_eq!(required, 6);
			assert_eq!(actual, 1)
		}
		_ => panic!("Invalid action"),
	}

	// Let's mine more blocks, so both Buyer and Seller will come to complete state
	nc.mine_blocks(30);
	btc_nc.mine_blocks(6);
	let tx_conf = api_sell
		.request_tx_confirmations(&kc_sell, &swap_sell)
		.unwrap();
	let sell_resp = fsm_sell
		.process(Input::Check, &mut swap_sell, &ctx_sell, &tx_conf)
		.unwrap();
	let tx_conf = &api_buy
		.request_tx_confirmations(&kc_buy, &swap_buy)
		.unwrap();
	let buy_resp = fsm_buy
		.process(Input::Check, &mut swap_buy, &ctx_buy, &tx_conf)
		.unwrap();
	// Seller & Buyer: complete!
	assert_eq!(swap_sell.state, StateId::SellerSwapComplete);
	assert_eq!(swap_buy.state, StateId::BuyerSwapComplete);
	assert!(sell_resp.action.is_none());
	assert!(buy_resp.action.is_none());

	if write_json {
		write(
			"swap_test/swap_sell_6.json",
			serde_json::to_string_pretty(&swap_sell).unwrap(),
		)
		.unwrap();
	} else {
		assert_eq!(
			read_to_string("swap_test/swap_sell_6.json").unwrap(),
			serde_json::to_string_pretty(&swap_sell).unwrap()
		);
	}

	// Safety net: the test must never be committed in golden-file-writing mode.
	assert!(!write_json, "json files written");
}

// Because of golden output new line symbol we are skipping Windows.
#[cfg(not(target_os = "windows"))]
#[test]
#[serial]
fn test_swap_serde() {
	// Round-trip (deserialize -> serialize-pretty) check for every golden JSON
	// artifact written by test_btc_swap: contexts, swap states of both parties,
	// and the four exchanged messages.
	global::set_local_chain_type(global::ChainTypes::Floonet);
	// Seller context
	let ctx_sell_str = read_to_string("swap_test/context_sell.json").unwrap();
	let ctx_sell: Context = serde_json::from_str(&ctx_sell_str).unwrap();
	assert_eq!(
		serde_json::to_string_pretty(&ctx_sell).unwrap(),
		ctx_sell_str
	);

	// Buyer context
	let ctx_buy_str = read_to_string("swap_test/context_buy.json").unwrap();
	let ctx_buy: Context = serde_json::from_str(&ctx_buy_str).unwrap();
	assert_eq!(serde_json::to_string_pretty(&ctx_buy).unwrap(), ctx_buy_str);

	// Seller's swap state in different stages
	for i in 0..6 {
		println!("TRY SELL {}", i);
		let swap_str = read_to_string(format!("swap_test/swap_sell_{}.json", i + 1)).unwrap();
		let swap: Swap = serde_json::from_str(&swap_str).unwrap();
		assert_eq!(serde_json::to_string_pretty(&swap).unwrap(), swap_str);
		println!("OK SELL {}", i);
	}

	// Buyer's swap state in different stages
	for i in 0..3 {
		println!("TRY BUY {}", i);
		let swap_str =
read_to_string(format!("swap_test/swap_buy_{}.json", i + 1)).unwrap();
		let swap: Swap = serde_json::from_str(&swap_str).unwrap();
		assert_eq!(serde_json::to_string_pretty(&swap).unwrap(), swap_str);
		println!("OK BUY {}", i);
	}

	// Messages
	for i in 0..4 {
		println!("TRY MSG {}", i);
		let message_str = read_to_string(format!("swap_test/message_{}.json", i + 1)).unwrap();
		let message: Message = serde_json::from_str(&message_str).unwrap();
		assert_eq!(serde_json::to_string_pretty(&message).unwrap(), message_str);
		println!("OK MSG {}", i);
	}
}

// test_swap_fsm timing config. Constants will be used to validate the timing limits.
const START_TIME: i64 = 1568000000;
const MWC_CONFIRMATION: u64 = 30;
const BTC_CONFIRMATION: u64 = 6;
const MSG_EXCHANGE_TIME: i64 = 3600;
const REDEEM_TIME: i64 = 3600;

/// Test harness bundling one swap party: its `Swap` state, state machine,
/// keychain and context, plus a stack of (swap, MWC-node, BTC-node) snapshots
/// so a probing step can be undone via `pushs`/`pops`.
pub struct Trader<'a> {
	api: &'a BtcSwapApi<'a, TestNodeClient, TestBtcNodeClient>,
	pub swap: Swap,
	fsm: StateMachine<'a>,
	pub kc: ExtKeychain,
	ctx: Context,
	// Saved snapshots for pushs()/pops(): swap state plus both node states.
	swap_stack: Vec<(Swap, TestNodeClientState, TestBtcNodeClientState)>,
}

impl<'a> Trader<'a> {
	/// Feed one input into this trader's state machine, refreshing the
	/// transaction-confirmation view first.
	pub fn process(&mut self, input: Input) -> Result<StateProcessRespond, ErrorKind> {
		let tx_conf = self.api.request_tx_confirmations(&self.kc, &self.swap)?;
		self.fsm.process(input, &mut self.swap, &self.ctx, &tx_conf)
	}

	pub fn _get_tx_conf(&self) -> Result<SwapTransactionsConfirmations, ErrorKind> {
		self.api.request_tx_confirmations(&self.kc, &self.swap)
	}

	/// Whether the swap can still be cancelled from its current state.
	pub fn is_cancellable(&self) -> bool {
		self.fsm.is_cancellable(&self.swap).unwrap()
	}

	/// Snapshot the current swap and both node states so a probe can be undone.
	pub fn pushs(&mut self) {
		self.swap_stack.push((
			self.swap.clone(),
			self.api.node_client.get_state(),
			self.api.btc_node_client1.lock().get_state(),
		));
	}

	/// Restore the most recent snapshot taken by `pushs`.
	pub fn pops(&mut self) {
		let (swap, nc_state, bnc_state) = self.swap_stack.pop().unwrap();
		self.swap = swap;
		self.api.node_client.set_state(&nc_state);
		self.api.btc_node_client1.lock().set_state(&bnc_state);
	}
}

// return time2pass, time2fail
fn calc_time_to_test(
	timeout1: &Option<(i64, i64)>,
	timeout2: &Option<(i64, i64)>,
) -> (Vec<i64>, Vec<i64>)
{
	// Resolve the effective (start, end) timeout pair: prefer timeout1, fall
	// back to timeout2, else (-1, -1) meaning "no timeout at all".
	let (t, t2) = timeout1.clone().unwrap_or(timeout2.unwrap_or((-1, -1)));
	if t > 0 {
		assert!(swap::get_cur_time() < t);
		if t2 < 0 {
			// Only a start bound t: probe times before t pass, after t fail.
			(
				vec![swap::get_cur_time(), (swap::get_cur_time() + t) / 2, t - 1],
				vec![
					t + 1,
					t + MSG_EXCHANGE_TIME / 2,
					t + MSG_EXCHANGE_TIME,
					swap::get_cur_time() + 1000000000,
				],
			)
		} else {
			assert!(t < t2);
			// Window [t, t2]: before t passes, strictly inside (t, t2) fails.
			(
				vec![swap::get_cur_time(), (swap::get_cur_time() + t) / 2, t - 1],
				vec![t + 1, (t + t2) / 2, t2 - 1],
			)
		}
	} else {
		if t2 < 0 {
			// No timeout at all: every probe time passes, none fail.
			(
				vec![
					swap::get_cur_time(),
					swap::get_cur_time() + MSG_EXCHANGE_TIME,
					swap::get_cur_time() + 1000000000,
				],
				vec![],
			)
		} else {
			assert!(swap::get_cur_time() < t2);
			// Only an end bound t2: probe at/after t2; nothing expected to fail.
			(vec![t2, t2 + MSG_EXCHANGE_TIME, t2 + 1000000000], vec![])
		}
	}
}

// Test all possible responds (coverage for all inputs and with timeouts)
fn test_responds(
	trader: &mut Trader,
	expected_starting_state: StateId,
	timeout: Option<(i64, i64)>, // timeout if possible
	cancel_expected_state: Option<StateId>,
	check_before_expected_state: StateId, // Expected state before timeout
	check_after_expected_state: StateId,  // Expected state after timeout
	timeout_execute: Option<(i64, i64)>, // timeout for execute. Might be different because of switching to the next stage.
// If none, timeout will be used
	execute_before_expected_state: Option<StateId>, // Expected state before timeout
	execute_after_expected_state: Option<StateId>,  // Expected state after timeout
	message: Option<Message>,                       // Acceptable message
	message_before_expected_state: Option<StateId>,
	message_after_expected_state: Option<StateId>,
) {
	// Checking the timeout
	assert_eq!(trader.swap.state, expected_starting_state);
	if !is_test_response() {
		return;
	}
	let (time2pass, time2fail) = calc_time_to_test(&timeout, &None);

	let mut time_all = time2pass.clone();
	time_all.extend(time2fail.iter().copied());

	let start_time = swap::get_cur_time();

	// Checking what Cancel does — at every probe time, on a snapshot that is
	// rolled back afterwards so the main walkthrough is not disturbed.
	for t in &time_all {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		if cancel_expected_state.is_some() {
			let _sr = trader.process(Input::Cancel).unwrap();
			assert_eq!(trader.swap.state, cancel_expected_state.clone().unwrap());
		} else {
			assert_eq!(trader.is_cancellable(), false);
			let sr = trader.process(Input::Cancel);
			// NOTE(review): the second argument of assert! is a panic payload,
			// not an expected value; assert_eq!(sr.is_err(), true) was likely
			// intended (as used below for Execute/IncomeMessage).
			assert!(sr.is_err(), true);
		}
		trader.pops();
	}

	// Check Inputs
	for t in &time2pass {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		let _sr = trader.process(Input::Check).unwrap();
		assert_eq!(trader.swap.state, check_before_expected_state);
		trader.pops();
	}
	for t in &time2fail {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		let _sr = trader.process(Input::Check).unwrap();
		assert_eq!(trader.swap.state, check_after_expected_state);
		trader.pops();
	}

	// Restore original time first
	swap::set_testing_cur_time(start_time);

	let (time2pass, time2fail) = calc_time_to_test(&timeout_execute, &timeout);

	// Execute
	for t in &time2pass {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		if execute_before_expected_state.is_some() {
			let _sr = trader.process(Input::Execute).unwrap();
			assert_eq!(
				trader.swap.state,
				execute_before_expected_state.clone().unwrap()
			);
		} else {
			let sr = trader.process(Input::Execute);
			assert_eq!(sr.is_err(), true);
		}
		trader.pops();
	}
	for t in &time2fail {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		if execute_after_expected_state.is_some() {
			let _sr = trader.process(Input::Execute).unwrap();
			assert_eq!(
				trader.swap.state,
				execute_after_expected_state.clone().unwrap()
			);
		} else {
			let sr = trader.process(Input::Execute);
			assert_eq!(sr.is_err(), true);
		}
		trader.pops();
	}

	// IncomeMessage — a dummy empty message is used when none is supplied.
	for t in &time2pass {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		let message = Input::IncomeMessage(message.clone().unwrap_or(Message::new(
			trader.swap.id.clone(),
			Update::None,
			SecondaryUpdate::Empty,
		)));
		if message_before_expected_state.is_some() {
			let _sr = trader.process(message).unwrap();
			assert_eq!(
				trader.swap.state,
				message_before_expected_state.clone().unwrap()
			);
		} else {
			let sr = trader.process(message);
			assert_eq!(sr.is_err(), true);
		}
		trader.pops();
	}
	for t in &time2fail {
		trader.pushs();
		swap::set_testing_cur_time(*t);
		let message = Input::IncomeMessage(message.clone().unwrap_or(Message::new(
			trader.swap.id.clone(),
			Update::None,
			SecondaryUpdate::Empty,
		)));
		if message_after_expected_state.is_some() {
			let _sr = trader.process(message).unwrap();
			assert_eq!(
				trader.swap.state,
				message_after_expected_state.clone().unwrap()
			);
		} else {
			let sr = trader.process(message);
			assert_eq!(sr.is_err(), true);
		}
		trader.pops();
	}

	// Restore original time
	swap::set_testing_cur_time(start_time);
}

#[test]
#[serial]
// The primary goal for this test is to cover all code path for edge cases
fn test_swap_fsm() {
	activate_test_response(true);
	set_test_mode(true);
	swap::set_testing_cur_time(START_TIME);
	global::set_local_chain_type(ChainTypes::Floonet);

	let nc = TestNodeClient::new(300_000);
	let btc_nc = TestBtcNodeClient::new(500_000);

	let amount = 100 * GRIN_UNIT;
	let btc_amount_1 = 2_000_000;
	let btc_amount_2 = 1_000_000;
	let btc_amount_plus = 10_000;
	let btc_amount = btc_amount_1 + btc_amount_2;

	let mut api_sell =
		BtcSwapApi::new_test(Arc::new(nc.clone()), Arc::new(Mutex::new(btc_nc.clone())));
	let kc_sell = keychain(1);
	let ctx_sell =
context_sell(&kc_sell); let mut seller = { let secondary_redeem_address = btc_address(&kc_sell); let swap_sell = api_sell .create_swap_offer( &kc_sell, &ctx_sell, amount, btc_amount, Currency::Btc, secondary_redeem_address, true, // lock MWC first MWC_CONFIRMATION, BTC_CONFIRMATION, MSG_EXCHANGE_TIME as u64, REDEEM_TIME as u64, "file".to_string(), "/tmp/del.me".to_string(), None, None, ) .unwrap(); let fsm_sell = api_sell.get_fsm(&kc_sell, &swap_sell); // Seller: create swap offer Trader { api: &api_sell, swap: swap_sell, fsm: fsm_sell, kc: kc_sell, ctx: ctx_sell, swap_stack: Vec::new(), } }; // Initial state test. test_responds( &mut seller, StateId::SellerOfferCreated, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::SellerCancelled), StateId::SellerSendingOffer, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // Go to the next step swap::set_testing_cur_time(START_TIME + 20); let res = seller.process(Input::Check).unwrap(); assert_eq!(seller.swap.state, StateId::SellerSendingOffer); assert_eq!( res.time_limit.clone().unwrap(), START_TIME + MSG_EXCHANGE_TIME ); assert_eq!(res.next_state_id, seller.swap.state); let message1 = match res.action.unwrap() { Action::SellerSendOfferMessage(m) => m, _ => panic!("Unexpected action"), }; // SellerSendingOffer test_responds( &mut seller, StateId::SellerSendingOffer, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::SellerCancelled), StateId::SellerSendingOffer, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, Some(StateId::SellerWaitingForAcceptanceMessage), // Expected state before timeput Some(StateId::SellerCancelled), // Expected state after timeout None, // Acceptable message None, None, ); // Seller send the message, so confirming to FSM with 
that let res = seller.process(Input::Execute).unwrap(); assert_eq!( seller.swap.state, StateId::SellerWaitingForAcceptanceMessage ); assert_eq!( res.time_limit.clone().unwrap(), START_TIME + MSG_EXCHANGE_TIME ); assert_eq!(res.next_state_id, seller.swap.state); assert_eq!( res.action.unwrap().get_id_str(), "SellerWaitForOfferMessage" ); // Let's test send retry logic let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForAcceptanceMessage ); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForAcceptanceMessage ); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingOffer); // simulate ack that we get from the network... seller.swap.ack_msg1(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForAcceptanceMessage ); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForAcceptanceMessage ); // ---------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------- // Creating buyer let kc_buy = keychain(2); let ctx_buy = context_buy(&kc_buy); let api_buy = BtcSwapApi::new_test(Arc::new(nc.clone()), Arc::new(Mutex::new(btc_nc.clone()))); //////////////////////////////////////////////////////////////////// // Testing how Buyer can validate the data { // Try to create offer with no inputs on the chain let (id, offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), 
secondary_update.clone(), &nc ) .is_err(), true ); } // Add inputs to utxo set nc.mine_blocks(2); for input in seller.swap.lock_slate.tx.inputs_committed() { nc.push_output(input); } { // Should be good now... let (id, offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_ok(), true ); } ///////////////////////////////////////////////////////////// // -------------------- Testing cases when seller try to tweak some data { // Try to create offer with wrong times let (id, offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); swap::set_testing_cur_time(START_TIME - 30); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } // Fixing the time. Assuming it took 100 seconds to deliver the message swap::set_testing_cur_time(START_TIME + 100); { // Offer with wrong network let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); offer.network = Network::Mainnet; assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Offer lock slate has height (not important)... 
let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.lock_height = 10; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Offer lock slate has height (kernel value - attack) let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: lock_slate.fee, lock_height: 10, }; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Offer lock slate has height (kernel value - attack) let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.lock_height = 10; lock_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: lock_slate.fee, lock_height: 10, }; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } // Trying to tweak the fees { // Offer lock slate has height let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.fee += 2; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } 
{ // Offer lock slate has height let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.tx.body.kernels[0].features = KernelFeatures::Plain { fee: lock_slate.fee + 1, }; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Offer lock slate has height let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.tx.body.kernels[0].features = KernelFeatures::Plain { fee: lock_slate.fee - 1, }; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Offer lock slate has height let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.fee += 2; lock_slate.tx.body.kernels[0].features = KernelFeatures::Plain { fee: lock_slate.fee, }; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // No inputs at lock let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.tx.body.inputs = Inputs::CommitOnly(vec![]); offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // 
Amounts at lock let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut lock_slate: Slate = offer.lock_slate.into_slate_plain().unwrap(); lock_slate.amount += 1; offer.lock_slate = VersionedSlate::into_version_plain(lock_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.lock_height -= 1; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.lock_height = 0; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value, tweaking kernel, adding one more plain one let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels.push(TxKernel::empty()); offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value, tweaking kernel to plain let (id, mut offer, secondary_update) 
= message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels[0].features = KernelFeatures::Plain { fee: refund_slate.fee, }; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value, tweaking kernel's height to plain let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: refund_slate.fee, lock_height: refund_slate.lock_height - 1, }; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value, tweaking kernel's height to plain let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: refund_slate.fee, lock_height: 0, }; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund slate must have expected lock value, tweaking kernel's height to plain let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: refund_slate.fee, lock_height: 1, 
}; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund fees let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.fee += 1; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Refund fees let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.tx.body.kernels[0].features = KernelFeatures::HeightLocked { fee: refund_slate.fee + 1, lock_height: refund_slate.lock_height, }; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } { // Amounts at refund let (id, mut offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let mut refund_slate: Slate = offer.refund_slate.into_slate_plain().unwrap(); refund_slate.amount -= 1; offer.refund_slate = VersionedSlate::into_version_plain(refund_slate, SlateVersion::V3).unwrap(); assert_eq!( BuyApi::accept_swap_offer( &kc_buy, &ctx_buy, id, offer.clone(), secondary_update.clone(), &nc ) .is_err(), true ); } // Secondary Data has only public key. Not much what we can tweak to steal the funds. // ---------------------------------------------------------------------------------------------- // Finaly going with buyer. 
Happy path let mut buyer = { let (id, offer, secondary_update) = message1.clone().unwrap_offer().unwrap(); let swap_buy = BuyApi::accept_swap_offer(&kc_buy, &ctx_buy, id, offer, secondary_update, &nc) .unwrap(); let fsm_buy = api_buy.get_fsm(&kc_buy, &swap_buy); // Seller: create swap offer Trader { api: &api_buy, swap: swap_buy, fsm: fsm_buy, kc: kc_buy, ctx: ctx_buy, swap_stack: Vec::new(), } }; // BTC address and let's prepare transactions. to be ready to deposit let input_script = buyer.api.script(&buyer.swap).unwrap(); let btc_address_to_deposit = buyer .swap .secondary_data .unwrap_btc() .unwrap() .address(Currency::Btc, &input_script, buyer.swap.network) .unwrap(); let tx_1 = BtcTransaction { version: 2, lock_time: 0, input: vec![], output: vec![TxOut { value: btc_amount_1, script_pubkey: Currency::Btc .address_2_script_pubkey(&btc_address_to_deposit) .unwrap(), }], }; let _txid_1 = tx_1.txid(); let tx_2 = BtcTransaction { version: 2, lock_time: 0, input: vec![], output: vec![TxOut { value: btc_amount_2, script_pubkey: Currency::Btc .address_2_script_pubkey(&btc_address_to_deposit) .unwrap(), }], }; let _txid_2 = tx_2.txid(); let tx_plus = BtcTransaction { version: 2, lock_time: 0, input: vec![], output: vec![TxOut { value: btc_amount_plus, script_pubkey: Currency::Btc .address_2_script_pubkey(&btc_address_to_deposit) .unwrap(), }], }; let _txid_plus = tx_plus.txid(); // Initial buyer state test. 
test_responds( &mut buyer, StateId::BuyerOfferCreated, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::BuyerCancelled), StateId::BuyerSendingAcceptOfferMessage, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); swap::set_testing_cur_time(START_TIME + 120); let res = buyer.process(Input::Check).unwrap(); assert_eq!(buyer.swap.state, StateId::BuyerSendingAcceptOfferMessage); assert_eq!( res.time_limit.clone().unwrap(), START_TIME + MSG_EXCHANGE_TIME ); assert_eq!(res.next_state_id, buyer.swap.state); let message2 = match res.action.unwrap() { Action::BuyerSendAcceptOfferMessage(m) => m, _ => panic!("Unexpected action"), }; let lock_start_timelimit = START_TIME + MSG_EXCHANGE_TIME + BTC_CONFIRMATION as i64 * 10 * 60 * 11 / 10 / 20; let lock_second_message_round_timelimit = START_TIME + MSG_EXCHANGE_TIME + BTC_CONFIRMATION as i64 * 10 * 60 * 11 / 10 + MSG_EXCHANGE_TIME; let mwc_lock_time_limit = lock_second_message_round_timelimit + MWC_CONFIRMATION as i64 * 60 * 11 / 10 + REDEEM_TIME; let btc_lock_time_limit = mwc_lock_time_limit + REDEEM_TIME + REDEEM_TIME + MWC_CONFIRMATION as i64 * 60 * 11 / 10 + BTC_CONFIRMATION as i64 * 10 * 60 * 11 / 10; assert_eq!(seller.swap.get_time_start_lock(), lock_start_timelimit); assert_eq!( seller.swap.get_time_message_redeem(), lock_second_message_round_timelimit ); assert_eq!(seller.swap.get_time_btc_lock_script(), btc_lock_time_limit); assert_eq!(seller.swap.get_time_mwc_lock(), mwc_lock_time_limit); test_responds( &mut buyer, StateId::BuyerSendingAcceptOfferMessage, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::BuyerCancelled), StateId::BuyerSendingAcceptOfferMessage, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout Some((lock_start_timelimit, -1)), 
Some(StateId::BuyerWaitingForSellerToLock), // Expected state before timeput Some(StateId::BuyerCancelled), // Expected state after timeout None, // Acceptable message None, None, ); swap::set_testing_cur_time(START_TIME + 130); // Reporting that message is sent... let res = buyer.process(Input::Execute).unwrap(); assert_eq!(buyer.swap.state, StateId::BuyerWaitingForSellerToLock); assert_eq!(res.next_state_id, buyer.swap.state); assert_eq!(res.time_limit.clone().unwrap(), lock_start_timelimit); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcConfirmations"); // Checking send message retry... let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForSellerToLock); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForSellerToLock); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSendingAcceptOfferMessage); // simulate ack buyer.swap.ack_msg1(); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForSellerToLock); // Seller is waiting for the message form the buyer... assert_eq!( seller.swap.state, StateId::SellerWaitingForAcceptanceMessage ); // Let's feed message to the seller. // Check if seller will wait for Buyer to deposit first. 
{ // ------- It is a branch activity, will be rolled back soon seller.pushs(); assert_eq!(seller.swap.seller_lock_first, true); seller.swap.seller_lock_first = false; test_responds( &mut seller, StateId::SellerWaitingForAcceptanceMessage, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::SellerCancelled), StateId::SellerWaitingForAcceptanceMessage, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout Some((lock_start_timelimit, -1)), None, // Expected state before timeput None, // Expected state after timeout Some(message2.clone()), // Acceptable message Some(StateId::SellerWaitingForBuyerLock), Some(StateId::SellerCancelled), ); // try to process wrong message assert_eq!( seller .process(Input::IncomeMessage(message1.clone())) .is_err(), true ); let res = seller .process(Input::IncomeMessage(message2.clone())) .unwrap(); assert_eq!(seller.swap.state, StateId::SellerWaitingForBuyerLock); assert_eq!(res.next_state_id, seller.swap.state); assert_eq!( res.action.unwrap().get_id_str(), "WaitForSecondaryConfirmations" ); // Double processing should be fine as well assert_eq!(seller.swap.state, StateId::SellerWaitingForBuyerLock); let res = seller .process(Input::IncomeMessage(message2.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerLock); test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerWaitingForBuyerLock, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let state = btc_nc.get_state(); btc_nc.post_transaction(&tx_1); test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerWaitingForBuyerLock, // Expected state before 
timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); btc_nc.mine_blocks(1); test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerWaitingForBuyerLock, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); btc_nc.post_transaction(&tx_2); test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerWaitingForBuyerLock, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); btc_nc.mine_blocks(1); test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerPostingLockMwcSlate, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); btc_nc.post_transaction(&tx_plus); // Expected to be cancelled because buyer posted too much funds... 
test_responds( &mut seller, StateId::SellerWaitingForBuyerLock, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerCancelled, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // Cleaning up after the branch btc_nc.set_state(&state); seller.pops(); // Branch test is ended, evething is restored. } { // BRANCH - Checking simple cancel case buyer.pushs(); seller.pushs(); let res = buyer.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerCancelled); assert_eq!(res.action.is_some(), false); assert_eq!(res.time_limit.is_some(), false); let res = seller.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::SellerCancelled); assert_eq!(res.action.is_some(), false); assert_eq!(res.time_limit.is_some(), false); test_responds( &mut seller, StateId::SellerCancelled, None, None, StateId::SellerCancelled, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut buyer, StateId::BuyerCancelled, None, None, StateId::BuyerCancelled, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); seller.pops(); buyer.pops(); // End of branch } assert_eq!( seller.swap.state, StateId::SellerWaitingForAcceptanceMessage ); assert_eq!(buyer.swap.state, StateId::BuyerWaitingForSellerToLock); test_responds( &mut seller, StateId::SellerWaitingForAcceptanceMessage, Some((START_TIME + MSG_EXCHANGE_TIME, -1)), // timeout if possible Some(StateId::SellerCancelled), StateId::SellerWaitingForAcceptanceMessage, // Expected state before 
timeput StateId::SellerCancelled, // Expected state after timeout Some((lock_start_timelimit, -1)), None, // Expected state before timeput None, // Expected state after timeout Some(message2.clone()), // Acceptable message Some(StateId::SellerPostingLockMwcSlate), Some(StateId::SellerCancelled), ); // try to process wrong message assert_eq!( seller .process(Input::IncomeMessage(message1.clone())) .is_err(), true ); let res = seller .process(Input::IncomeMessage(message2.clone())) .unwrap(); assert_eq!(seller.swap.state, StateId::SellerPostingLockMwcSlate); assert_eq!(res.next_state_id, seller.swap.state); assert_eq!(res.time_limit.clone().unwrap(), lock_start_timelimit); assert_eq!(res.action.unwrap().get_id_str(), "SellerPublishMwcLockTx"); // Double processing should be fine assert_eq!(seller.swap.state, StateId::SellerPostingLockMwcSlate); let res = seller .process(Input::IncomeMessage(message2.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingLockMwcSlate); swap::set_testing_cur_time(START_TIME + 150); test_responds( &mut seller, StateId::SellerPostingLockMwcSlate, Some((lock_start_timelimit, -1)), Some(StateId::SellerCancelled), StateId::SellerPostingLockMwcSlate, // Expected state before timeput StateId::SellerCancelled, // Expected state after timeout None, Some(StateId::SellerWaitingForLockConfirmations), // Expected state before timeput Some(StateId::SellerCancelled), // Expected state after timeout None, // Acceptable message None, None, ); // Seller posting MWC transaction, testing retry tx cases. let nc_nolock_state = nc.get_state(); { // Let's check what happens if MWC is not published. Seller need to do a retry. 
let res = seller.process(Input::Execute).unwrap(); assert_eq!( seller.swap.state, StateId::SellerWaitingForLockConfirmations ); assert_eq!(res.next_state_id, seller.swap.state); assert_eq!( res.time_limit.clone().unwrap(), lock_second_message_round_timelimit ); assert_eq!(res.action.unwrap().get_id_str(), "WaitForLockConfirmations"); // check if record was created let first_post_time = swap::get_cur_time(); assert_eq!(seller.swap.posted_lock.clone().unwrap(), first_post_time); // let nc_lock_posted_state = nc.get_state(); // Erasing the mwc post data... // nc.set_state(&nc_nolock_state); swap::set_testing_cur_time(START_TIME + 150 + 60 * 5 + 1); // nothing was mined, should be not confirmed yet // Expecting that we will switch to the publish MWC lock state let res = seller.process(Input::Check).unwrap(); assert_eq!(seller.swap.state, StateId::SellerPostingLockMwcSlate); assert_eq!(res.action.unwrap().get_id_str(), "SellerPublishMwcLockTx"); // SellerPostingLockMwcSlate expecting to fail because tx into the tx pool assert_eq!(seller.process(Input::Execute).is_err(), true); // Let's check the cancel now is different. test_responds( &mut seller, StateId::SellerPostingLockMwcSlate, Some((lock_start_timelimit, -1)), Some(StateId::SellerWaitingForRefundHeight), StateId::SellerPostingLockMwcSlate, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // expected to fail. Some(StateId::SellerWaitingForRefundHeight), // Expected state after timeout None, // Acceptable message None, None, ); { // BRANCH - Check if cancel in far future is different. seller.pushs(); nc.mine_blocks(600); let res = seller.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); seller.pops(); } seller.pushs(); // Let's mine block. 
Now SellerPostingLockMwcSlate should be able to detect that we are good nwo and not publish nc.mine_block(); // block is mined, so it switched to SellerWaitingForLockConfirmations test_responds( &mut seller, StateId::SellerPostingLockMwcSlate, Some((lock_second_message_round_timelimit, -1)), // time from SellerWaitingForLockConfirmations Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout Some((lock_second_message_round_timelimit, -1)), Some(StateId::SellerWaitingForLockConfirmations), // Expected state before timeput Some(StateId::SellerWaitingForRefundHeight), // Expected state after timeout None, // Acceptable message None, None, ); let _res = seller.process(Input::Execute).unwrap(); assert_eq!( seller.swap.state, StateId::SellerWaitingForLockConfirmations ); // post wasn't made. assert_eq!(seller.swap.posted_lock.clone().unwrap(), first_post_time); seller.pops(); // Resetting mwc chain as lock tx was never published and retry nc.set_state(&nc_nolock_state); test_responds( &mut seller, StateId::SellerPostingLockMwcSlate, Some((lock_start_timelimit, -1)), Some(StateId::SellerWaitingForRefundHeight), StateId::SellerPostingLockMwcSlate, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, Some(StateId::SellerWaitingForLockConfirmations), // Expected state before timeput Some(StateId::SellerWaitingForRefundHeight), // Expected state after timeout None, // Acceptable message None, None, ); let _res = seller.process(Input::Execute).unwrap(); assert_eq!( seller.swap.state, StateId::SellerWaitingForLockConfirmations ); // post was made with retry. 
Check the timestamp assert_eq!( seller.swap.posted_lock.clone().unwrap(), swap::get_cur_time() ); // Double processing should be fine assert_eq!( seller.swap.state, StateId::SellerWaitingForLockConfirmations ); let res = seller .process(Input::IncomeMessage(message2.clone())) .unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForLockConfirmations ); nc.mine_blocks(2); // Let's test reorg case. We want to repost the transaciton //swap::set_testing_cur_time(START_TIME + 150 + 60 * 5 * 2 + 2); // waiting is fine seller.pushs(); test_responds( &mut seller, StateId::SellerWaitingForLockConfirmations, Some((lock_second_message_round_timelimit, -1)), Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // Let's roll back the chain nc.set_state(&nc_nolock_state); // Expecting switch to SellerPostingLockMwcSlate // not a time for retry let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForLockConfirmations ); // let's trigger retry swap::set_testing_cur_time(START_TIME + 150 + 60 * 5 * 3 + 3); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingLockMwcSlate); seller.pops(); // Last pop return us to a stage where seller already published MWC transaciton. 
So we can continue with Buyer } // Buyer should detect posted MWC and be able to switch to the next step test_responds( &mut buyer, StateId::BuyerWaitingForSellerToLock, Some((lock_start_timelimit, -1)), // timeout if possible Some(StateId::BuyerCancelled), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); { // BRANCH - checking that Sellr lock will set ack to the message buyer.pushs(); // No retry if MWC are posted... buyer.swap.posted_msg1 = Some(swap::get_cur_time() - 60 * 10); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerPostingSecondaryToMultisigAccount ); assert_eq!(buyer.swap.posted_msg1.unwrap(), u32::MAX as i64); buyer.pops(); } let res = buyer.process(Input::Check).unwrap(); assert_eq!( buyer.swap.state, StateId::BuyerPostingSecondaryToMultisigAccount ); assert_eq!(res.time_limit.unwrap(), lock_start_timelimit); match res.action.unwrap() { Action::DepositSecondary { currency, amount, address, } => { assert_eq!(currency, Currency::Btc); assert_eq!(amount, btc_amount); assert_eq!(address, btc_address_to_deposit.to_string()); } _ => panic!("Invalid action"), } { // BRANCH - Checking retry messages buyer.pushs(); // No retry if MWC are posted... 
buyer.swap.posted_msg1 = Some(swap::get_cur_time() - 60 * 10); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerPostingSecondaryToMultisigAccount ); assert_eq!(buyer.swap.posted_msg1.unwrap(), u32::MAX as i64); // Doing some tweaks, need to reset ack first assert_eq!(buyer.swap.posted_msg1.unwrap(), u32::MAX as i64); let st = nc.get_state(); nc.set_state(&nc_nolock_state); buyer.swap.posted_msg1 = Some(swap::get_cur_time()); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerPostingSecondaryToMultisigAccount ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerPostingSecondaryToMultisigAccount ); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSendingAcceptOfferMessage); // simulate ack, so should return back to the current step nc.set_state(&st); buyer.swap.ack_msg1(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerPostingSecondaryToMultisigAccount ); buyer.pops(); } // Before nothing posted, Buyer still can cancel easily test_responds( &mut buyer, StateId::BuyerPostingSecondaryToMultisigAccount, Some((lock_start_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerCancelled), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // Let's store BTC network without deposit let bnc_deposit_none = btc_nc.get_state(); btc_nc.post_transaction(&tx_1); // Posted, not mined. 
Buyer can't cancel easily test_responds( &mut buyer, StateId::BuyerPostingSecondaryToMultisigAccount, Some((lock_start_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( buyer.swap.state, StateId::BuyerPostingSecondaryToMultisigAccount ); assert_eq!(res.time_limit.unwrap(), lock_start_timelimit); match res.action.unwrap() { Action::DepositSecondary { currency, amount, address, } => { assert_eq!(currency, Currency::Btc); assert_eq!(amount, btc_amount - btc_amount_1); assert_eq!(address, btc_address_to_deposit.to_string()); } _ => panic!("Invalid action"), } { // BRANCH - Check Buyer cancel in far future is different buyer.pushs(); let cur_ts = swap::get_cur_time(); swap::set_testing_cur_time(btc_lock_time_limit + 1 + 600 * 5); // Will wait 5 blocks before refund let res = buyer.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerPostingRefundForSecondary); swap::set_testing_cur_time(cur_ts); buyer.pops(); } // Let's store BTC network with part deposit let bnc_deposit_1 = btc_nc.get_state(); btc_nc.post_transaction(&tx_2); // Both deposits without confirmations is fine to more to the next step test_responds( &mut buyer, StateId::BuyerPostingSecondaryToMultisigAccount, Some((lock_second_message_round_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // 
Checking if mining blocks will change nothing btc_nc.mine_blocks(1); test_responds( &mut buyer, StateId::BuyerPostingSecondaryToMultisigAccount, Some((lock_second_message_round_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); { // Branch - Checking if posting too much will switch to cancellation buyer.pushs(); btc_nc.post_transaction(&tx_plus); test_responds( &mut buyer, StateId::BuyerPostingSecondaryToMultisigAccount, Some((lock_start_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForRefundTime, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); buyer.pops(); } // Buyer is good to go to the waiting step let res = buyer.process(Input::Check).unwrap(); assert_eq!(buyer.swap.state, StateId::BuyerWaitingForLockConfirmations); assert_eq!(res.time_limit.unwrap(), lock_second_message_round_timelimit); assert_eq!(res.action.unwrap().get_id_str(), "WaitForLockConfirmations"); test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some((lock_second_message_round_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); { // BRANCH - Checking retry messages buyer.pushs(); // No retry if MWC are 
posted... buyer.swap.posted_msg1 = Some(swap::get_cur_time() - 60 * 10); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForLockConfirmations); assert_eq!(buyer.swap.posted_msg1.unwrap(), u32::MAX as i64); // Doing some tweaks, need to reset ack first assert_eq!(buyer.swap.posted_msg1.unwrap(), u32::MAX as i64); let st = nc.get_state(); nc.set_state(&nc_nolock_state); buyer.swap.posted_msg1 = Some(swap::get_cur_time()); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForLockConfirmations); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForLockConfirmations); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSendingAcceptOfferMessage); // simulate ack, so should return back to the current step buyer.swap.ack_msg1(); nc.set_state(&st); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForLockConfirmations); buyer.pops(); } { // BRANCH - checking how buyer will switch back to deposit step if no funds will be found buyer.pushs(); // With small amount - should switch back to BuyerPostingSecondaryToMultisigAccount btc_nc.set_state(&bnc_deposit_1); test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some((lock_start_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); // With no amount - should switch back to BuyerPostingSecondaryToMultisigAccount, cancel will be without refunds becuse the balance is empty 
btc_nc.set_state(&bnc_deposit_none); test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some((lock_start_timelimit, btc_lock_time_limit)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( buyer.swap.state, StateId::BuyerPostingSecondaryToMultisigAccount ); assert_eq!(res.time_limit.unwrap(), lock_start_timelimit); match res.action.unwrap() { Action::DepositSecondary { currency, amount, address, } => { assert_eq!(currency, Currency::Btc); assert_eq!(amount, btc_amount); assert_eq!(address, btc_address_to_deposit.to_string()); } _ => panic!("Invalid action"), } buyer.pops(); // Branch is Over } // Updating Buyer refund address buyer .swap .update_secondary_address("mjdcskZm4Kimq7yzUGLtzwiEwMdBdTa3No".to_string()); { // BRANCH - checking refund workflows. seller.pushs(); buyer.pushs(); let time_to_restore = swap::get_cur_time(); let res = buyer.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForRefundTime); assert_eq!(res.action.unwrap().get_id_str(), "WaitingForBtcRefund"); assert_eq!(res.time_limit.unwrap(), btc_lock_time_limit + 600 * 5); // waiting for 5 extra blocks before refund let lock_height = seller.swap.refund_slate.lock_height; let need_blocks = lock_height - nc.state.lock().height; let res = seller.process(Input::Cancel).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForRefundHeight); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcRefundUnlock"); assert_eq!( res.time_limit.unwrap(), swap::get_cur_time() + (need_blocks * 60) as i64 ); // Seller SellerWaitingForRefundHeight depend on height, not on time. 
test_responds( &mut seller, StateId::SellerWaitingForRefundHeight, None, None, StateId::SellerWaitingForRefundHeight, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); nc.mine_blocks(need_blocks + 1); // seller got needed height, now seller is ready to refund test_responds( &mut seller, StateId::SellerWaitingForRefundHeight, None, None, StateId::SellerPostingRefundSlate, // Expected state before timeput StateId::SellerPostingRefundSlate, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); assert_eq!(res.action.unwrap().get_id_str(), "SellerPublishMwcRefundTx"); assert_eq!(res.time_limit.is_none(), true); test_responds( &mut seller, StateId::SellerPostingRefundSlate, None, None, StateId::SellerPostingRefundSlate, // Expected state before timeput StateId::SellerPostingRefundSlate, // Expected state after timeout None, Some(StateId::SellerWaitingForRefundConfirmations), // Expected state before timeput Some(StateId::SellerWaitingForRefundConfirmations), // Expected state after timeout None, // Acceptable message None, None, ); let nc_state_prepost = nc.get_state(); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcConfirmations"); assert_eq!(res.time_limit.is_none(), true); // Checking post retry workflow. 
nc not mined, so not cofirmed until let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); // swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); // test network not supported repost assert_eq!(seller.process(Input::Execute).is_err(), true); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); // reorg should trigger retry nc.set_state(&nc_state_prepost); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); // Waiting and we shoudl done nc.mine_blocks(MWC_CONFIRMATION / 2); test_responds( &mut seller, StateId::SellerWaitingForRefundConfirmations, None, None, StateId::SellerWaitingForRefundConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundConfirmations, // Expected state after timeout None, None, None, None, // Acceptable message None, None, ); nc.mine_blocks(MWC_CONFIRMATION / 2); test_responds( &mut seller, StateId::SellerWaitingForRefundConfirmations, None, None, StateId::SellerCancelledRefunded, // Expected state before timeput StateId::SellerCancelledRefunded, // Expected state after timeout None, None, None, None, // Acceptable message None, None, ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerCancelledRefunded); assert_eq!(res.action.is_none(), true); 
assert_eq!(res.time_limit.is_none(), true); // Buyer turn to do a refund.... test_responds( &mut buyer, StateId::BuyerWaitingForRefundTime, Some((btc_lock_time_limit + 600 * 5, -1)), None, StateId::BuyerWaitingForRefundTime, // Expected state before timeput StateId::BuyerPostingRefundForSecondary, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); swap::set_testing_cur_time(btc_lock_time_limit + 1 + 600 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerPostingRefundForSecondary); assert_eq!( res.action.unwrap().get_id_str(), "BuyerPublishSecondaryRefundTx" ); assert_eq!(res.time_limit.is_none(), true); test_responds( &mut buyer, StateId::BuyerPostingRefundForSecondary, None, None, StateId::BuyerPostingRefundForSecondary, // Expected state before timeput StateId::BuyerPostingRefundForSecondary, // Expected state after timeout None, Some(StateId::BuyerWaitingForRefundConfirmations), Some(StateId::BuyerWaitingForRefundConfirmations), None, // Acceptable message None, None, ); let btc_state_prerefund = btc_nc.get_state(); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); assert_eq!( res.action.unwrap().get_id_str(), "WaitForSecondaryConfirmations" ); assert_eq!(res.time_limit.is_none(), true); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); { // BRANCH - check if buyer can resubmit the Secondary refund transaction // Checking if resubmit works buyer.pushs(); let cur_time = swap::get_cur_time(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); swap::set_testing_cur_time(cur_time * 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, 
StateId::BuyerWaitingForRefundConfirmations ); // Changing fees, expecting to switch back to the posting buyer.swap.secondary_fee = 12.0; let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerPostingRefundForSecondary); swap::set_testing_cur_time(cur_time); buyer.pops(); } // checking retry scenarion let btc_state_refund_posted = btc_nc.get_state(); btc_nc.set_state(&btc_state_prerefund); // no retry because of timeout let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerPostingRefundForSecondary); // Check be restored btc_nc.set_state(&btc_state_refund_posted); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); btc_nc.mine_blocks(1); test_responds( &mut buyer, StateId::BuyerWaitingForRefundConfirmations, None, None, StateId::BuyerWaitingForRefundConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundConfirmations, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); { // BRANCH - check if buyer can't resubmit the refund transaction because it is already mined // Checking if resubmit works buyer.pushs(); let cur_time = swap::get_cur_time(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); swap::set_testing_cur_time(cur_time * 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRefundConfirmations ); // Changing fees, expecting to switch back to the posting buyer.swap.secondary_fee = 12.0; let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, 
StateId::BuyerWaitingForRefundConfirmations ); swap::set_testing_cur_time(cur_time); buyer.pops(); } btc_nc.mine_blocks(BTC_CONFIRMATION); test_responds( &mut buyer, StateId::BuyerWaitingForRefundConfirmations, None, None, StateId::BuyerCancelledRefunded, // Expected state before timeput StateId::BuyerCancelledRefunded, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerCancelledRefunded); assert_eq!(res.action.is_none(), true); assert_eq!(res.time_limit.is_none(), true); test_responds( &mut buyer, StateId::BuyerCancelledRefunded, None, None, StateId::BuyerCancelledRefunded, // Expected state before timeput StateId::BuyerCancelledRefunded, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerCancelledRefunded); assert_eq!(res.action.is_none(), true); assert_eq!(res.time_limit.is_none(), true); swap::set_testing_cur_time(time_to_restore); buyer.pops(); seller.pops(); } // Checking if Buyer and seller waiting for the confirmations. 
// They will wait for 30 MWC confirmations (2 are done) and 6 BTC (1 is done) for _btc_iter in 0..4 { test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForLockConfirmations, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); nc.mine_blocks(10); btc_nc.mine_block(); } // We are almost done here. But still waiting. 
test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForLockConfirmations, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); swap::set_testing_cur_time(START_TIME + 150 + 60 * 5 * 5); { // Branch - let's check of both buyer and seller will be able to switch back is chain will be cleared buyer.pushs(); seller.pushs(); nc.clean(); btc_nc.clean(); test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some((lock_start_timelimit, btc_lock_time_limit + 600 * 5)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerPostingSecondaryToMultisigAccount, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForLockConfirmations, Some((lock_start_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerPostingLockMwcSlate, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // 
Expected state after timeout None, // Acceptable message None, None, ); seller.pops(); buyer.pops(); // End of the Branch } // Mine last needed blocks. That will trigger to nc.mine_blocks(20); btc_nc.mine_blocks(2); test_responds( &mut buyer, StateId::BuyerWaitingForLockConfirmations, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerSendingInitRedeemMessage, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForLockConfirmations, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForInitRedeemMessage, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, // Acceptable message None, None, ); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSendingInitRedeemMessage); assert_eq!(res.time_limit.unwrap(), lock_second_message_round_timelimit); let message3 = match res.action.unwrap() { Action::BuyerSendInitRedeemMessage(message) => message, _ => panic!("Invalid action"), }; { // BRANCH - checking thet message can be processing during confirmation step. 
It is fine seller.pushs(); assert_eq!( seller.swap.state, StateId::SellerWaitingForLockConfirmations ); assert_eq!( seller .process(Input::IncomeMessage(message3.clone())) .is_ok(), true ); seller.pops(); } let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForInitRedeemMessage ); assert_eq!(res.time_limit.unwrap(), lock_second_message_round_timelimit); assert_eq!( res.action.unwrap().get_id_str(), "SellerWaitingForInitRedeemMessage" ); swap::set_testing_cur_time(lock_second_message_round_timelimit - MSG_EXCHANGE_TIME); { // Branch. At this point both Buyer and seller are still checking for locked transactions. // Loss any of them should switch to cancellation buyer.pushs(); seller.pushs(); // Testing mwc chain reset nc.clean(); test_responds( &mut buyer, StateId::BuyerSendingInitRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), Some(StateId::BuyerWaitingForLockConfirmations), // Expected state before timeput Some(StateId::BuyerWaitingForRefundTime), // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForInitRedeemMessage, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForRefundHeight, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout Some(message3.clone()), // Acceptable message Some(StateId::SellerWaitingForRefundHeight), Some(StateId::SellerWaitingForRefundHeight), ); seller.pops(); 
buyer.pops(); // --------------------------- buyer.pushs(); seller.pushs(); // Testing btc chain reset btc_nc.clean(); test_responds( &mut buyer, StateId::BuyerSendingInitRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerCancelled, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, Some(StateId::BuyerCancelled), // Expected state before timeput Some(StateId::BuyerCancelled), // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForInitRedeemMessage, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout Some(message3.clone()), // Acceptable message Some(StateId::SellerWaitingForLockConfirmations), Some(StateId::SellerWaitingForRefundHeight), ); seller.pops(); buyer.pops(); } // Normal case, message processing test_responds( &mut buyer, StateId::BuyerSendingInitRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerSendingInitRedeemMessage, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, Some(StateId::BuyerWaitingForRespondRedeemMessage), // Expected state before timeput Some(StateId::BuyerWaitingForRefundTime), // Expected state after timeout None, // Acceptable message None, None, ); test_responds( &mut seller, StateId::SellerWaitingForInitRedeemMessage, Some((lock_second_message_round_timelimit, -1)), // timeout if possible Some(StateId::SellerWaitingForRefundHeight), 
StateId::SellerWaitingForInitRedeemMessage, // Expected state before timeout StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout Some(message3.clone()), // Acceptable message Some(StateId::SellerSendingInitRedeemMessage), Some(StateId::SellerWaitingForBuyerToRedeemMwc), ); // Message is already known from steps above, it is message3. // Finishing execution let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRespondRedeemMessage ); assert_eq!(res.time_limit.unwrap(), lock_second_message_round_timelimit); assert_eq!( res.action.unwrap().get_id_str(), "BuyerWaitingForRedeemMessage" ); // Checking send message retry let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRespondRedeemMessage ); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSendingInitRedeemMessage); buyer.swap.ack_msg2(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRespondRedeemMessage ); assert_eq!( seller .process(Input::IncomeMessage(message1.clone())) .is_err(), true ); assert_eq!( seller .process(Input::IncomeMessage(message2.clone())) .is_err(), true ); let res = seller .process(Input::IncomeMessage(message3.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingInitRedeemMessage); assert_eq!(res.time_limit.unwrap(), lock_second_message_round_timelimit); let message4 = match res.action.unwrap() { Action::SellerSendRedeemMessage(m) => m, _ => panic!("Invalid action"), }; // Normal case, seller sends back message to buyers test_responds( &mut buyer, StateId::BuyerWaitingForRespondRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible 
Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForRespondRedeemMessage, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout Some(( lock_second_message_round_timelimit + REDEEM_TIME, btc_lock_time_limit + 600 * 5, )), None, // Expected state before timeput None, // Expected state after timeout Some(message4.clone()), // Acceptable message Some(StateId::BuyerRedeemMwc), Some(StateId::BuyerWaitingForRefundTime), ); test_responds( &mut seller, StateId::SellerSendingInitRedeemMessage, Some((lock_second_message_round_timelimit, -1)), // timeout if possible None, StateId::SellerSendingInitRedeemMessage, // Expected state before timeput StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state after timeout None, Some(StateId::SellerWaitingForBuyerToRedeemMwc), // Expected state before timeput Some(StateId::SellerWaitingForBuyerToRedeemMwc), // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); { // BRANCH - what happens if chan will loose it's data // Checking if Buyer sneaky, reporting that message was never received. But instead it goes with redeem process. buyer.pushs(); seller.pushs(); // Testing mwc chain reset let nc_state = nc.get_state(); assert_eq!(seller.swap.state, StateId::SellerSendingInitRedeemMessage); assert_eq!(seller.swap.message2.is_some(), false); assert_eq!(seller.swap.posted_msg2.is_none(), true); // Buyer is getting the messege but never respond back. 
let res = buyer .process(Input::IncomeMessage(message4.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); // Still nothing happens, seller still sending the message let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingInitRedeemMessage); // Buyer posting MWC slate let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); // Still nothing happens, seller still sending the message let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingInitRedeemMessage); // The block is mined, so the secret can be revealed nc.mine_block(); // Now seller should detect the fact that MWC are redeemed, the secret is revealed, so the message does delivered assert_eq!(seller.swap.posted_msg2.is_none(), true); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); assert_eq!(seller.swap.posted_msg2.is_none(), false); nc.set_state(&nc_state); seller.pops(); buyer.pops(); } { // BRANCH - what happens if chan will loose it's data // Loss any of them should switch to cancellation buyer.pushs(); seller.pushs(); // Testing mwc chain reset let nc_state = nc.get_state(); nc.clean(); test_responds( &mut buyer, StateId::BuyerWaitingForRespondRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout Some(message4.clone()), // Acceptable message Some(StateId::BuyerWaitingForLockConfirmations), Some(StateId::BuyerWaitingForRefundTime), ); test_responds( &mut seller, StateId::SellerSendingInitRedeemMessage, 
Some((lock_second_message_round_timelimit, -1)), // timeout if possible None, StateId::SellerWaitingForRefundHeight, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, Some(StateId::SellerWaitingForBuyerToRedeemMwc), // Expected state before timeput Some(StateId::SellerWaitingForBuyerToRedeemMwc), // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); let cur_time = swap::get_cur_time(); swap::set_testing_cur_time(START_TIME + MSG_EXCHANGE_TIME); // Checking if glitch will be recoverable... let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerWaitingForLockConfirmations); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingLockMwcSlate); nc.set_state(&nc_state); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitingForRespondRedeemMessage ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingInitRedeemMessage); swap::set_testing_cur_time(cur_time); seller.pops(); buyer.pops(); // --------------------------- buyer.pushs(); seller.pushs(); // Testing btc chain reset let btc_state = btc_nc.get_state(); btc_nc.clean(); // Expected to fail because it is too late to deposit more let tlim = buyer.swap.get_time_mwc_redeem(); test_responds( &mut buyer, StateId::BuyerWaitingForRespondRedeemMessage, Some((lock_second_message_round_timelimit, tlim)), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerCancelled, // Expected state before timeput StateId::BuyerCancelled, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout Some(message4.clone()), // Acceptable message Some(StateId::BuyerCancelled), Some(StateId::BuyerCancelled), ); btc_nc.set_state(&btc_state); btc_nc.state.lock().height -= 4; test_responds( 
&mut buyer, StateId::BuyerWaitingForRespondRedeemMessage, Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerWaitingForLockConfirmations, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout Some(( lock_second_message_round_timelimit, btc_lock_time_limit + 600 * 5, )), None, // Expected state before timeput None, // Expected state after timeout Some(message4.clone()), // Acceptable message Some(StateId::BuyerWaitingForLockConfirmations), Some(StateId::BuyerWaitingForRefundTime), ); test_responds( &mut seller, StateId::SellerSendingInitRedeemMessage, Some((lock_second_message_round_timelimit, -1)), // timeout if possible None, StateId::SellerWaitingForLockConfirmations, // Expected state before timeput StateId::SellerWaitingForRefundHeight, // Expected state after timeout None, Some(StateId::SellerWaitingForBuyerToRedeemMwc), // Expected state before timeput Some(StateId::SellerWaitingForBuyerToRedeemMwc), // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); seller.pops(); buyer.pops(); } // processing message let res = seller.process(Input::Execute).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerToRedeemMwc); assert_eq!( res.time_limit.unwrap(), swap::get_cur_time() + (seller .swap .refund_slate .lock_height .saturating_sub(nc.state.lock().height) * 60) as i64 ); // Time is related to refund, not to a real time... 
assert_eq!( res.action.unwrap().get_id_str(), "SellerWaitForBuyerRedeemPublish" ); // Check if send message retyr does work as expected let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerToRedeemMwc); swap::set_testing_cur_time(swap::get_cur_time() + 61 * 5); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSendingInitRedeemMessage); seller.swap.ack_msg2(); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerToRedeemMwc); assert_eq!( buyer .process(Input::IncomeMessage(message1.clone())) .is_err(), true ); assert_eq!( buyer .process(Input::IncomeMessage(message2.clone())) .is_err(), true ); assert_eq!( buyer .process(Input::IncomeMessage(message3.clone())) .is_err(), true ); let res = buyer .process(Input::IncomeMessage(message4.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); assert_eq!( res.time_limit.unwrap(), lock_second_message_round_timelimit + REDEEM_TIME ); assert_eq!(res.action.unwrap().get_id_str(), "BuyerPublishMwcRedeemTx"); // Double processing should be fine assert_eq!(buyer.swap.state, StateId::BuyerRedeemMwc); let res = buyer .process(Input::IncomeMessage(message4.clone())) .unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); test_responds( &mut buyer, StateId::BuyerRedeemMwc, Some(( lock_second_message_round_timelimit + REDEEM_TIME, btc_lock_time_limit + 600 * 5, )), // timeout if possible Some(StateId::BuyerWaitingForRefundTime), StateId::BuyerRedeemMwc, // Expected state before timeput StateId::BuyerWaitingForRefundTime, // Expected state after timeout None, Some(StateId::BuyerWaitForRedeemMwcConfirmations), // Expected state before timeput Some(StateId::BuyerWaitingForRefundTime), // Expected state after timeout None, None, None, ); test_responds( &mut seller, StateId::SellerWaitingForBuyerToRedeemMwc, Some((lock_second_message_round_timelimit + 
REDEEM_TIME, -1)), // timeout if possible None, // Non cancellable StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state before timeput StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); { // BRANCH - testing how seller can defend an attack. In worst case Buyer can manipulate with // seller refund and buyer redeem transaction. Only one of them can be active, so we are chekcing if byer can switch from one to another buyer.pushs(); seller.pushs(); let lock_height = seller.swap.refund_slate.lock_height; let need_blocks = lock_height - nc.state.lock().height - 1; nc.mine_blocks(need_blocks); seller.pushs(); // Close to the lock, still waiting for buyer to publish test_responds( &mut seller, StateId::SellerWaitingForBuyerToRedeemMwc, Some((lock_second_message_round_timelimit + REDEEM_TIME, -1)), // timeout if possible None, // Non cancellable StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state before timeput StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); nc.mine_blocks(2); // can redeem, switching test_responds( &mut seller, StateId::SellerWaitingForBuyerToRedeemMwc, Some((lock_second_message_round_timelimit + REDEEM_TIME, -1)), // timeout if possible None, // Non cancellable StateId::SellerPostingRefundSlate, // Expected state before timeput StateId::SellerPostingRefundSlate, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); seller.pops(); // Let's do many switches with reorgs. 
will see what happens nc.mine_blocks(2); let nc_state_ready = nc.get_state(); let btc_state_ready = btc_nc.get_state(); { buyer.pushs(); // ---------------------------------------------------- // Try scenario: Buyer does redeem. It rolled back, so it will retry. // Seller does nothing let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); // Check retry at the same block... swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); assert_eq!(buyer.process(Input::Execute).is_err(), true); // For test node, repost doesn't work by some reasons // We should be good now nc.mine_block(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); let state_with_redeem = nc.get_state(); // Do roll back nc.set_state(&nc_state_ready); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); // Switch to exist data nc.set_state(&state_with_redeem); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); // Do roll back & publish nc.set_state(&nc_state_ready); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); 
swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); buyer.pops(); } { seller.pushs(); // ---------------------------------------------------- // Try scenario: Do Refund with reties. Buyer does nothing. let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); assert_eq!(res.action.unwrap().get_id_str(), "SellerPublishMwcRefundTx"); assert_eq!(res.time_limit.is_none(), true); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcConfirmations"); assert_eq!(res.time_limit.is_none(), true); // Let's do retry cycle. for the post // Still waiting, no retry let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); match res.action.unwrap() { Action::WaitForMwcConfirmations { name: _, required, actual, } => { assert_eq!(required, MWC_CONFIRMATION); assert_eq!(actual, 0); } _ => panic!("Invalid action"), }; assert_eq!(res.time_limit.is_none(), true); // Retry should be triggered. swap::set_testing_cur_time(swap::get_cur_time() + 6 * 60); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); nc.mine_blocks(2); // Now transaction is visible, we don't need to repost any more. Let's check how we handle that. 
let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); match res.action.unwrap() { Action::WaitForMwcConfirmations { name: _, required, actual, } => { assert_eq!(required, MWC_CONFIRMATION); assert_eq!(actual, 2); } _ => panic!("Invalid action"), }; //let nc_state_refund = nc.get_state(); // Let's simulate the reog nc.set_state(&nc_state_ready); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); seller.pops(); } // ----------------------------------- // Scenario where Buyer posting redeem transaction. // Then rewind it. // Seller should try to get both BTC and MWC refund { buyer.pushs(); seller.pushs(); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcConfirmations"); assert_eq!(res.time_limit.is_none(), true); // Seller doesn't see the transaction yet let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); nc.mine_blocks(1); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); // Let's mwc chain to loos all data, it shoudn't affect anything at that stage nc.set_state(&nc_state_ready); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); // Check if refund was posted... 
nc.mine_block(); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), true ); // Clear and retry if posted nc.set_state(&nc_state_ready); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); nc.mine_block(); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), true ); nc.set_state(&nc_state_ready); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); nc.mine_block(); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), true ); btc_nc.mine_blocks(BTC_CONFIRMATION + 1); nc.set_state(&nc_state_ready); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSwapComplete); nc.mine_block(); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), true ); // At complete step - there is no more retrys nc.set_state(&nc_state_ready); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSwapComplete); nc.mine_block(); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); seller.pops(); buyer.pops(); } { // Scenario. 
Seller publishing Refund, rollback and Buyer publishing redeem. // Rollback and Seller publishing Refund and does redeem. // Rollback and Buyer publishing redeem. // Both party need to finish the deal as expected. buyer.pushs(); seller.pushs(); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); // Check if Buyer can't publish tx assert_eq!(buyer.process(Input::Execute).is_err(), true); // let's rollback. So buyer can publish... // Validate seller retry logic first... let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); // Interruption at SellerPostingRefundSlate { buyer.pushs(); seller.pushs(); let time = swap::get_cur_time(); nc.set_state(&nc_state_ready); nc.mine_blocks(2); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerPostingRefundSlate); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // Checking seller retry logic for Secondary redeem let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // reset data let btc_state_posted = 
btc_nc.get_state(); btc_nc.set_state(&btc_state_ready); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // timeout is over, shold switch to post state swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); // let's recover the network. btc_nc.set_state(&btc_state_posted); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); btc_nc.mine_block(); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); swap::set_testing_cur_time(time); seller.pops(); buyer.pops(); } swap::set_testing_cur_time(swap::get_cur_time() - 60 * 6); nc.set_state(&nc_state_ready); nc.mine_blocks(2); // Interruption at SellerWaitingForRefundConfirmations and continue let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRefundConfirmations ); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); // Another rollback. 
Now Seller can redeem the BTC nc.set_state(&nc_state_ready); btc_nc.set_state(&btc_state_ready); nc.mine_blocks(2); // Checking if redeem timeout works assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); swap::set_testing_cur_time(swap::get_cur_time() + 60 * 6); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), false ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); let res = seller.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // Checking that seller can redeem both BTC and MWC nc.mine_blocks(1); assert_eq!( nc.get_kernel( &seller.swap.refund_slate.tx.body.kernels[0].excess, None, None ) .unwrap() .is_some(), true ); // Attacker Buyer lost everything. His fault, seller was able to protect himself. let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); assert_eq!(buyer.process(Input::Execute).is_err(), true); // Another rollback. Now Buyer redeem MWC, seller continue to redeem BTC. 
nc.set_state(&nc_state_ready); nc.mine_blocks(2); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); nc.mine_block(); btc_nc.mine_block(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); let res = buyer.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); seller.pops(); buyer.pops(); } seller.pops(); buyer.pops(); // END of branch } // Now let's finish with happy path // At this point Buyer Can reed, seller is waiting for this moment let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerToRedeemMwc); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerRedeemMwc); // Let's buyer to redeem let res = buyer.process(Input::Execute).unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); assert_eq!(res.time_limit.is_none(), true); assert_eq!(res.action.unwrap().get_id_str(), "WaitForMwcConfirmations"); // Double processing should be fine assert_eq!( buyer.swap.state, StateId::BuyerWaitForRedeemMwcConfirmations ); let res = buyer .process(Input::IncomeMessage(message4.clone())) .unwrap(); assert_eq!( res.next_state_id, StateId::BuyerWaitForRedeemMwcConfirmations ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerWaitingForBuyerToRedeemMwc); // Seller doesn't see the transaction yet // !!!! Here cancellation branch is not tested because it depend on chain height. 
That was test above test_responds( &mut seller, StateId::SellerWaitingForBuyerToRedeemMwc, None, // timeout if possible None, // Non cancellable StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state before timeput StateId::SellerWaitingForBuyerToRedeemMwc, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); nc.mine_block(); test_responds( &mut buyer, StateId::BuyerWaitForRedeemMwcConfirmations, None, // timeout if possible None, StateId::BuyerWaitForRedeemMwcConfirmations, // Expected state before timeput StateId::BuyerWaitForRedeemMwcConfirmations, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, None, None, ); // Seller does see the transaction from buyer test_responds( &mut seller, StateId::SellerWaitingForBuyerToRedeemMwc, None, // timeout if possible None, // Non cancellable StateId::SellerRedeemSecondaryCurrency, // Expected state before timeput StateId::SellerRedeemSecondaryCurrency, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); test_responds( &mut seller, StateId::SellerRedeemSecondaryCurrency, None, // timeout if possible None, // Non cancellable StateId::SellerRedeemSecondaryCurrency, // Expected state before timeput StateId::SellerRedeemSecondaryCurrency, // Expected state after timeout None, Some(StateId::SellerWaitingForRedeemConfirmations), // Expected state before timeput Some(StateId::SellerWaitingForRedeemConfirmations), // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); let res = seller.process(Input::Execute).unwrap(); assert_eq!( 
res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); assert_eq!( res.action.unwrap().get_id_str(), "WaitForSecondaryConfirmations" ); assert_eq!(res.time_limit.is_none(), true); { // BRANCH - check if seller can resubmit the Secondary transaction // Checking if resubmit works seller.pushs(); let cur_time = swap::get_cur_time(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); swap::set_testing_cur_time(cur_time * 61 * 5); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // Changing fees, expecting to switch back to the posting seller.swap.secondary_fee = 12.0; let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerRedeemSecondaryCurrency); swap::set_testing_cur_time(cur_time); seller.pops(); } // Bith party waiting for confirmations nc.mine_blocks(MWC_CONFIRMATION / 2); btc_nc.mine_blocks(BTC_CONFIRMATION / 2); test_responds( &mut buyer, StateId::BuyerWaitForRedeemMwcConfirmations, None, // timeout if possible None, StateId::BuyerWaitForRedeemMwcConfirmations, // Expected state before timeput StateId::BuyerWaitForRedeemMwcConfirmations, // Expected state after timeout None, None, // Expected state before timeput None, // Expected state after timeout None, None, None, ); test_responds( &mut seller, StateId::SellerWaitingForRedeemConfirmations, None, // timeout if possible None, // Non cancellable StateId::SellerWaitingForRedeemConfirmations, // Expected state before timeput StateId::SellerWaitingForRedeemConfirmations, // Expected state after timeout None, None, // Expected state before timeput None, // NOT CANCELLABLE by time, will check about the height None, // Acceptable message None, None, ); { // BRANCH - check if seller unable to resubmit the Secondary transaction. 
It is already mined // Checking if resubmit works seller.pushs(); let cur_time = swap::get_cur_time(); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); swap::set_testing_cur_time(cur_time * 61 * 5); let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); // Changing fees, because Tx is already mined, nothing should happen seller.swap.secondary_fee = 12.0; let res = seller.process(Input::Check).unwrap(); assert_eq!( res.next_state_id, StateId::SellerWaitingForRedeemConfirmations ); swap::set_testing_cur_time(cur_time); seller.pops(); } // Mine more, and all must be happy now nc.mine_blocks(MWC_CONFIRMATION / 2 + 1); btc_nc.mine_blocks(BTC_CONFIRMATION / 2 + 1); test_responds( &mut buyer, StateId::BuyerWaitForRedeemMwcConfirmations, None, // timeout if possible None, StateId::BuyerSwapComplete, // Expected state before timeput StateId::BuyerSwapComplete, // Expected state after timeout None, None, None, None, None, None, ); test_responds( &mut seller, StateId::SellerWaitingForRedeemConfirmations, None, // timeout if possible None, // Non cancellable StateId::SellerSwapComplete, // Expected state before timeput StateId::SellerSwapComplete, // Expected state after timeout None, None, None, None, // Acceptable message None, None, ); // Final step let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSwapComplete); assert_eq!(res.action.is_none(), true); assert_eq!(res.time_limit.is_none(), true); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::BuyerSwapComplete); assert_eq!(res.action.is_none(), true); assert_eq!(res.time_limit.is_none(), true); let res = seller.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, StateId::SellerSwapComplete); let res = buyer.process(Input::Check).unwrap(); assert_eq!(res.next_state_id, 
StateId::BuyerSwapComplete); test_responds( &mut buyer, StateId::BuyerSwapComplete, None, // timeout if possible None, StateId::BuyerSwapComplete, // Expected state before timeput StateId::BuyerSwapComplete, // Expected state after timeout None, None, None, None, None, None, ); test_responds( &mut seller, StateId::SellerSwapComplete, None, // timeout if possible None, // Non cancellable StateId::SellerSwapComplete, // Expected state before timeput StateId::SellerSwapComplete, // Expected state after timeout None, None, None, None, // Acceptable message None, None, ); } }
30.232808
150
0.674514
3abe7626efc6ef922a277dcec9c17df2664e8090
35,571
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast::{Block, Crate, Ident, Mac_, PatKind}; use ast::{Name, MacStmtStyle, StmtKind, ItemKind}; use ast; use ext::hygiene::Mark; use ext::placeholders::{placeholder, PlaceholderExpander}; use attr::{self, HasAttrs}; use codemap::{ExpnInfo, NameAndSpan, MacroBang, MacroAttribute}; use syntax_pos::{self, Span, ExpnId}; use config::{is_test_or_bench, StripUnconfigured}; use ext::base::*; use feature_gate::{self, Features}; use fold; use fold::*; use parse::{ParseSess, PResult, lexer}; use parse::parser::Parser; use parse::token::{self, intern, keywords}; use print::pprust; use ptr::P; use tokenstream::{TokenTree, TokenStream}; use util::small_vector::SmallVector; use visit::Visitor; use std::mem; use std::path::PathBuf; use std::rc::Rc; macro_rules! 
expansions { ($($kind:ident: $ty:ty [$($vec:ident, $ty_elt:ty)*], $kind_name:expr, .$make:ident, $(.$fold:ident)* $(lift .$fold_elt:ident)*, $(.$visit:ident)* $(lift .$visit_elt:ident)*;)*) => { #[derive(Copy, Clone, PartialEq, Eq)] pub enum ExpansionKind { OptExpr, $( $kind, )* } pub enum Expansion { OptExpr(Option<P<ast::Expr>>), $( $kind($ty), )* } impl ExpansionKind { pub fn name(self) -> &'static str { match self { ExpansionKind::OptExpr => "expression", $( ExpansionKind::$kind => $kind_name, )* } } fn make_from<'a>(self, result: Box<MacResult + 'a>) -> Option<Expansion> { match self { ExpansionKind::OptExpr => result.make_expr().map(Some).map(Expansion::OptExpr), $( ExpansionKind::$kind => result.$make().map(Expansion::$kind), )* } } } impl Expansion { pub fn make_opt_expr(self) -> Option<P<ast::Expr>> { match self { Expansion::OptExpr(expr) => expr, _ => panic!("Expansion::make_* called on the wrong kind of expansion"), } } $( pub fn $make(self) -> $ty { match self { Expansion::$kind(ast) => ast, _ => panic!("Expansion::make_* called on the wrong kind of expansion"), } } )* pub fn fold_with<F: Folder>(self, folder: &mut F) -> Self { use self::Expansion::*; match self { OptExpr(expr) => OptExpr(expr.and_then(|expr| folder.fold_opt_expr(expr))), $($( $kind(ast) => $kind(folder.$fold(ast)), )*)* $($( $kind(ast) => { $kind(ast.into_iter().flat_map(|ast| folder.$fold_elt(ast)).collect()) }, )*)* } } pub fn visit_with<V: Visitor>(&self, visitor: &mut V) { match *self { Expansion::OptExpr(Some(ref expr)) => visitor.visit_expr(expr), Expansion::OptExpr(None) => {} $($( Expansion::$kind(ref ast) => visitor.$visit(ast), )*)* $($( Expansion::$kind(ref ast) => for ast in ast.as_slice() { visitor.$visit_elt(ast); }, )*)* } } } impl<'a, 'b> Folder for MacroExpander<'a, 'b> { fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> { self.expand(Expansion::OptExpr(Some(expr))).make_opt_expr() } $($(fn $fold(&mut self, node: $ty) -> $ty { 
self.expand(Expansion::$kind(node)).$make() })*)* $($(fn $fold_elt(&mut self, node: $ty_elt) -> $ty { self.expand(Expansion::$kind(SmallVector::one(node))).$make() })*)* } impl<'a> MacResult for ::ext::tt::macro_rules::ParserAnyMacro<'a> { $(fn $make(self: Box<::ext::tt::macro_rules::ParserAnyMacro<'a>>) -> Option<$ty> { Some(self.make(ExpansionKind::$kind).$make()) })* } } } expansions! { Expr: P<ast::Expr> [], "expression", .make_expr, .fold_expr, .visit_expr; Pat: P<ast::Pat> [], "pattern", .make_pat, .fold_pat, .visit_pat; Ty: P<ast::Ty> [], "type", .make_ty, .fold_ty, .visit_ty; Stmts: SmallVector<ast::Stmt> [SmallVector, ast::Stmt], "statement", .make_stmts, lift .fold_stmt, lift .visit_stmt; Items: SmallVector<P<ast::Item>> [SmallVector, P<ast::Item>], "item", .make_items, lift .fold_item, lift .visit_item; TraitItems: SmallVector<ast::TraitItem> [SmallVector, ast::TraitItem], "trait item", .make_trait_items, lift .fold_trait_item, lift .visit_trait_item; ImplItems: SmallVector<ast::ImplItem> [SmallVector, ast::ImplItem], "impl item", .make_impl_items, lift .fold_impl_item, lift .visit_impl_item; } impl ExpansionKind { fn dummy(self, span: Span) -> Expansion { self.make_from(DummyResult::any(span)).unwrap() } fn expect_from_annotatables<I: IntoIterator<Item = Annotatable>>(self, items: I) -> Expansion { let items = items.into_iter(); match self { ExpansionKind::Items => Expansion::Items(items.map(Annotatable::expect_item).collect()), ExpansionKind::ImplItems => Expansion::ImplItems(items.map(Annotatable::expect_impl_item).collect()), ExpansionKind::TraitItems => Expansion::TraitItems(items.map(Annotatable::expect_trait_item).collect()), _ => unreachable!(), } } } pub struct Invocation { pub kind: InvocationKind, expansion_kind: ExpansionKind, expansion_data: ExpansionData, } pub enum InvocationKind { Bang { attrs: Vec<ast::Attribute>, mac: ast::Mac, ident: Option<Ident>, span: Span, }, Attr { attr: ast::Attribute, item: Annotatable, }, } impl Invocation { fn 
span(&self) -> Span { match self.kind { InvocationKind::Bang { span, .. } => span, InvocationKind::Attr { ref attr, .. } => attr.span, } } } pub struct MacroExpander<'a, 'b:'a> { pub cx: &'a mut ExtCtxt<'b>, monotonic: bool, // c.f. `cx.monotonic_expander()` } impl<'a, 'b> MacroExpander<'a, 'b> { pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self { MacroExpander { cx: cx, monotonic: monotonic } } fn expand_crate(&mut self, mut krate: ast::Crate) -> ast::Crate { let err_count = self.cx.parse_sess.span_diagnostic.err_count(); let krate_item = Expansion::Items(SmallVector::one(P(ast::Item { attrs: krate.attrs, span: krate.span, node: ast::ItemKind::Mod(krate.module), ident: keywords::Invalid.ident(), id: ast::DUMMY_NODE_ID, vis: ast::Visibility::Public, }))); match self.expand(krate_item).make_items().pop().unwrap().unwrap() { ast::Item { attrs, node: ast::ItemKind::Mod(module), .. } => { krate.attrs = attrs; krate.module = module; }, _ => unreachable!(), }; if self.cx.parse_sess.span_diagnostic.err_count() - self.cx.resolve_err_count > err_count { self.cx.parse_sess.span_diagnostic.abort_if_errors(); } krate } // Fully expand all the invocations in `expansion`. fn expand(&mut self, expansion: Expansion) -> Expansion { let orig_expansion_data = self.cx.current_expansion.clone(); self.cx.current_expansion.depth = 0; let (expansion, mut invocations) = self.collect_invocations(expansion); invocations.reverse(); let mut expansions = vec![vec![(0, expansion)]]; while let Some(invoc) = invocations.pop() { let ExpansionData { depth, mark, .. 
} = invoc.expansion_data; self.cx.current_expansion = invoc.expansion_data.clone(); let scope = if self.monotonic { mark } else { orig_expansion_data.mark }; self.cx.current_expansion.mark = scope; let expansion = match self.cx.resolver.resolve_invoc(scope, &invoc) { Some(ext) => self.expand_invoc(invoc, ext), None => invoc.expansion_kind.dummy(invoc.span()), }; self.cx.current_expansion.depth = depth + 1; let (expansion, new_invocations) = self.collect_invocations(expansion); if expansions.len() == depth { expansions.push(Vec::new()); } expansions[depth].push((mark.as_u32(), expansion)); if !self.cx.ecfg.single_step { invocations.extend(new_invocations.into_iter().rev()); } } self.cx.current_expansion = orig_expansion_data; let mut placeholder_expander = PlaceholderExpander::new(self.cx, self.monotonic); while let Some(expansions) = expansions.pop() { for (mark, expansion) in expansions.into_iter().rev() { let expansion = expansion.fold_with(&mut placeholder_expander); placeholder_expander.add(ast::NodeId::from_u32(mark), expansion); } } placeholder_expander.remove(ast::NodeId::from_u32(0)) } fn collect_invocations(&mut self, expansion: Expansion) -> (Expansion, Vec<Invocation>) { let crate_config = mem::replace(&mut self.cx.cfg, Vec::new()); let result = { let mut collector = InvocationCollector { cfg: StripUnconfigured { config: &crate_config, should_test: self.cx.ecfg.should_test, sess: self.cx.parse_sess, features: self.cx.ecfg.features, }, cx: self.cx, invocations: Vec::new(), monotonic: self.monotonic, }; (expansion.fold_with(&mut collector), collector.invocations) }; self.cx.cfg = crate_config; if self.monotonic { let err_count = self.cx.parse_sess.span_diagnostic.err_count(); let mark = self.cx.current_expansion.mark; self.cx.resolver.visit_expansion(mark, &result.0); self.cx.resolve_err_count += self.cx.parse_sess.span_diagnostic.err_count() - err_count; } result } fn expand_invoc(&mut self, invoc: Invocation, ext: Rc<SyntaxExtension>) -> Expansion { 
match invoc.kind { InvocationKind::Bang { .. } => self.expand_bang_invoc(invoc, ext), InvocationKind::Attr { .. } => self.expand_attr_invoc(invoc, ext), } } fn expand_attr_invoc(&mut self, invoc: Invocation, ext: Rc<SyntaxExtension>) -> Expansion { let Invocation { expansion_kind: kind, .. } = invoc; let (attr, item) = match invoc.kind { InvocationKind::Attr { attr, item } => (attr, item), _ => unreachable!(), }; attr::mark_used(&attr); let name = intern(&attr.name()); self.cx.bt_push(ExpnInfo { call_site: attr.span, callee: NameAndSpan { format: MacroAttribute(name), span: Some(attr.span), allow_internal_unstable: false, } }); match *ext { MultiModifier(ref mac) => { let item = mac.expand(self.cx, attr.span, &attr.node.value, item); kind.expect_from_annotatables(item) } MultiDecorator(ref mac) => { let mut items = Vec::new(); mac.expand(self.cx, attr.span, &attr.node.value, &item, &mut |item| items.push(item)); items.push(item); kind.expect_from_annotatables(items) } SyntaxExtension::AttrProcMacro(ref mac) => { let attr_toks = TokenStream::from_tts(tts_for_attr(&attr, &self.cx.parse_sess)); let item_toks = TokenStream::from_tts(tts_for_item(&item, &self.cx.parse_sess)); let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks); self.parse_expansion(tok_result, kind, name, attr.span) } _ => unreachable!(), } } /// Expand a macro invocation. Returns the result of expansion. fn expand_bang_invoc(&mut self, invoc: Invocation, ext: Rc<SyntaxExtension>) -> Expansion { let (mark, kind) = (invoc.expansion_data.mark, invoc.expansion_kind); let (attrs, mac, ident, span) = match invoc.kind { InvocationKind::Bang { attrs, mac, ident, span } => (attrs, mac, ident, span), _ => unreachable!(), }; let Mac_ { path, tts, .. } = mac.node; // Detect use of feature-gated or invalid attributes on macro invoations // since they will not be detected after macro expansion. 
for attr in attrs.iter() { feature_gate::check_attribute(&attr, &self.cx.parse_sess, &self.cx.parse_sess.codemap(), &self.cx.ecfg.features.unwrap()); } if path.segments.len() > 1 || path.global || !path.segments[0].parameters.is_empty() { self.cx.span_err(path.span, "expected macro name without module separators"); return kind.dummy(span); } let extname = path.segments[0].identifier.name; let ident = ident.unwrap_or(keywords::Invalid.ident()); let marked_tts = mark_tts(&tts, mark); let opt_expanded = match *ext { NormalTT(ref expandfun, exp_span, allow_internal_unstable) => { if ident.name != keywords::Invalid.name() { let msg = format!("macro {}! expects no ident argument, given '{}'", extname, ident); self.cx.span_err(path.span, &msg); return kind.dummy(span); } self.cx.bt_push(ExpnInfo { call_site: span, callee: NameAndSpan { format: MacroBang(extname), span: exp_span, allow_internal_unstable: allow_internal_unstable, }, }); kind.make_from(expandfun.expand(self.cx, span, &marked_tts)) } IdentTT(ref expander, tt_span, allow_internal_unstable) => { if ident.name == keywords::Invalid.name() { self.cx.span_err(path.span, &format!("macro {}! expects an ident argument", extname)); return kind.dummy(span); }; self.cx.bt_push(ExpnInfo { call_site: span, callee: NameAndSpan { format: MacroBang(extname), span: tt_span, allow_internal_unstable: allow_internal_unstable, } }); kind.make_from(expander.expand(self.cx, span, ident, marked_tts, attrs)) } MultiDecorator(..) | MultiModifier(..) | SyntaxExtension::AttrProcMacro(..) => { self.cx.span_err(path.span, &format!("`{}` can only be used in attributes", extname)); return kind.dummy(span); } SyntaxExtension::ProcMacro(ref expandfun) => { if ident.name != keywords::Invalid.name() { let msg = format!("macro {}! 
expects no ident argument, given '{}'", extname, ident); self.cx.span_err(path.span, &msg); return kind.dummy(span); } self.cx.bt_push(ExpnInfo { call_site: span, callee: NameAndSpan { format: MacroBang(extname), // FIXME procedural macros do not have proper span info // yet, when they do, we should use it here. span: None, // FIXME probably want to follow macro_rules macros here. allow_internal_unstable: false, }, }); let toks = TokenStream::from_tts(marked_tts); let tok_result = expandfun.expand(self.cx, span, toks); Some(self.parse_expansion(tok_result, kind, extname, span)) } }; let expanded = if let Some(expanded) = opt_expanded { expanded } else { let msg = format!("non-{kind} macro in {kind} position: {name}", name = path.segments[0].identifier.name, kind = kind.name()); self.cx.span_err(path.span, &msg); return kind.dummy(span); }; expanded.fold_with(&mut Marker { mark: mark, expn_id: Some(self.cx.backtrace()), }) } fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, name: Name, span: Span) -> Expansion { let mut parser = self.cx.new_parser_from_tts(&toks.to_tts()); let expansion = match parser.parse_expansion(kind, false) { Ok(expansion) => expansion, Err(mut err) => { err.emit(); return kind.dummy(span); } }; parser.ensure_complete_parse(name, kind.name(), span); // FIXME better span info expansion.fold_with(&mut ChangeSpan { span: span }) } } impl<'a> Parser<'a> { pub fn parse_expansion(&mut self, kind: ExpansionKind, macro_legacy_warnings: bool) -> PResult<'a, Expansion> { Ok(match kind { ExpansionKind::Items => { let mut items = SmallVector::zero(); while let Some(item) = self.parse_item()? 
{ items.push(item); } Expansion::Items(items) } ExpansionKind::TraitItems => { let mut items = SmallVector::zero(); while self.token != token::Eof { items.push(self.parse_trait_item()?); } Expansion::TraitItems(items) } ExpansionKind::ImplItems => { let mut items = SmallVector::zero(); while self.token != token::Eof { items.push(self.parse_impl_item()?); } Expansion::ImplItems(items) } ExpansionKind::Stmts => { let mut stmts = SmallVector::zero(); while self.token != token::Eof { if let Some(stmt) = self.parse_full_stmt(macro_legacy_warnings)? { stmts.push(stmt); } } Expansion::Stmts(stmts) } ExpansionKind::Expr => Expansion::Expr(self.parse_expr()?), ExpansionKind::OptExpr => Expansion::OptExpr(Some(self.parse_expr()?)), ExpansionKind::Ty => Expansion::Ty(self.parse_ty()?), ExpansionKind::Pat => Expansion::Pat(self.parse_pat()?), }) } pub fn ensure_complete_parse(&mut self, macro_name: ast::Name, kind_name: &str, span: Span) { if self.token != token::Eof { let msg = format!("macro expansion ignores token `{}` and any following", self.this_token_to_string()); let mut err = self.diagnostic().struct_span_err(self.span, &msg); let msg = format!("caused by the macro expansion here; the usage \ of `{}!` is likely invalid in {} context", macro_name, kind_name); err.span_note(span, &msg).emit(); } } } struct InvocationCollector<'a, 'b: 'a> { cx: &'a mut ExtCtxt<'b>, cfg: StripUnconfigured<'a>, invocations: Vec<Invocation>, monotonic: bool, } macro_rules! 
fully_configure { ($this:ident, $node:ident, $noop_fold:ident) => { match $noop_fold($node, &mut $this.cfg).pop() { Some(node) => node, None => return SmallVector::zero(), } } } impl<'a, 'b> InvocationCollector<'a, 'b> { fn collect(&mut self, expansion_kind: ExpansionKind, kind: InvocationKind) -> Expansion { let mark = Mark::fresh(); self.invocations.push(Invocation { kind: kind, expansion_kind: expansion_kind, expansion_data: ExpansionData { mark: mark, ..self.cx.current_expansion.clone() }, }); placeholder(expansion_kind, ast::NodeId::from_u32(mark.as_u32())) } fn collect_bang( &mut self, mac: ast::Mac, attrs: Vec<ast::Attribute>, span: Span, kind: ExpansionKind, ) -> Expansion { self.collect(kind, InvocationKind::Bang { attrs: attrs, mac: mac, ident: None, span: span }) } fn collect_attr(&mut self, attr: ast::Attribute, item: Annotatable, kind: ExpansionKind) -> Expansion { self.collect(kind, InvocationKind::Attr { attr: attr, item: item }) } // If `item` is an attr invocation, remove and return the macro attribute. fn classify_item<T: HasAttrs>(&mut self, mut item: T) -> (T, Option<ast::Attribute>) { let mut attr = None; item = item.map_attrs(|mut attrs| { attr = self.cx.resolver.find_attr_invoc(&mut attrs); attrs }); (item, attr) } fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> { self.cfg.configure(node) } } // These are pretty nasty. Ideally, we would keep the tokens around, linked from // the AST. However, we don't so we need to create new ones. Since the item might // have come from a macro expansion (possibly only in part), we can't use the // existing codemap. // // Therefore, we must use the pretty printer (yuck) to turn the AST node into a // string, which we then re-tokenise (double yuck), but first we have to patch // the pretty-printed string on to the end of the existing codemap (infinity-yuck). 
fn tts_for_item(item: &Annotatable, parse_sess: &ParseSess) -> Vec<TokenTree> { let text = match *item { Annotatable::Item(ref i) => pprust::item_to_string(i), Annotatable::TraitItem(ref ti) => pprust::trait_item_to_string(ti), Annotatable::ImplItem(ref ii) => pprust::impl_item_to_string(ii), }; string_to_tts(text, parse_sess) } fn tts_for_attr(attr: &ast::Attribute, parse_sess: &ParseSess) -> Vec<TokenTree> { string_to_tts(pprust::attr_to_string(attr), parse_sess) } fn string_to_tts(text: String, parse_sess: &ParseSess) -> Vec<TokenTree> { let filemap = parse_sess.codemap() .new_filemap(String::from("<macro expansion>"), None, text); let lexer = lexer::StringReader::new(&parse_sess.span_diagnostic, filemap); let mut parser = Parser::new(parse_sess, Vec::new(), Box::new(lexer)); panictry!(parser.parse_all_token_trees()) } impl<'a, 'b> Folder for InvocationCollector<'a, 'b> { fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> { let mut expr = self.cfg.configure_expr(expr).unwrap(); expr.node = self.cfg.configure_expr_kind(expr.node); if let ast::ExprKind::Mac(mac) = expr.node { self.collect_bang(mac, expr.attrs.into(), expr.span, ExpansionKind::Expr).make_expr() } else { P(noop_fold_expr(expr, self)) } } fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> { let mut expr = configure!(self, expr).unwrap(); expr.node = self.cfg.configure_expr_kind(expr.node); if let ast::ExprKind::Mac(mac) = expr.node { self.collect_bang(mac, expr.attrs.into(), expr.span, ExpansionKind::OptExpr) .make_opt_expr() } else { Some(P(noop_fold_expr(expr, self))) } } fn fold_pat(&mut self, pat: P<ast::Pat>) -> P<ast::Pat> { match pat.node { PatKind::Mac(_) => {} _ => return noop_fold_pat(pat, self), } pat.and_then(|pat| match pat.node { PatKind::Mac(mac) => self.collect_bang(mac, Vec::new(), pat.span, ExpansionKind::Pat).make_pat(), _ => unreachable!(), }) } fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> { let stmt = match 
self.cfg.configure_stmt(stmt) { Some(stmt) => stmt, None => return SmallVector::zero(), }; let (mac, style, attrs) = if let StmtKind::Mac(mac) = stmt.node { mac.unwrap() } else { // The placeholder expander gives ids to statements, so we avoid folding the id here. let ast::Stmt { id, node, span } = stmt; return noop_fold_stmt_kind(node, self).into_iter().map(|node| { ast::Stmt { id: id, node: node, span: span } }).collect() }; let mut placeholder = self.collect_bang(mac, attrs.into(), stmt.span, ExpansionKind::Stmts).make_stmts(); // If this is a macro invocation with a semicolon, then apply that // semicolon to the final statement produced by expansion. if style == MacStmtStyle::Semicolon { if let Some(stmt) = placeholder.pop() { placeholder.push(stmt.add_trailing_semicolon()); } } placeholder } fn fold_block(&mut self, block: P<Block>) -> P<Block> { let no_noninline_mod = mem::replace(&mut self.cx.current_expansion.no_noninline_mod, true); let result = noop_fold_block(block, self); self.cx.current_expansion.no_noninline_mod = no_noninline_mod; result } fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> { let item = configure!(self, item); let (mut item, attr) = self.classify_item(item); if let Some(attr) = attr { let item = Annotatable::Item(fully_configure!(self, item, noop_fold_item)); return self.collect_attr(attr, item, ExpansionKind::Items).make_items(); } match item.node { ast::ItemKind::Mac(..) => { if match item.node { ItemKind::Mac(ref mac) => mac.node.path.segments.is_empty(), _ => unreachable!(), } { return SmallVector::one(item); } item.and_then(|item| match item.node { ItemKind::Mac(mac) => { self.collect(ExpansionKind::Items, InvocationKind::Bang { mac: mac, attrs: item.attrs, ident: Some(item.ident), span: item.span, }).make_items() } _ => unreachable!(), }) } ast::ItemKind::Mod(ast::Mod { inner, .. 
}) => { if item.ident == keywords::Invalid.ident() { return noop_fold_item(item, self); } let orig_no_noninline_mod = self.cx.current_expansion.no_noninline_mod; let mut module = (*self.cx.current_expansion.module).clone(); module.mod_path.push(item.ident); // Detect if this is an inline module (`mod m { ... }` as opposed to `mod m;`). // In the non-inline case, `inner` is never the dummy span (c.f. `parse_item_mod`). // Thus, if `inner` is the dummy span, we know the module is inline. let inline_module = item.span.contains(inner) || inner == syntax_pos::DUMMY_SP; if inline_module { if let Some(path) = attr::first_attr_value_str_by_name(&item.attrs, "path") { self.cx.current_expansion.no_noninline_mod = false; module.directory.push(&*path); } else { module.directory.push(&*item.ident.name.as_str()); } } else { self.cx.current_expansion.no_noninline_mod = false; module.directory = PathBuf::from(self.cx.parse_sess.codemap().span_to_filename(inner)); module.directory.pop(); } let orig_module = mem::replace(&mut self.cx.current_expansion.module, Rc::new(module)); let result = noop_fold_item(item, self); self.cx.current_expansion.module = orig_module; self.cx.current_expansion.no_noninline_mod = orig_no_noninline_mod; return result; } // Ensure that test functions are accessible from the test harness. ast::ItemKind::Fn(..) 
if self.cx.ecfg.should_test => { if item.attrs.iter().any(|attr| is_test_or_bench(attr)) { item = item.map(|mut item| { item.vis = ast::Visibility::Public; item }); } noop_fold_item(item, self) } _ => noop_fold_item(item, self), } } fn fold_trait_item(&mut self, item: ast::TraitItem) -> SmallVector<ast::TraitItem> { let item = configure!(self, item); let (item, attr) = self.classify_item(item); if let Some(attr) = attr { let item = Annotatable::TraitItem(P(fully_configure!(self, item, noop_fold_trait_item))); return self.collect_attr(attr, item, ExpansionKind::TraitItems).make_trait_items() } match item.node { ast::TraitItemKind::Macro(mac) => { let ast::TraitItem { attrs, span, .. } = item; self.collect_bang(mac, attrs, span, ExpansionKind::TraitItems).make_trait_items() } _ => fold::noop_fold_trait_item(item, self), } } fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> { let item = configure!(self, item); let (item, attr) = self.classify_item(item); if let Some(attr) = attr { let item = Annotatable::ImplItem(P(fully_configure!(self, item, noop_fold_impl_item))); return self.collect_attr(attr, item, ExpansionKind::ImplItems).make_impl_items(); } match item.node { ast::ImplItemKind::Macro(mac) => { let ast::ImplItem { attrs, span, .. 
} = item; self.collect_bang(mac, attrs, span, ExpansionKind::ImplItems).make_impl_items() } _ => fold::noop_fold_impl_item(item, self), } } fn fold_ty(&mut self, ty: P<ast::Ty>) -> P<ast::Ty> { let ty = match ty.node { ast::TyKind::Mac(_) => ty.unwrap(), _ => return fold::noop_fold_ty(ty, self), }; match ty.node { ast::TyKind::Mac(mac) => self.collect_bang(mac, Vec::new(), ty.span, ExpansionKind::Ty).make_ty(), _ => unreachable!(), } } fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod { noop_fold_foreign_mod(self.cfg.configure_foreign_mod(foreign_mod), self) } fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind { noop_fold_item_kind(self.cfg.configure_item_kind(item), self) } fn new_id(&mut self, id: ast::NodeId) -> ast::NodeId { if self.monotonic { assert_eq!(id, ast::DUMMY_NODE_ID); self.cx.resolver.next_node_id() } else { id } } } pub struct ExpansionConfig<'feat> { pub crate_name: String, pub features: Option<&'feat Features>, pub recursion_limit: usize, pub trace_mac: bool, pub should_test: bool, // If false, strip `#[test]` nodes pub single_step: bool, pub keep_macs: bool, } macro_rules! feature_tests { ($( fn $getter:ident = $field:ident, )*) => { $( pub fn $getter(&self) -> bool { match self.features { Some(&Features { $field: true, .. }) => true, _ => false, } } )* } } impl<'feat> ExpansionConfig<'feat> { pub fn default(crate_name: String) -> ExpansionConfig<'static> { ExpansionConfig { crate_name: crate_name, features: None, recursion_limit: 64, trace_mac: false, should_test: false, single_step: false, keep_macs: false, } } feature_tests! 
{ fn enable_quotes = quote, fn enable_asm = asm, fn enable_log_syntax = log_syntax, fn enable_concat_idents = concat_idents, fn enable_trace_macros = trace_macros, fn enable_allow_internal_unstable = allow_internal_unstable, fn enable_custom_derive = custom_derive, fn enable_pushpop_unsafe = pushpop_unsafe, fn enable_proc_macro = proc_macro, } } pub fn expand_crate(cx: &mut ExtCtxt, user_exts: Vec<NamedSyntaxExtension>, c: Crate) -> Crate { cx.initialize(user_exts, &c); cx.monotonic_expander().expand_crate(c) } // Expands crate using supplied MacroExpander - allows for // non-standard expansion behaviour (e.g. step-wise). pub fn expand_crate_with_expander(expander: &mut MacroExpander, user_exts: Vec<NamedSyntaxExtension>, c: Crate) -> Crate { expander.cx.initialize(user_exts, &c); expander.expand_crate(c) } // A Marker adds the given mark to the syntax context and // sets spans' `expn_id` to the given expn_id (unless it is `None`). struct Marker { mark: Mark, expn_id: Option<ExpnId> } impl Folder for Marker { fn fold_ident(&mut self, mut ident: Ident) -> Ident { ident.ctxt = ident.ctxt.apply_mark(self.mark); ident } fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { noop_fold_mac(mac, self) } fn new_span(&mut self, mut span: Span) -> Span { if let Some(expn_id) = self.expn_id { span.expn_id = expn_id; } span } } // apply a given mark to the given token trees. Used prior to expansion of a macro. fn mark_tts(tts: &[TokenTree], m: Mark) -> Vec<TokenTree> { noop_fold_tts(tts, &mut Marker{mark:m, expn_id: None}) }
39.089011
100
0.54626
fe6845a9077c9a14f98faa420ed36de46aa57084
2,815
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // compile-flags:-Z extra-debug-info // debugger:set print union on // debugger:break zzz // debugger:run // debugger:finish // debugger:print case1 // check:$1 = {{Case1, a = 0, b = 31868, c = 31868, d = 31868, e = 31868}, {Case1, a = 0, b = 2088533116, c = 2088533116}, {Case1, a = 0, b = 8970181431921507452}} // debugger:print case2 // check:$2 = {{Case2, a = 0, b = 4369, c = 4369, d = 4369, e = 4369}, {Case2, a = 0, b = 286331153, c = 286331153}, {Case2, a = 0, b = 1229782938247303441}} // debugger:print case3 // check:$3 = {{Case3, a = 0, b = 22873, c = 22873, d = 22873, e = 22873}, {Case3, a = 0, b = 1499027801, c = 1499027801}, {Case3, a = 0, b = 6438275382588823897}} // debugger:print univariant // check:$4 = {a = -1} #[allow(unused_variable)]; // The first element is to ensure proper alignment, irrespective of the machines word size. Since // the size of the discriminant value is machine dependent, this has be taken into account when // datatype layout should be predictable as in this case. enum Regular { Case1 { a: u64, b: u16, c: u16, d: u16, e: u16}, Case2 { a: u64, b: u32, c: u32}, Case3 { a: u64, b: u64 } } enum Univariant { TheOnlyCase { a: i64 } } fn main() { // In order to avoid endianess trouble all of the following test values consist of a single // repeated byte. This way each interpretation of the union should look the same, no matter if // this is a big or little endian machine. 
// 0b0111110001111100011111000111110001111100011111000111110001111100 = 8970181431921507452 // 0b01111100011111000111110001111100 = 2088533116 // 0b0111110001111100 = 31868 // 0b01111100 = 124 let case1 = Case1 { a: 0, b: 31868, c: 31868, d: 31868, e: 31868 }; // 0b0001000100010001000100010001000100010001000100010001000100010001 = 1229782938247303441 // 0b00010001000100010001000100010001 = 286331153 // 0b0001000100010001 = 4369 // 0b00010001 = 17 let case2 = Case2 { a: 0, b: 286331153, c: 286331153 }; // 0b0101100101011001010110010101100101011001010110010101100101011001 = 6438275382588823897 // 0b01011001010110010101100101011001 = 1499027801 // 0b0101100101011001 = 22873 // 0b01011001 = 89 let case3 = Case3 { a: 0, b: 6438275382588823897 }; let univariant = TheOnlyCase { a: -1 }; zzz(); } fn zzz() {()}
38.040541
163
0.685258
6980571c1f7cf8d7b5d37b9c3334031c421c2e17
10,593
use std::ops::{Add, Div, Mul, Sub};

/// A dense, row-major matrix stored as `Vec<Vec<T>>`.
///
/// `rows` and `cols` cache the dimensions of `items` and are kept in sync
/// by the constructors.
#[derive(Debug)]
pub struct Matrix<T: Add + Sub + Copy> {
    pub items: Vec<Vec<T>>,
    pub rows: usize,
    pub cols: usize,
}

impl<T: Add + Sub + Mul + Div + Copy> Matrix<T> {
    /// Builds a matrix from a vector of rows.
    ///
    /// Returns `None` when the rows are ragged (unequal lengths) or when
    /// `vecs` is empty. (The previous version panicked with an
    /// index-out-of-bounds on empty input instead of returning `None`.)
    pub fn new(vecs: Vec<Vec<T>>) -> Option<Matrix<T>> {
        let first_col_len = vecs.first()?.len();
        if !vecs.iter().all(|row| row.len() == first_col_len) {
            // Ragged rows cannot form a rectangular matrix.
            return None;
        }
        Some(Matrix {
            rows: vecs.len(),
            cols: first_col_len,
            items: vecs,
        })
    }

    /// Builds an `m x n` matrix with every entry set to `x`.
    ///
    /// Panics when `m` or `n` is non-positive (the resulting shape is not a
    /// valid matrix).
    pub fn new_from(x: T, m: i32, n: i32) -> Matrix<T> {
        let matrix: Vec<Vec<T>> = (0..m)
            .map(|_| (0..n).map(|_| x).collect())
            .collect();
        Matrix::new(matrix).unwrap()
    }

    /// True when `other` has the same number of rows and columns as `self`.
    pub fn compare_dimensions(&self, other: &Matrix<T>) -> bool {
        self.rows == other.rows && self.cols == other.cols
    }

    /// Returns the transpose: entry (n, m) of the result is entry (m, n)
    /// of `self`.
    pub fn transpose(&self) -> Matrix<T> {
        let t: Vec<Vec<T>> = (0..self.cols)
            .map(|n| (0..self.rows).map(|m| self.items[m][n]).collect())
            .collect();
        Matrix::new(t).expect("Cannot transpose matrix")
    }

    /// True when the matrix has as many rows as columns.
    pub fn is_square(&self) -> bool {
        self.rows == self.cols
    }

    // Guard used by the operations that are only defined for square matrices.
    fn panic_if_not_square(&self) {
        if !self.is_square() {
            panic!("Matrix must be square!");
        }
    }
}

impl Matrix<f64> {
    // Copy of `self` with row `row` and column `col` removed.
    fn submatrix(&self, row: usize, col: usize) -> Matrix<f64> {
        let items: Vec<Vec<f64>> = self
            .items
            .iter()
            .enumerate()
            .filter(|(i, _)| *i != row)
            .map(|(_, r)| {
                r.iter()
                    .enumerate()
                    .filter(|(j, _)| *j != col)
                    .map(|(_, v)| *v)
                    .collect()
            })
            .collect();
        Matrix::new(items).expect("Cannot build submatrix")
    }

    // Determinant by Laplace (cofactor) expansion along the first row.
    //
    // BUG FIX: the previous implementation recursed on the *matrix of
    // minors* instead of on each submatrix. Since det(matrix of minors)
    // equals det(A)^(n-1) for an n x n matrix, every determinant of a
    // matrix larger than 3x3 came out wrong (e.g. det(diag(1,2,3,4))
    // was reported as 576 instead of 24).
    fn laplace_determinant(&self) -> f64 {
        match self.rows {
            1 => self.items[0][0],
            2 => {
                self.items[0][0] * self.items[1][1]
                    - self.items[0][1] * self.items[1][0]
            }
            _ => (0..self.cols)
                .map(|j| {
                    // Alternating cofactor signs along row 0.
                    let sign = if j % 2 == 0 { 1.0 } else { -1.0 };
                    sign * self.items[0][j] * self.submatrix(0, j).laplace_determinant()
                })
                .sum(),
        }
    }

    /// Determinant of `self`. Kept for backward compatibility with the old
    /// API name; now correct for every size, not only 2x2 and 3x3.
    pub fn determinant_2(&self) -> f64 {
        self.laplace_determinant()
    }

    /// Determinant of `self`; for a 1x1 matrix this is the single entry.
    pub fn minor(&self) -> f64 {
        self.laplace_determinant()
    }

    /// Matrix of minors: entry (i, j) is the determinant of `self` with
    /// row i and column j removed.
    pub fn minors(&self) -> Matrix<f64> {
        let minors: Vec<Vec<f64>> = (0..self.rows)
            .map(|i| {
                (0..self.cols)
                    .map(|j| self.submatrix(i, j).laplace_determinant())
                    .collect()
            })
            .collect();
        Matrix::new(minors).unwrap()
    }

    /// Applies the checkerboard of signs (-1)^(i+j) to every entry, turning
    /// a matrix of minors into a matrix of cofactors.
    pub fn cofactor(&self) -> Matrix<f64> {
        let m: Vec<Vec<f64>> = self
            .items
            .iter()
            .enumerate()
            .map(|(i, row)| {
                row.iter()
                    .enumerate()
                    // (i + j) has the same parity as the original
                    // (i + 1) + (j + 1), so the sign pattern is unchanged.
                    .map(|(j, &item)| item * (-1.0_f64).powi((i + j) as i32))
                    .collect()
            })
            .collect();
        Matrix::new(m).unwrap()
    }

    /// Determinant of `self`.
    ///
    /// # Panics
    /// Panics when the matrix is not square.
    pub fn determinant(&self) -> f64 {
        self.panic_if_not_square();
        self.laplace_determinant()
    }

    /// Adjugate (transpose of the cofactor matrix).
    ///
    /// # Panics
    /// Panics when the matrix is not square.
    pub fn adjoint(&self) -> Matrix<f64> {
        self.panic_if_not_square();
        self.minors().cofactor().transpose()
    }

    /// Inverse via adjugate / determinant.
    ///
    /// # Panics
    /// Panics when the matrix is not square.
    /// NOTE(review): a singular matrix (determinant 0) yields inf/NaN
    /// entries rather than an error — confirm callers expect that.
    pub fn inverse(&self) -> Matrix<f64> {
        self.panic_if_not_square();
        let determinant = self.determinant();
        let adjoint = self.adjoint();
        adjoint / determinant
    }
}

// &Matrix + &Matrix: element-wise sum. Panics on dimension mismatch.
impl<T: Add<T, Output = T> + Sub + Mul + Div + Copy> Add<&Matrix<T>> for &Matrix<T> {
    type Output = Matrix<T>;

    fn add(self, other: &Matrix<T>) -> Matrix<T> {
        if !self.compare_dimensions(other) {
            panic!("Can not add matrices with different dimensions");
        }
        let mat: Vec<Vec<T>> = self
            .items
            .iter()
            .zip(other.items.iter())
            .map(|(a, b)| a.iter().zip(b.iter()).map(|(&x, &y)| x + y).collect())
            .collect();
        Matrix::new(mat).unwrap()
    }
}

// &Matrix - &Matrix: element-wise difference. Panics on dimension mismatch.
// (Fixed the copy-pasted panic message that previously said "add".)
impl<T: Sub<T, Output = T> + Add + Mul + Div + Copy> Sub<&Matrix<T>> for &Matrix<T> {
    type Output = Matrix<T>;

    fn sub(self, other: &Matrix<T>) -> Matrix<T> {
        if !self.compare_dimensions(other) {
            panic!("Can not subtract matrices with different dimensions");
        }
        let mat: Vec<Vec<T>> = self
            .items
            .iter()
            .zip(other.items.iter())
            .map(|(a, b)| a.iter().zip(b.iter()).map(|(&x, &y)| x - y).collect())
            .collect();
        Matrix::new(mat).unwrap()
    }
}

// &Matrix * scalar: multiplies every entry by `n`.
impl<T: Mul<T, Output = T> + Add + Sub + Div + Copy> Mul<T> for &Matrix<T> {
    type Output = Matrix<T>;

    fn mul(self, n: T) -> Matrix<T> {
        let mat: Vec<Vec<T>> = self
            .items
            .iter()
            .map(|row| row.iter().map(|&x| x * n).collect())
            .collect();
        Matrix::new(mat).unwrap()
    }
}

// &Matrix / scalar: divides every entry by `n`.
impl<T: Div<T, Output = T> + Add + Sub + Mul + Copy> Div<T> for &Matrix<T> {
    type Output = Matrix<T>;

    fn div(self, n: T) -> Matrix<T> {
        let mat: Vec<Vec<T>> = self
            .items
            .iter()
            .map(|row| row.iter().map(|&x| x / n).collect())
            .collect();
        Matrix::new(mat).unwrap()
    }
}

// &Matrix * &Matrix: standard matrix product. Panics when the inner
// dimensions (self.cols vs other.rows) disagree.
impl<T: Mul<T, Output = T> + Add<T, Output = T> + Sub + Div + Copy> Mul<&Matrix<T>> for &Matrix<T> {
    type Output = Matrix<T>;

    fn mul(self, other: &Matrix<T>) -> Matrix<T> {
        if self.cols != other.rows {
            panic!("A's columns are not the same as B's rows")
        }
        let mut mat = vec![];
        for a in 0..self.rows {
            let mut r = vec![];
            for c in 0..other.cols {
                // Fold the inner product; `None` until the first term,
                // because T carries no zero element we could start from.
                let mut dot: Option<T> = None;
                for b in 0..self.cols {
                    let term = self.items[a][b] * other.items[b][c];
                    dot = Some(match dot {
                        Some(v) => v + term,
                        None => term,
                    });
                }
                r.push(dot.expect("There is no inner product"));
            }
            mat.push(r);
        }
        Matrix::new(mat).expect("Matrix dimensions are not correct")
    }
}

// Owned-value f64 bindings. These previously duplicated the element-wise
// loops verbatim; they now delegate to the borrowing implementations above
// so there is a single copy of each algorithm.
impl Add<Matrix<f64>> for Matrix<f64> {
    type Output = Matrix<f64>;

    fn add(self, other: Matrix<f64>) -> Matrix<f64> {
        &self + &other
    }
}

impl Sub<Matrix<f64>> for Matrix<f64> {
    type Output = Matrix<f64>;

    fn sub(self, other: Matrix<f64>) -> Matrix<f64> {
        &self - &other
    }
}

impl Mul<f64> for Matrix<f64> {
    type Output = Matrix<f64>;

    fn mul(self, n: f64) -> Matrix<f64> {
        &self * n
    }
}

impl Div<f64> for Matrix<f64> {
    type Output = Matrix<f64>;

    fn div(self, n: f64) -> Matrix<f64> {
        &self / n
    }
}

impl Mul for Matrix<f64> {
    type Output = Matrix<f64>;

    fn mul(self, other: Matrix<f64>) -> Matrix<f64> {
        &self * &other
    }
}
28.707317
100
0.436798
671fd4b904781725923543e944b3b939abb0dbf1
259
// Compile-fail test: diesel must reject a select list that mixes an
// aggregate expression with a plain column. The `//~^ ERROR` annotation is
// positional (it points at the line directly above it) — keep it glued to
// the `let source` line.
#[macro_use]
extern crate diesel;

use diesel::*;
use diesel::dsl::count_star;

table! {
    users {
        id -> Integer,
    }
}

fn main() {
    use self::users::dsl::*;
    // `id` is a non-aggregate column and `count_star()` is an aggregate;
    // selecting both in one tuple must fail the MixedAggregates check.
    let source = users.select((id, count_star()));
    //~^ ERROR MixedAggregates
}
13.631579
50
0.57529