rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_load.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Operations for reading linear tiles of data into the CUDA thread block. */ #pragma once #include <iterator> #include "block_exchange.cuh" #include "../iterator/cache_modified_input_iterator.cuh" #include "../util_ptx.cuh" #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilIo * @{ */ /******************************************************************//** * \name Blocked arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Load a linear segment of items into a blocked arrangement across the thread block. * * \blocked * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectBlocked( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); // Load directly in thread-blocked order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = thread_itr[ITEM]; } } /** * \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range. 
* * \blocked * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectBlocked( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { InputIteratorT thread_itr = block_itr + (linear_tid * ITEMS_PER_THREAD); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if ((linear_tid * ITEMS_PER_THREAD) + ITEM < valid_items) { items[ITEM] = thread_itr[ITEM]; } } } /** * \brief Load a linear segment of items into a blocked arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements.. * * \blocked * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < typename InputT, typename DefaultT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectBlocked( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) items[ITEM] = oob_default; LoadDirectBlocked(linear_tid, block_itr, items, valid_items); } #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /** * Internal implementation for load vectorization */ template < CacheLoadModifier MODIFIER, typename T, int ITEMS_PER_THREAD> __device__ __forceinline__ void InternalLoadDirectBlockedVectorized( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) T *block_ptr, ///< [in] Input pointer for loading from T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { // Biggest memory access word that T is a whole multiple of typedef typename UnitWord<T>::DeviceWord DeviceWord; enum { TOTAL_WORDS = sizeof(items) / sizeof(DeviceWord), VECTOR_SIZE = (TOTAL_WORDS % 4 == 0) ? 4 : (TOTAL_WORDS % 2 == 0) ? 
2 : 1, VECTORS_PER_THREAD = TOTAL_WORDS / VECTOR_SIZE, }; // Vector type typedef typename CubVector<DeviceWord, VECTOR_SIZE>::Type Vector; // Vector items Vector vec_items[VECTORS_PER_THREAD]; // Aliased input ptr Vector* vec_ptr = reinterpret_cast<Vector*>(block_ptr) + (linear_tid * VECTORS_PER_THREAD); // Load directly in thread-blocked order #pragma unroll for (int ITEM = 0; ITEM < VECTORS_PER_THREAD; ITEM++) { vec_items[ITEM] = ThreadLoad<MODIFIER>(vec_ptr + ITEM); } // Copy #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = *(reinterpret_cast<T*>(vec_items) + ITEM); } } #endif // DOXYGEN_SHOULD_SKIP_THIS /** * \brief Load a linear segment of items into a blocked arrangement across the thread block. * * \blocked * * The input offset (\p block_ptr + \p block_offset) must be quad-item aligned * * The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT: * - \p ITEMS_PER_THREAD is odd * - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.) * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. */ template < typename T, int ITEMS_PER_THREAD> __device__ __forceinline__ void LoadDirectBlockedVectorized( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) T *block_ptr, ///< [in] Input pointer for loading from T (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items); } //@} end member group /******************************************************************//** * \name Striped arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Load a linear segment of items into a striped arrangement across the thread block. * * \striped * * \tparam BLOCK_THREADS The thread block size in threads * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < int BLOCK_THREADS, typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InputIteratorT thread_itr = block_itr + linear_tid; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = thread_itr[ITEM * BLOCK_THREADS]; } } /** * \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range * * \striped * * \tparam BLOCK_THREADS The thread block size in threads * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. 
*/ template < int BLOCK_THREADS, typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { InputIteratorT thread_itr = block_itr + linear_tid; #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (linear_tid + (ITEM * BLOCK_THREADS) < valid_items) { items[ITEM] = thread_itr[ITEM * BLOCK_THREADS]; } } } /** * \brief Load a linear segment of items into a striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements. * * \striped * * \tparam BLOCK_THREADS The thread block size in threads * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < int BLOCK_THREADS, typename InputT, typename DefaultT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) items[ITEM] = oob_default; LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, valid_items); } //@} end member group /******************************************************************//** * \name Warp-striped arrangement I/O (direct) *********************************************************************/ //@{ /** * \brief Load a linear segment of items into a warp-striped arrangement across the thread block. * * \warpstriped * * \par Usage Considerations * The number of threads in the thread block must be a multiple of the architecture's warp size. * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. 
*/ template < typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectWarpStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; InputIteratorT thread_itr = block_itr + warp_offset + tid ; // Load directly in warp-striped order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { items[ITEM] = thread_itr[(ITEM * CUB_PTX_WARP_THREADS)]; } } /** * \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range * * \warpstriped * * \par Usage Considerations * The number of threads in the thread block must be a multiple of the architecture's warp size. * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. */ template < typename InputT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectWarpStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { int tid = linear_tid & (CUB_PTX_WARP_THREADS - 1); int wid = linear_tid >> CUB_PTX_LOG_WARP_THREADS; int warp_offset = wid * CUB_PTX_WARP_THREADS * ITEMS_PER_THREAD; InputIteratorT thread_itr = block_itr + warp_offset + tid ; // Load directly in warp-striped order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (warp_offset + tid + (ITEM * CUB_PTX_WARP_THREADS) < valid_items) { items[ITEM] = thread_itr[(ITEM * CUB_PTX_WARP_THREADS)]; } } } /** * \brief Load a linear segment of items into a warp-striped arrangement across the thread block, guarded by range, with a fall-back assignment of out-of-bound elements. * * \warpstriped * * \par Usage Considerations * The number of threads in the thread block must be a multiple of the architecture's warp size. * * \tparam T <b>[inferred]</b> The data type to load. * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam InputIteratorT <b>[inferred]</b> The random-access iterator type for input \iterator. 
*/ template < typename InputT, typename DefaultT, int ITEMS_PER_THREAD, typename InputIteratorT> __device__ __forceinline__ void LoadDirectWarpStriped( int linear_tid, ///< [in] A suitable 1D thread-identifier for the calling thread (e.g., <tt>(threadIdx.y * blockDim.x) + linear_tid</tt> for 2D thread blocks) InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { // Load directly in warp-striped order #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) items[ITEM] = oob_default; LoadDirectWarpStriped(linear_tid, block_itr, items, valid_items); } //@} end member group /** @} */ // end group UtilIo //----------------------------------------------------------------------------- // Generic BlockLoad abstraction //----------------------------------------------------------------------------- /** * \brief cub::BlockLoadAlgorithm enumerates alternative algorithms for cub::BlockLoad to read a linear segment of data from memory into a blocked arrangement across a CUDA thread block. */ /** * \brief cub::BlockLoadAlgorithm enumerates alternative algorithms for cub::BlockLoad to read a linear segment of data from memory into a blocked arrangement across a CUDA thread block. */ enum BlockLoadAlgorithm { /** * \par Overview * * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read * directly from memory. * * \par Performance Considerations * - The utilization of memory transactions (coalescing) decreases as the * access stride between threads increases (i.e., the number items per thread). */ BLOCK_LOAD_DIRECT, /** * \par Overview * * A [<em>blocked arrangement</em>](index.html#sec5sec3) of data is read * from memory using CUDA's built-in vectorized loads as a coalescing optimization. * For example, <tt>ld.global.v4.s32</tt> instructions will be generated * when \p T = \p int and \p ITEMS_PER_THREAD % 4 == 0. * * \par Performance Considerations * - The utilization of memory transactions (coalescing) remains high until the the * access stride between threads (i.e., the number items per thread) exceeds the * maximum vector load width (typically 4 items or 64B, whichever is lower). * - The following conditions will prevent vectorization and loading will fall back to cub::BLOCK_LOAD_DIRECT: * - \p ITEMS_PER_THREAD is odd * - The \p InputIteratorTis not a simple pointer type * - The block input offset is not quadword-aligned * - The data type \p T is not a built-in primitive or CUDA vector type (e.g., \p short, \p int2, \p double, \p float2, etc.) */ BLOCK_LOAD_VECTORIZE, /** * \par Overview * * A [<em>striped arrangement</em>](index.html#sec5sec3) of data is read * efficiently from memory and then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). * * \par Performance Considerations * - The utilization of memory transactions (coalescing) remains high regardless * of items loaded per thread. * - The local reordering incurs slightly longer latencies and throughput than the * direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives. */ BLOCK_LOAD_TRANSPOSE, /** * \par Overview * * A [<em>warp-striped arrangement</em>](index.html#sec5sec3) of data is * read efficiently from memory and then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). 
* * \par Usage Considerations * - BLOCK_THREADS must be a multiple of WARP_THREADS * * \par Performance Considerations * - The utilization of memory transactions (coalescing) remains high regardless * of items loaded per thread. * - The local reordering incurs slightly larger latencies than the * direct cub::BLOCK_LOAD_DIRECT and cub::BLOCK_LOAD_VECTORIZE alternatives. * - Provisions more shared storage, but incurs smaller latencies than the * BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED alternative. */ BLOCK_LOAD_WARP_TRANSPOSE, /** * \par Overview * * Like \p BLOCK_LOAD_WARP_TRANSPOSE, a [<em>warp-striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory and then is locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). To reduce the shared memory * requirement, only one warp's worth of shared memory is provisioned and is * subsequently time-sliced among warps. * * \par Usage Considerations * - BLOCK_THREADS must be a multiple of WARP_THREADS * * \par Performance Considerations * - The utilization of memory transactions (coalescing) remains high regardless * of items loaded per thread. * - Provisions less shared memory temporary storage, but incurs larger * latencies than the BLOCK_LOAD_WARP_TRANSPOSE alternative. */ BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, }; /** * \brief The BlockLoad class provides [<em>collective</em>](index.html#sec0) data movement methods for loading a linear segment of items from memory into a [<em>blocked arrangement</em>](index.html#sec5sec3) across a CUDA thread block. ![](block_load_logo.png) * \ingroup BlockModule * \ingroup UtilIo * * \tparam InputT The data type to read into (which must be convertible from the input iterator's value type). * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam ITEMS_PER_THREAD The number of consecutive items partitioned onto each thread. * \tparam ALGORITHM <b>[optional]</b> cub::BlockLoadAlgorithm tuning policy. default: cub::BLOCK_LOAD_DIRECT. * \tparam WARP_TIME_SLICING <b>[optional]</b> Whether or not only one warp's worth of shared memory should be allocated and time-sliced among block-warps during any load-related data transpositions (versus each warp having its own storage). (default: false) * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - The BlockLoad class provides a single data movement abstraction that can be specialized * to implement different cub::BlockLoadAlgorithm strategies. This facilitates different * performance policies for different architectures, data types, granularity sizes, etc. * - BlockLoad can be optionally specialized by different data movement strategies: * -# <b>cub::BLOCK_LOAD_DIRECT</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * of data is read directly from memory. [More...](\ref cub::BlockLoadAlgorithm) * -# <b>cub::BLOCK_LOAD_VECTORIZE</b>. A [<em>blocked arrangement</em>](index.html#sec5sec3) * of data is read directly from memory using CUDA's built-in vectorized loads as a * coalescing optimization. [More...](\ref cub::BlockLoadAlgorithm) * -# <b>cub::BLOCK_LOAD_TRANSPOSE</b>. 
A [<em>striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory and is then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm) * -# <b>cub::BLOCK_LOAD_WARP_TRANSPOSE</b>. A [<em>warp-striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory and is then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3). [More...](\ref cub::BlockLoadAlgorithm) * -# <b>cub::BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED,</b>. A [<em>warp-striped arrangement</em>](index.html#sec5sec3) * of data is read directly from memory and is then locally transposed into a * [<em>blocked arrangement</em>](index.html#sec5sec3) one warp at a time. [More...](\ref cub::BlockLoadAlgorithm) * - \rowmajor * * \par A Simple Example * \blockcollective{BlockLoad} * \par * The code snippet below illustrates the loading of a linear * segment of 512 integers into a "blocked" arrangement across 128 threads where each * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, * meaning memory references are efficiently coalesced using a warp-striped access * pattern (after which items are locally reordered among threads). * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad; * * // Allocate shared memory for BlockLoad * __shared__ typename BlockLoad::TempStorage temp_storage; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage).Load(d_data, thread_data); * * \endcode * \par * Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>. * The set of \p thread_data across the block of threads in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>. 
* */ template < typename InputT, int BLOCK_DIM_X, int ITEMS_PER_THREAD, BlockLoadAlgorithm ALGORITHM = BLOCK_LOAD_DIRECT, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockLoad { private: /****************************************************************************** * Constants and typed definitions ******************************************************************************/ /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /****************************************************************************** * Algorithmic variants ******************************************************************************/ /// Load helper template <BlockLoadAlgorithm _POLICY, int DUMMY> struct LoadInternal; /** * BLOCK_LOAD_DIRECT specialization of load helper */ template <int DUMMY> struct LoadInternal<BLOCK_LOAD_DIRECT, DUMMY> { /// Shared memory storage layout type typedef NullType TempStorage; /// Linear thread-id int linear_tid; /// Constructor __device__ __forceinline__ LoadInternal( TempStorage &/*temp_storage*/, int linear_tid) : linear_tid(linear_tid) {} /// Load a linear segment of items from memory template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { LoadDirectBlocked(linear_tid, block_itr, items); } /// Load a linear segment of items from memory, guarded by range template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { LoadDirectBlocked(linear_tid, block_itr, items, valid_items); } /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); } }; /** * BLOCK_LOAD_VECTORIZE specialization of load helper */ template <int DUMMY> struct LoadInternal<BLOCK_LOAD_VECTORIZE, DUMMY> { /// Shared memory storage layout type typedef NullType TempStorage; /// Linear thread-id int linear_tid; /// Constructor __device__ __forceinline__ LoadInternal( TempStorage &/*temp_storage*/, int linear_tid) : linear_tid(linear_tid) {} /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) template <typename InputIteratorT> __device__ __forceinline__ void Load( InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items); } /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) template <typename InputIteratorT> __device__ __forceinline__ void Load( const InputT *block_ptr, ///< [in] The thread block's base input iterator for loading from InputT 
(&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InternalLoadDirectBlockedVectorized<LOAD_DEFAULT>(linear_tid, block_ptr, items); } /// Load a linear segment of items from memory, specialized for native pointer types (attempts vectorization) template < CacheLoadModifier MODIFIER, typename ValueType, typename OffsetT> __device__ __forceinline__ void Load( CacheModifiedInputIterator<MODIFIER, ValueType, OffsetT> block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InternalLoadDirectBlockedVectorized<MODIFIER>(linear_tid, block_itr.ptr, items); } /// Load a linear segment of items from memory, specialized for opaque input iterators (skips vectorization) template <typename _InputIteratorT> __device__ __forceinline__ void Load( _InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { LoadDirectBlocked(linear_tid, block_itr, items); } /// Load a linear segment of items from memory, guarded by range (skips vectorization) template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { LoadDirectBlocked(linear_tid, block_itr, items, valid_items); } /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements (skips vectorization) template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { LoadDirectBlocked(linear_tid, block_itr, items, valid_items, oob_default); } }; /** * BLOCK_LOAD_TRANSPOSE specialization of load helper */ template <int DUMMY> struct LoadInternal<BLOCK_LOAD_TRANSPOSE, DUMMY> { // BlockExchange utility type for keys typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange; /// Shared memory storage layout type struct _TempStorage : BlockExchange::TempStorage { /// Temporary storage for partially-full block guard volatile int valid_items; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /// Thread reference to shared storage _TempStorage &temp_storage; /// Linear thread-id int linear_tid; /// Constructor __device__ __forceinline__ LoadInternal( TempStorage &temp_storage, int linear_tid) : temp_storage(temp_storage.Alias()), linear_tid(linear_tid) {} /// Load a linear segment of items from memory template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ { LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items); BlockExchange(temp_storage).StripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT 
(&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, temp_storage.valid_items); BlockExchange(temp_storage).StripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectStriped<BLOCK_THREADS>(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); BlockExchange(temp_storage).StripedToBlocked(items, items); } }; /** * BLOCK_LOAD_WARP_TRANSPOSE specialization of load helper */ template <int DUMMY> struct LoadInternal<BLOCK_LOAD_WARP_TRANSPOSE, DUMMY> { enum { WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH) }; // Assert BLOCK_THREADS must be a multiple of WARP_THREADS CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); // BlockExchange utility type for keys typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, false, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange; /// Shared memory storage layout type struct _TempStorage : BlockExchange::TempStorage { /// Temporary storage for partially-full block guard volatile int valid_items; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /// Thread reference to shared storage _TempStorage &temp_storage; /// Linear thread-id int linear_tid; /// Constructor __device__ __forceinline__ LoadInternal( TempStorage &temp_storage, int linear_tid) : temp_storage(temp_storage.Alias()), linear_tid(linear_tid) {} /// Load a linear segment of items from memory template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ { LoadDirectWarpStriped(linear_tid, block_itr, items); BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements template <typename InputIteratorT, typename DefaultT> __device__ 
__forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } }; /** * BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED specialization of load helper */ template <int DUMMY> struct LoadInternal<BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, DUMMY> { enum { WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH) }; // Assert BLOCK_THREADS must be a multiple of WARP_THREADS CUB_STATIC_ASSERT((BLOCK_THREADS % WARP_THREADS == 0), "BLOCK_THREADS must be a multiple of WARP_THREADS"); // BlockExchange utility type for keys typedef BlockExchange<InputT, BLOCK_DIM_X, ITEMS_PER_THREAD, true, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockExchange; /// Shared memory storage layout type struct _TempStorage : BlockExchange::TempStorage { /// Temporary storage for partially-full block guard volatile int valid_items; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /// Thread reference to shared storage _TempStorage &temp_storage; /// Linear thread-id int linear_tid; /// Constructor __device__ __forceinline__ LoadInternal( TempStorage &temp_storage, int linear_tid) : temp_storage(temp_storage.Alias()), linear_tid(linear_tid) {} /// Load a linear segment of items from memory template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load{ { LoadDirectWarpStriped(linear_tid, block_itr, items); BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items); BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } /// Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { if (linear_tid == 0) temp_storage.valid_items = valid_items; // Move through volatile smem as a workaround to prevent RF spilling on subsequent loads CTA_SYNC(); LoadDirectWarpStriped(linear_tid, block_itr, items, temp_storage.valid_items, oob_default); 
BlockExchange(temp_storage).WarpStripedToBlocked(items, items); } }; /****************************************************************************** * Type definitions ******************************************************************************/ /// Internal load implementation to use typedef LoadInternal<ALGORITHM, 0> InternalLoad; /// Shared memory storage layout type typedef typename InternalLoad::TempStorage _TempStorage; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /****************************************************************************** * Thread fields ******************************************************************************/ /// Thread reference to shared storage _TempStorage &temp_storage; /// Linear thread-id int linear_tid; public: /// \smemstorage{BlockLoad} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockLoad() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockLoad( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Data movement *********************************************************************/ //@{ /** * \brief Load a linear segment of items from memory. * * \par * - \blocked * - \smemreuse * * \par Snippet * The code snippet below illustrates the loading of a linear * segment of 512 integers into a "blocked" arrangement across 128 threads where each * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, * meaning memory references are efficiently coalesced using a warp-striped access * pattern (after which items are locally reordered among threads). * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh> * * __global__ void ExampleKernel(int *d_data, ...) * { * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad; * * // Allocate shared memory for BlockLoad * __shared__ typename BlockLoad::TempStorage temp_storage; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage).Load(d_data, thread_data); * * \endcode * \par * Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, ...</tt>. * The set of \p thread_data across the block of threads in those threads will be * <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>. 
* */ template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD]) ///< [out] Data to load { InternalLoad(temp_storage, linear_tid).Load(block_itr, items); } /** * \brief Load a linear segment of items from memory, guarded by range. * * \par * - \blocked * - \smemreuse * * \par Snippet * The code snippet below illustrates the guarded loading of a linear * segment of 512 integers into a "blocked" arrangement across 128 threads where each * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, * meaning memory references are efficiently coalesced using a warp-striped access * pattern (after which items are locally reordered among threads). * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items, ...) * { * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad; * * // Allocate shared memory for BlockLoad * __shared__ typename BlockLoad::TempStorage temp_storage; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage).Load(d_data, thread_data, valid_items); * * \endcode * \par * Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, 6...</tt> and \p valid_items is \p 5. * The set of \p thread_data across the block of threads in those threads will be * <tt>{ [0,1,2,3], [4,?,?,?], ..., [?,?,?,?] }</tt>, with only the first two threads * being unmasked to load portions of valid data (and other items remaining unassigned). * */ template <typename InputIteratorT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items) ///< [in] Number of valid items to load { InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items); } /** * \brief Load a linear segment of items from memory, guarded by range, with a fall-back assignment of out-of-bound elements * * \par * - \blocked * - \smemreuse * * \par Snippet * The code snippet below illustrates the guarded loading of a linear * segment of 512 integers into a "blocked" arrangement across 128 threads where each * thread owns 4 consecutive items. The load is specialized for \p BLOCK_LOAD_WARP_TRANSPOSE, * meaning memory references are efficiently coalesced using a warp-striped access * pattern (after which items are locally reordered among threads). * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_load.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items, ...) * { * // Specialize BlockLoad for a 1D block of 128 threads owning 4 integer items each * typedef cub::BlockLoad<int, 128, 4, BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad; * * // Allocate shared memory for BlockLoad * __shared__ typename BlockLoad::TempStorage temp_storage; * * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage).Load(d_data, thread_data, valid_items, -1); * * \endcode * \par * Suppose the input \p d_data is <tt>0, 1, 2, 3, 4, 5, 6...</tt>, * \p valid_items is \p 5, and the out-of-bounds default is \p -1. 
* The set of \p thread_data across the block of threads in those threads will be * <tt>{ [0,1,2,3], [4,-1,-1,-1], ..., [-1,-1,-1,-1] }</tt>, with only the first two threads * being unmasked to load portions of valid data (and other items are assigned \p -1) * */ template <typename InputIteratorT, typename DefaultT> __device__ __forceinline__ void Load( InputIteratorT block_itr, ///< [in] The thread block's base input iterator for loading from InputT (&items)[ITEMS_PER_THREAD], ///< [out] Data to load int valid_items, ///< [in] Number of valid items to load DefaultT oob_default) ///< [in] Default value to assign out-of-bound items { InternalLoad(temp_storage, linear_tid).Load(block_itr, items, valid_items, oob_default); } //@} end member group }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
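Usage sketch for the header above (not part of the vendored source): the Doxygen snippets embedded in block_load.cuh describe the intended BlockLoad pattern, and the kernel below is a minimal, self-contained version of it. The kernel name, buffer names, tile shape, and the mainline <cub/block/block_load.cuh> include path are illustrative assumptions; this vendored cub_semiring copy is normally pulled in via its own relative include paths.

#include <cub/block/block_load.cuh>  // mainline CUB path; the vendored copy uses relative includes

// Load a tile of 128 x 4 consecutive ints into a blocked arrangement across a 128-thread block.
__global__ void LoadTileKernel(const int *d_in, int num_valid)
{
    // Specialize BlockLoad: 128 threads, 4 items per thread, warp-transposed loading.
    typedef cub::BlockLoad<int, 128, 4, cub::BLOCK_LOAD_WARP_TRANSPOSE> BlockLoad;

    // Shared memory required by the collective.
    __shared__ typename BlockLoad::TempStorage temp_storage;

    int thread_data[4];

    // Unguarded load: assumes a full tile of 512 valid items starting at d_in.
    BlockLoad(temp_storage).Load(d_in, thread_data);

    __syncthreads();  // required before reusing temp_storage for another collective call

    // Guarded load: items at or beyond num_valid are assigned the out-of-bounds default -1.
    BlockLoad(temp_storage).Load(d_in, thread_data, num_valid, -1);

    // ... consume thread_data ...
}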
rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_scan.cuh
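Companion usage sketch for the block_scan.cuh header that follows (again an illustration, not vendored source): the kernel below mirrors the block-wide exclusive-prefix-sum snippet from that header's documentation. The kernel name, buffer names, and mainline <cub/block/block_scan.cuh> include path are assumptions.

#include <cub/block/block_scan.cuh>  // mainline CUB path; the vendored copy uses relative includes

// Block-wide exclusive prefix sum over one int per thread in a 128-thread block.
__global__ void ScanTileKernel(const int *d_in, int *d_out)
{
    // Specialize BlockScan for a 1D block of 128 threads on type int.
    typedef cub::BlockScan<int, 128> BlockScan;

    // Shared memory required by the collective.
    __shared__ typename BlockScan::TempStorage temp_storage;

    // Each thread obtains one input item.
    int thread_data = d_in[threadIdx.x];

    // Collectively compute the block-wide exclusive prefix sum.
    BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data);

    d_out[threadIdx.x] = thread_data;
}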
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::BlockScan class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel prefix sum/scan of items partitioned across a CUDA thread block. */ #pragma once #include "specializations/block_scan_raking.cuh" #include "specializations/block_scan_warp_scans.cuh" #include "../util_arch.cuh" #include "../util_type.cuh" #include "../util_ptx.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Algorithmic variants ******************************************************************************/ /** * \brief BlockScanAlgorithm enumerates alternative algorithms for cub::BlockScan to compute a parallel prefix scan across a CUDA thread block. */ enum BlockScanAlgorithm { /** * \par Overview * An efficient "raking reduce-then-scan" prefix scan algorithm. Execution is comprised of five phases: * -# Upsweep sequential reduction in registers (if threads contribute more than one input each). Each thread then places the partial reduction of its item(s) into shared memory. * -# Upsweep sequential reduction in shared memory. Threads within a single warp rake across segments of shared partial reductions. * -# A warp-synchronous Kogge-Stone style exclusive scan within the raking warp. * -# Downsweep sequential exclusive scan in shared memory. Threads within a single warp rake across segments of shared partial reductions, seeded with the warp-scan output. * -# Downsweep sequential scan in registers (if threads contribute more than one input), seeded with the raking scan output. 
* * \par * \image html block_scan_raking.png * <div class="centercaption">\p BLOCK_SCAN_RAKING data flow for a hypothetical 16-thread thread block and 4-thread raking warp.</div> * * \par Performance Considerations * - Although this variant may suffer longer turnaround latencies when the * GPU is under-occupied, it can often provide higher overall throughput * across the GPU when suitably occupied. */ BLOCK_SCAN_RAKING, /** * \par Overview * Similar to cub::BLOCK_SCAN_RAKING, but with fewer shared memory reads at * the expense of higher register pressure. Raking threads preserve their * "upsweep" segment of values in registers while performing warp-synchronous * scan, allowing the "downsweep" not to re-read them from shared memory. */ BLOCK_SCAN_RAKING_MEMOIZE, /** * \par Overview * A quick "tiled warpscans" prefix scan algorithm. Execution is comprised of four phases: * -# Upsweep sequential reduction in registers (if threads contribute more than one input each). Each thread then places the partial reduction of its item(s) into shared memory. * -# Compute a shallow, but inefficient warp-synchronous Kogge-Stone style scan within each warp. * -# A propagation phase where the warp scan outputs in each warp are updated with the aggregate from each preceding warp. * -# Downsweep sequential scan in registers (if threads contribute more than one input), seeded with the raking scan output. * * \par * \image html block_scan_warpscans.png * <div class="centercaption">\p BLOCK_SCAN_WARP_SCANS data flow for a hypothetical 16-thread thread block and 4-thread raking warp.</div> * * \par Performance Considerations * - Although this variant may suffer lower overall throughput across the * GPU because due to a heavy reliance on inefficient warpscans, it can * often provide lower turnaround latencies when the GPU is under-occupied. */ BLOCK_SCAN_WARP_SCANS, }; /****************************************************************************** * Block scan ******************************************************************************/ /** * \brief The BlockScan class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel prefix sum/scan of items partitioned across a CUDA thread block. ![](block_scan_logo.png) * \ingroup BlockModule * * \tparam T Data type being scanned * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam ALGORITHM <b>[optional]</b> cub::BlockScanAlgorithm enumerator specifying the underlying algorithm to use (default: cub::BLOCK_SCAN_RAKING) * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - Given a list of input elements and a binary reduction operator, a [<em>prefix scan</em>](http://en.wikipedia.org/wiki/Prefix_sum) * produces an output list where each element is computed to be the reduction * of the elements occurring earlier in the input list. <em>Prefix sum</em> * connotes a prefix scan with the addition operator. The term \em inclusive indicates * that the <em>i</em><sup>th</sup> output reduction incorporates the <em>i</em><sup>th</sup> input. * The term \em exclusive indicates the <em>i</em><sup>th</sup> input is not incorporated into * the <em>i</em><sup>th</sup> output reduction. 
* - \rowmajor * - BlockScan can be optionally specialized by algorithm to accommodate different workload profiles: * -# <b>cub::BLOCK_SCAN_RAKING</b>. An efficient (high throughput) "raking reduce-then-scan" prefix scan algorithm. [More...](\ref cub::BlockScanAlgorithm) * -# <b>cub::BLOCK_SCAN_RAKING_MEMOIZE</b>. Similar to cub::BLOCK_SCAN_RAKING, but having higher throughput at the expense of additional register pressure for intermediate storage. [More...](\ref cub::BlockScanAlgorithm) * -# <b>cub::BLOCK_SCAN_WARP_SCANS</b>. A quick (low latency) "tiled warpscans" prefix scan algorithm. [More...](\ref cub::BlockScanAlgorithm) * * \par Performance Considerations * - \granularity * - Uses special instructions when applicable (e.g., warp \p SHFL) * - Uses synchronization-free communication between warp lanes when applicable * - Invokes a minimal number of minimal block-wide synchronization barriers (only * one or two depending on algorithm selection) * - Incurs zero bank conflicts for most types * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: * - Prefix sum variants (<b><em>vs.</em></b> generic scan) * - \blocksize * - See cub::BlockScanAlgorithm for performance details regarding algorithmic alternatives * * \par A Simple Example * \blockcollective{BlockScan} * \par * The code snippet below illustrates an exclusive prefix sum of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide exclusive prefix sum * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{[1,1,1,1], [1,1,1,1], ..., [1,1,1,1]}</tt>. * The corresponding output \p thread_data in those threads will be * <tt>{[0,1,2,3], [4,5,6,7], ..., [508,509,510,511]}</tt>. * */ template < typename T, int BLOCK_DIM_X, BlockScanAlgorithm ALGORITHM = BLOCK_SCAN_RAKING, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockScan { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /** * Ensure the template parameterization meets the requirements of the * specified algorithm. Currently, the BLOCK_SCAN_WARP_SCANS policy * cannot be used with thread block sizes not a multiple of the * architectural warp size. */ static const BlockScanAlgorithm SAFE_ALGORITHM = ((ALGORITHM == BLOCK_SCAN_WARP_SCANS) && (BLOCK_THREADS % CUB_WARP_THREADS(PTX_ARCH) != 0)) ? 
BLOCK_SCAN_RAKING : ALGORITHM; typedef BlockScanWarpScans<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> WarpScans; typedef BlockScanRaking<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, (SAFE_ALGORITHM == BLOCK_SCAN_RAKING_MEMOIZE), PTX_ARCH> Raking; /// Define the delegate type for the desired algorithm typedef typename If<(SAFE_ALGORITHM == BLOCK_SCAN_WARP_SCANS), WarpScans, Raking>::Type InternalBlockScan; /// Shared memory storage layout type for BlockScan typedef typename InternalBlockScan::TempStorage _TempStorage; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /****************************************************************************** * Public types ******************************************************************************/ public: /// \smemstorage{BlockScan} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockScan() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockScan( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Exclusive prefix sum operations *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. The value of 0 is applied as the initial value, and is assigned to \p output in <em>thread</em><sub>0</sub>. * * \par * - \identityzero * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix sum of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide exclusive prefix sum * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>1, 1, ..., 1</tt>. 
The * corresponding output \p thread_data in those threads will be <tt>0, 1, ..., 127</tt>. * */ __device__ __forceinline__ void ExclusiveSum( T input, ///< [in] Calling thread's input item T &output) ///< [out] Calling thread's output item (may be aliased to \p input) { T initial_value = 0; ExclusiveScan(input, output, initial_value, cub::Sum()); } /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. The value of 0 is applied as the initial value, and is assigned to \p output in <em>thread</em><sub>0</sub>. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - \identityzero * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix sum of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide exclusive prefix sum * int block_aggregate; * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>1, 1, ..., 1</tt>. The * corresponding output \p thread_data in those threads will be <tt>0, 1, ..., 127</tt>. * Furthermore the value \p 128 will be stored in \p block_aggregate for all threads. * */ __device__ __forceinline__ void ExclusiveSum( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { T initial_value = 0; ExclusiveScan(input, output, initial_value, cub::Sum(), block_aggregate); } /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - \identityzero * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>. * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. * The functor will be invoked by the first warp of threads in the block, however only the return value from * <em>lane</em><sub>0</sub> is applied as the block-wide prefix. Can be stateful. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates a single thread block that progressively * computes an exclusive prefix sum over multiple "tiles" of input using a * prefix functor to maintain a running total between block-wide scans. Each tile consists * of 128 integer items that are partitioned across 128 threads. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * // A stateful callback functor that maintains a running prefix to be applied * // during consecutive scan operations. * struct BlockPrefixCallbackOp * { * // Running prefix * int running_total; * * // Constructor * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} * * // Callback operator to be entered by the first warp of threads in the block. * // Thread-0 is responsible for returning a value for seeding the block-wide scan. * __device__ int operator()(int block_aggregate) * { * int old_prefix = running_total; * running_total += block_aggregate; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) * { * // Specialize BlockScan for a 1D block of 128 threads * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(0); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data = d_data[block_offset]; * * // Collectively compute the block-wide exclusive prefix sum * BlockScan(temp_storage).ExclusiveSum( * thread_data, thread_data, prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * d_data[block_offset] = thread_data; * } * \endcode * \par * Suppose the input \p d_data is <tt>1, 1, 1, 1, 1, 1, 1, 1, ...</tt>. * The corresponding output for the first segment will be <tt>0, 1, ..., 127</tt>. * The output for the second segment will be <tt>128, 129, ..., 255</tt>. * * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template <typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveSum( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { ExclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); } //@} end member group /******************************************************************//** * \name Exclusive prefix sum operations (multiple data per thread) *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. The value of 0 is applied as the initial value, and is assigned to \p output[0] in <em>thread</em><sub>0</sub>. * * \par * - \identityzero * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix sum of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide exclusive prefix sum * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }</tt>. The * corresponding output \p thread_data in those threads will be <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. */ template <int ITEMS_PER_THREAD> __device__ __forceinline__ void ExclusiveSum( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD]) ///< [out] Calling thread's output items (may be aliased to \p input) { T initial_value = 0; ExclusiveScan(input, output, initial_value, cub::Sum()); } /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. The value of 0 is applied as the initial value, and is assigned to \p output[0] in <em>thread</em><sub>0</sub>. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - \identityzero * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix sum of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide exclusive prefix sum * int block_aggregate; * BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }</tt>. The * corresponding output \p thread_data in those threads will be <tt>{ [0,1,2,3], [4,5,6,7], ..., [508,509,510,511] }</tt>. * Furthermore the value \p 512 will be stored in \p block_aggregate for all threads. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. */ template <int ITEMS_PER_THREAD> __device__ __forceinline__ void ExclusiveSum( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { // Reduce consecutive thread items in registers T initial_value = 0; ExclusiveScan(input, output, initial_value, cub::Sum(), block_aggregate); } /** * \brief Computes an exclusive block-wide prefix scan using addition (+) as the scan operator. 
Each thread contributes an array of consecutive input elements. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - \identityzero * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>. * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. * The functor will be invoked by the first warp of threads in the block, however only the return value from * <em>lane</em><sub>0</sub> is applied as the block-wide prefix. Can be stateful. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a single thread block that progressively * computes an exclusive prefix sum over multiple "tiles" of input using a * prefix functor to maintain a running total between block-wide scans. Each tile consists * of 512 integer items that are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) * across 128 threads where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * // A stateful callback functor that maintains a running prefix to be applied * // during consecutive scan operations. * struct BlockPrefixCallbackOp * { * // Running prefix * int running_total; * * // Constructor * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} * * // Callback operator to be entered by the first warp of threads in the block. * // Thread-0 is responsible for returning a value for seeding the block-wide scan. * __device__ int operator()(int block_aggregate) * { * int old_prefix = running_total; * running_total += block_aggregate; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) * { * // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread * typedef cub::BlockLoad<int*, 128, 4, BLOCK_LOAD_TRANSPOSE> BlockLoad; * typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE> BlockStore; * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan * __shared__ union { * typename BlockLoad::TempStorage load; * typename BlockScan::TempStorage scan; * typename BlockStore::TempStorage store; * } temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(0); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); * CTA_SYNC(); * * // Collectively compute the block-wide exclusive prefix sum * int block_aggregate; * BlockScan(temp_storage.scan).ExclusiveSum( * thread_data, thread_data, prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); * CTA_SYNC(); * } * \endcode * \par * Suppose the input \p d_data is <tt>1, 1, 1, 1, 1, 1, 1, 1, ...</tt>. * The corresponding output for the first segment will be <tt>0, 1, 2, 3, ..., 510, 511</tt>. 
* The output for the second segment will be <tt>512, 513, 514, 515, ..., 1022, 1023</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template < int ITEMS_PER_THREAD, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveSum( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { ExclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); } //@} end member group // Exclusive prefix sums /******************************************************************//** * \name Exclusive prefix scan operations *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. * * \par * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix max scan of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide exclusive prefix max scan * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>0, -1, 2, -3, ..., 126, -127</tt>. The * corresponding output \p thread_data in those threads will be <tt>INT_MIN, 0, 0, 2, ..., 124, 126</tt>. * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in <em>thread</em><sub>0</sub>) ScanOp scan_op) ///< [in] Binary scan functor { InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op); } /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix max scan of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) 
     * {
     *     // Specialize BlockScan for a 1D block of 128 threads on type int
     *     typedef cub::BlockScan<int, 128> BlockScan;
     *
     *     // Allocate shared memory for BlockScan
     *     __shared__ typename BlockScan::TempStorage temp_storage;
     *
     *     // Obtain input item for each thread
     *     int thread_data;
     *     ...
     *
     *     // Collectively compute the block-wide exclusive prefix max scan
     *     int block_aggregate;
     *     BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), block_aggregate);
     *
     * \endcode
     * \par
     * Suppose the set of input \p thread_data across the block of threads is <tt>0, -1, 2, -3, ..., 126, -127</tt>.  The
     * corresponding output \p thread_data in those threads will be <tt>INT_MIN, 0, 0, 2, ..., 124, 126</tt>.
     * Furthermore the value \p 126 will be stored in \p block_aggregate for all threads.
     *
     * \tparam ScanOp   <b>[inferred]</b>  Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt>
     */
    template <typename ScanOp>
    __device__ __forceinline__ void ExclusiveScan(
        T               input,              ///< [in] Calling thread's input item
        T               &output,            ///< [out] Calling thread's output item (may be aliased to \p input)
        T               initial_value,      ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in <em>thread</em><sub>0</sub>)
        ScanOp          scan_op,            ///< [in] Binary scan functor
        T               &block_aggregate)   ///< [out] block-wide aggregate reduction of input items
    {
        InternalBlockScan(temp_storage).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate);
    }


    /**
     * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor.  Each thread contributes one input element.  The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs.  Also provides every thread with the block-wide \p block_aggregate of all inputs.
     *
     * \par
     * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>.
     *   The functor's input parameter \p block_aggregate is the same value also returned by the scan operation.
     *   The functor will be invoked by the first warp of threads in the block, however only the return value from
     *   <em>lane</em><sub>0</sub> is applied as the block-wide prefix.  Can be stateful.
     * - Supports non-commutative scan operators.
     * - \rowmajor
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates a single thread block that progressively
     * computes an exclusive prefix max scan over multiple "tiles" of input using a
     * prefix functor to maintain a running total between block-wide scans.  Each tile consists
     * of 128 integer items that are partitioned across 128 threads.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
     *
     * // A stateful callback functor that maintains a running prefix to be applied
     * // during consecutive scan operations.
     * struct BlockPrefixCallbackOp
     * {
     *     // Running prefix
     *     int running_total;
     *
     *     // Constructor
     *     __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
     *
     *     // Callback operator to be entered by the first warp of threads in the block.
     *     // Thread-0 is responsible for returning a value for seeding the block-wide scan.
     *     __device__ int operator()(int block_aggregate)
     *     {
     *         int old_prefix = running_total;
     *         running_total = (block_aggregate > old_prefix) ?
block_aggregate : old_prefix; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) * { * // Specialize BlockScan for a 1D block of 128 threads * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(INT_MIN); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data = d_data[block_offset]; * * // Collectively compute the block-wide exclusive prefix max scan * BlockScan(temp_storage).ExclusiveScan( * thread_data, thread_data, INT_MIN, cub::Max(), prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * d_data[block_offset] = thread_data; * } * \endcode * \par * Suppose the input \p d_data is <tt>0, -1, 2, -3, 4, -5, ...</tt>. * The corresponding output for the first segment will be <tt>INT_MIN, 0, 0, 2, ..., 124, 126</tt>. * The output for the second segment will be <tt>126, 128, 128, 130, ..., 252, 254</tt>. * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_prefix_callback_op); } //@} end member group // Inclusive prefix sums /******************************************************************//** * \name Exclusive prefix scan operations (multiple data per thread) *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix max scan of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... 
* * // Collectively compute the block-wide exclusive prefix max scan * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }</tt>. * The corresponding output \p thread_data in those threads will be * <tt>{ [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) T initial_value, ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in <em>thread</em><sub>0</sub>) ScanOp scan_op) ///< [in] Binary scan functor { // Reduce consecutive thread items in registers T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op); // Exclusive scan in registers with prefix as seed internal::ThreadScanExclusive(input, output, scan_op, thread_prefix); } /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an exclusive prefix max scan of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide exclusive prefix max scan * int block_aggregate; * BlockScan(temp_storage).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }</tt>. The * corresponding output \p thread_data in those threads will be <tt>{ [INT_MIN,0,0,2], [2,4,4,6], ..., [506,508,508,510] }</tt>. * Furthermore the value \p 510 will be stored in \p block_aggregate for all threads. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. 
     * \tparam ScanOp   <b>[inferred]</b>  Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt>
     */
    template <
        int             ITEMS_PER_THREAD,
        typename        ScanOp>
    __device__ __forceinline__ void ExclusiveScan(
        T               (&input)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
        T               (&output)[ITEMS_PER_THREAD],    ///< [out] Calling thread's output items (may be aliased to \p input)
        T               initial_value,                  ///< [in] Initial value to seed the exclusive scan (and is assigned to \p output[0] in <em>thread</em><sub>0</sub>)
        ScanOp          scan_op,                        ///< [in] Binary scan functor
        T               &block_aggregate)               ///< [out] block-wide aggregate reduction of input items
    {
        // Reduce consecutive thread items in registers
        T thread_prefix = internal::ThreadReduce(input, scan_op);

        // Exclusive thread block-scan
        ExclusiveScan(thread_prefix, thread_prefix, initial_value, scan_op, block_aggregate);

        // Exclusive scan in registers with prefix as seed
        internal::ThreadScanExclusive(input, output, scan_op, thread_prefix);
    }


    /**
     * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor.  Each thread contributes an array of consecutive input elements.  The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs.  Also provides every thread with the block-wide \p block_aggregate of all inputs.
     *
     * \par
     * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>.
     *   The functor's input parameter \p block_aggregate is the same value also returned by the scan operation.
     *   The functor will be invoked by the first warp of threads in the block, however only the return value from
     *   <em>lane</em><sub>0</sub> is applied as the block-wide prefix.  Can be stateful.
     * - Supports non-commutative scan operators.
     * - \blocked
     * - \granularity
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates a single thread block that progressively
     * computes an exclusive prefix max scan over multiple "tiles" of input using a
     * prefix functor to maintain a running total between block-wide scans.  Each tile consists
     * of 512 integer items that are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3)
     * across 128 threads where each thread owns 4 consecutive items.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
     *
     * // A stateful callback functor that maintains a running prefix to be applied
     * // during consecutive scan operations.
     * struct BlockPrefixCallbackOp
     * {
     *     // Running prefix
     *     int running_total;
     *
     *     // Constructor
     *     __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
     *
     *     // Callback operator to be entered by the first warp of threads in the block.
     *     // Thread-0 is responsible for returning a value for seeding the block-wide scan.
     *     __device__ int operator()(int block_aggregate)
     *     {
     *         int old_prefix = running_total;
     *         running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix;
     *         return old_prefix;
     *     }
     * };
     *
     * __global__ void ExampleKernel(int *d_data, int num_items, ...)
* { * // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread * typedef cub::BlockLoad<int*, 128, 4, BLOCK_LOAD_TRANSPOSE> BlockLoad; * typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE> BlockStore; * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan * __shared__ union { * typename BlockLoad::TempStorage load; * typename BlockScan::TempStorage scan; * typename BlockStore::TempStorage store; * } temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(0); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); * CTA_SYNC(); * * // Collectively compute the block-wide exclusive prefix max scan * BlockScan(temp_storage.scan).ExclusiveScan( * thread_data, thread_data, INT_MIN, cub::Max(), prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); * CTA_SYNC(); * } * \endcode * \par * Suppose the input \p d_data is <tt>0, -1, 2, -3, 4, -5, ...</tt>. * The corresponding output for the first segment will be <tt>INT_MIN, 0, 0, 2, 2, 4, ..., 508, 510</tt>. * The output for the second segment will be <tt>510, 512, 512, 514, 514, 516, ..., 1020, 1022</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { // Reduce consecutive thread items in registers T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); // Exclusive scan in registers with prefix as seed internal::ThreadScanExclusive(input, output, scan_op, thread_prefix); } //@} end member group #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document no-initial-value scans /******************************************************************//** * \name Exclusive prefix scan operations (no initial value, single datum per thread) *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. * * \par * - Supports non-commutative scan operators. 
* - \rowmajor * - \smemreuse * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan functor { InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op); } /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. * * \par * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { InternalBlockScan(temp_storage).ExclusiveScan(input, output, scan_op, block_aggregate); } //@} end member group /******************************************************************//** * \name Exclusive prefix scan operations (no initial value, multiple data per thread) *********************************************************************/ //@{ /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan functor { // Reduce consecutive thread items in registers T thread_partial = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_partial, thread_partial, scan_op); // Exclusive scan in registers with prefix internal::ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); } /** * \brief Computes an exclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. 
* \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { // Reduce consecutive thread items in registers T thread_partial = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_partial, thread_partial, scan_op, block_aggregate); // Exclusive scan in registers with prefix internal::ThreadScanExclusive(input, output, scan_op, thread_partial, (linear_tid != 0)); } //@} end member group #endif // DOXYGEN_SHOULD_SKIP_THIS // Do not document no-initial-value scans /******************************************************************//** * \name Inclusive prefix sum operations *********************************************************************/ //@{ /** * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. * * \par * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix sum of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide inclusive prefix sum * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>1, 1, ..., 1</tt>. The * corresponding output \p thread_data in those threads will be <tt>1, 2, ..., 128</tt>. * */ __device__ __forceinline__ void InclusiveSum( T input, ///< [in] Calling thread's input item T &output) ///< [out] Calling thread's output item (may be aliased to \p input) { InclusiveScan(input, output, cub::Sum()); } /** * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix sum of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide inclusive prefix sum * int block_aggregate; * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>1, 1, ..., 1</tt>. 
The * corresponding output \p thread_data in those threads will be <tt>1, 2, ..., 128</tt>. * Furthermore the value \p 128 will be stored in \p block_aggregate for all threads. * */ __device__ __forceinline__ void InclusiveSum( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { InclusiveScan(input, output, cub::Sum(), block_aggregate); } /** * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes one input element. Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>. * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. * The functor will be invoked by the first warp of threads in the block, however only the return value from * <em>lane</em><sub>0</sub> is applied as the block-wide prefix. Can be stateful. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates a single thread block that progressively * computes an inclusive prefix sum over multiple "tiles" of input using a * prefix functor to maintain a running total between block-wide scans. Each tile consists * of 128 integer items that are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * // A stateful callback functor that maintains a running prefix to be applied * // during consecutive scan operations. * struct BlockPrefixCallbackOp * { * // Running prefix * int running_total; * * // Constructor * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} * * // Callback operator to be entered by the first warp of threads in the block. * // Thread-0 is responsible for returning a value for seeding the block-wide scan. * __device__ int operator()(int block_aggregate) * { * int old_prefix = running_total; * running_total += block_aggregate; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) * { * // Specialize BlockScan for a 1D block of 128 threads * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(0); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data = d_data[block_offset]; * * // Collectively compute the block-wide inclusive prefix sum * BlockScan(temp_storage).InclusiveSum( * thread_data, thread_data, prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * d_data[block_offset] = thread_data; * } * \endcode * \par * Suppose the input \p d_data is <tt>1, 1, 1, 1, 1, 1, 1, 1, ...</tt>. * The corresponding output for the first segment will be <tt>1, 2, ..., 128</tt>. 
* The output for the second segment will be <tt>129, 130, ..., 256</tt>. * * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template <typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveSum( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { InclusiveScan(input, output, cub::Sum(), block_prefix_callback_op); } //@} end member group /******************************************************************//** * \name Inclusive prefix sum operations (multiple data per thread) *********************************************************************/ //@{ /** * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. * * \par * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix sum of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide inclusive prefix sum * BlockScan(temp_storage).InclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }</tt>. The * corresponding output \p thread_data in those threads will be <tt>{ [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. */ template <int ITEMS_PER_THREAD> __device__ __forceinline__ void InclusiveSum( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD]) ///< [out] Calling thread's output items (may be aliased to \p input) { if (ITEMS_PER_THREAD == 1) { InclusiveSum(input[0], output[0]); } else { // Reduce consecutive thread items in registers Sum scan_op; T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveSum(thread_prefix, thread_prefix); // Inclusive scan in registers with prefix as seed internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); } } /** * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
     *
     * \par
     * - \blocked
     * - \granularity
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates an inclusive prefix sum of 512 integer items that
     * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
     * where each thread owns 4 consecutive items.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
     *
     * __global__ void ExampleKernel(...)
     * {
     *     // Specialize BlockScan for a 1D block of 128 threads on type int
     *     typedef cub::BlockScan<int, 128> BlockScan;
     *
     *     // Allocate shared memory for BlockScan
     *     __shared__ typename BlockScan::TempStorage temp_storage;
     *
     *     // Obtain a segment of consecutive items that are blocked across threads
     *     int thread_data[4];
     *     ...
     *
     *     // Collectively compute the block-wide inclusive prefix sum
     *     int block_aggregate;
     *     BlockScan(temp_storage).InclusiveSum(thread_data, thread_data, block_aggregate);
     *
     * \endcode
     * \par
     * Suppose the set of input \p thread_data across the block of threads is
     * <tt>{ [1,1,1,1], [1,1,1,1], ..., [1,1,1,1] }</tt>.  The
     * corresponding output \p thread_data in those threads will be
     * <tt>{ [1,2,3,4], [5,6,7,8], ..., [509,510,511,512] }</tt>.
     * Furthermore the value \p 512 will be stored in \p block_aggregate for all threads.
     *
     * \tparam ITEMS_PER_THREAD    <b>[inferred]</b>    The number of consecutive items partitioned onto each thread.
     */
    template <int ITEMS_PER_THREAD>
    __device__ __forceinline__ void InclusiveSum(
        T                       (&input)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
        T                       (&output)[ITEMS_PER_THREAD],    ///< [out] Calling thread's output items (may be aliased to \p input)
        T                       &block_aggregate)               ///< [out] block-wide aggregate reduction of input items
    {
        if (ITEMS_PER_THREAD == 1)
        {
            InclusiveSum(input[0], output[0], block_aggregate);
        }
        else
        {
            // Reduce consecutive thread items in registers
            Sum scan_op;
            T thread_prefix = internal::ThreadReduce(input, scan_op);

            // Exclusive thread block-scan
            ExclusiveSum(thread_prefix, thread_prefix, block_aggregate);

            // Inclusive scan in registers with prefix as seed
            internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0));
        }
    }


    /**
     * \brief Computes an inclusive block-wide prefix scan using addition (+) as the scan operator.  Each thread contributes an array of consecutive input elements.  Instead of using 0 as the block-wide prefix, the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs.  Also provides every thread with the block-wide \p block_aggregate of all inputs.
     *
     * \par
     * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>.
     *   The functor's input parameter \p block_aggregate is the same value also returned by the scan operation.
     *   The functor will be invoked by the first warp of threads in the block, however only the return value from
     *   <em>lane</em><sub>0</sub> is applied as the block-wide prefix.  Can be stateful.
     * - \blocked
     * - \granularity
     * - \smemreuse
     *
     * \par Snippet
     * The code snippet below illustrates a single thread block that progressively
     * computes an inclusive prefix sum over multiple "tiles" of input using a
     * prefix functor to maintain a running total between block-wide scans.
Each tile consists
     * of 512 integer items that are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3)
     * across 128 threads where each thread owns 4 consecutive items.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
     *
     * // A stateful callback functor that maintains a running prefix to be applied
     * // during consecutive scan operations.
     * struct BlockPrefixCallbackOp
     * {
     *     // Running prefix
     *     int running_total;
     *
     *     // Constructor
     *     __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
     *
     *     // Callback operator to be entered by the first warp of threads in the block.
     *     // Thread-0 is responsible for returning a value for seeding the block-wide scan.
     *     __device__ int operator()(int block_aggregate)
     *     {
     *         int old_prefix = running_total;
     *         running_total += block_aggregate;
     *         return old_prefix;
     *     }
     * };
     *
     * __global__ void ExampleKernel(int *d_data, int num_items, ...)
     * {
     *     // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread
     *     typedef cub::BlockLoad<int*, 128, 4, BLOCK_LOAD_TRANSPOSE>   BlockLoad;
     *     typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE>  BlockStore;
     *     typedef cub::BlockScan<int, 128>                             BlockScan;
     *
     *     // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan
     *     __shared__ union {
     *         typename BlockLoad::TempStorage     load;
     *         typename BlockScan::TempStorage     scan;
     *         typename BlockStore::TempStorage    store;
     *     } temp_storage;
     *
     *     // Initialize running total
     *     BlockPrefixCallbackOp prefix_op(0);
     *
     *     // Have the block iterate over segments of items
     *     for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4)
     *     {
     *         // Load a segment of consecutive items that are blocked across threads
     *         int thread_data[4];
     *         BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data);
     *         CTA_SYNC();
     *
     *         // Collectively compute the block-wide inclusive prefix sum
     *         BlockScan(temp_storage.scan).InclusiveSum(
     *             thread_data, thread_data, prefix_op);
     *         CTA_SYNC();
     *
     *         // Store scanned items to output segment
     *         BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data);
     *         CTA_SYNC();
     *     }
     * \endcode
     * \par
     * Suppose the input \p d_data is <tt>1, 1, 1, 1, 1, 1, 1, 1, ...</tt>.
     * The corresponding output for the first segment will be <tt>1, 2, 3, 4, ..., 511, 512</tt>.
     * The output for the second segment will be <tt>513, 514, 515, 516, ..., 1023, 1024</tt>.
     *
     * \tparam ITEMS_PER_THREAD        <b>[inferred]</b>   The number of consecutive items partitioned onto each thread.
     * \tparam BlockPrefixCallbackOp   <b>[inferred]</b>   Call-back functor type having member <tt>T operator()(T block_aggregate)</tt>
     */
    template <
        int             ITEMS_PER_THREAD,
        typename        BlockPrefixCallbackOp>
    __device__ __forceinline__ void InclusiveSum(
        T                       (&input)[ITEMS_PER_THREAD],     ///< [in] Calling thread's input items
        T                       (&output)[ITEMS_PER_THREAD],    ///< [out] Calling thread's output items (may be aliased to \p input)
        BlockPrefixCallbackOp   &block_prefix_callback_op)      ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence.
{ if (ITEMS_PER_THREAD == 1) { InclusiveSum(input[0], output[0], block_prefix_callback_op); } else { // Reduce consecutive thread items in registers Sum scan_op; T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveSum(thread_prefix, thread_prefix, block_prefix_callback_op); // Inclusive scan in registers with prefix as seed internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); } } //@} end member group /******************************************************************//** * \name Inclusive prefix scan operations *********************************************************************/ //@{ /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. * * \par * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix max scan of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide inclusive prefix max scan * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>0, -1, 2, -3, ..., 126, -127</tt>. The * corresponding output \p thread_data in those threads will be <tt>0, 0, 2, 2, ..., 126, 126</tt>. * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan functor { InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op); } /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix max scan of 128 integer items that * are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain input item for each thread * int thread_data; * ... * * // Collectively compute the block-wide inclusive prefix max scan * int block_aggregate; * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max(), block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>0, -1, 2, -3, ..., 126, -127</tt>. 
The * corresponding output \p thread_data in those threads will be <tt>0, 0, 2, 2, ..., 126, 126</tt>. * Furthermore the value \p 126 will be stored in \p block_aggregate for all threads. * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_aggregate); } /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>. * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. * The functor will be invoked by the first warp of threads in the block, however only the return value from * <em>lane</em><sub>0</sub> is applied as the block-wide prefix. Can be stateful. * - Supports non-commutative scan operators. * - \rowmajor * - \smemreuse * * \par Snippet * The code snippet below illustrates a single thread block that progressively * computes an inclusive prefix max scan over multiple "tiles" of input using a * prefix functor to maintain a running total between block-wide scans. Each tile consists * of 128 integer items that are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * // A stateful callback functor that maintains a running prefix to be applied * // during consecutive scan operations. * struct BlockPrefixCallbackOp * { * // Running prefix * int running_total; * * // Constructor * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} * * // Callback operator to be entered by the first warp of threads in the block. * // Thread-0 is responsible for returning a value for seeding the block-wide scan. * __device__ int operator()(int block_aggregate) * { * int old_prefix = running_total; * running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) 
* { * // Specialize BlockScan for a 1D block of 128 threads * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(INT_MIN); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data = d_data[block_offset]; * * // Collectively compute the block-wide inclusive prefix max scan * BlockScan(temp_storage).InclusiveScan( * thread_data, thread_data, cub::Max(), prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * d_data[block_offset] = thread_data; * } * \endcode * \par * Suppose the input \p d_data is <tt>0, -1, 2, -3, 4, -5, ...</tt>. * The corresponding output for the first segment will be <tt>0, 0, 2, 2, ..., 126, 126</tt>. * The output for the second segment will be <tt>128, 128, 130, 130, ..., 254, 254</tt>. * * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { InternalBlockScan(temp_storage).InclusiveScan(input, output, scan_op, block_prefix_callback_op); } //@} end member group /******************************************************************//** * \name Inclusive prefix scan operations (multiple data per thread) *********************************************************************/ //@{ /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix max scan of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide inclusive prefix max scan * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }</tt>. The * corresponding output \p thread_data in those threads will be <tt>{ [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }</tt>. 
* * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp> __device__ __forceinline__ void InclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan functor { if (ITEMS_PER_THREAD == 1) { InclusiveScan(input[0], output[0], scan_op); } else { // Reduce consecutive thread items in registers T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_prefix, thread_prefix, scan_op); // Inclusive scan in registers with prefix as seed (first thread does not seed) internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); } } /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates an inclusive prefix max scan of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockScan for a 1D block of 128 threads on type int * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate shared memory for BlockScan * __shared__ typename BlockScan::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute the block-wide inclusive prefix max scan * int block_aggregate; * BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max(), block_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,-1,2,-3], [4,-5,6,-7], ..., [508,-509,510,-511] }</tt>. * The corresponding output \p thread_data in those threads will be * <tt>{ [0,0,2,2], [4,4,6,6], ..., [508,508,510,510] }</tt>. * Furthermore the value \p 510 will be stored in \p block_aggregate for all threads. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. 
* \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp> __device__ __forceinline__ void InclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor T &block_aggregate) ///< [out] block-wide aggregate reduction of input items { if (ITEMS_PER_THREAD == 1) { InclusiveScan(input[0], output[0], scan_op, block_aggregate); } else { // Reduce consecutive thread items in registers T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan (with no initial value) ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_aggregate); // Inclusive scan in registers with prefix as seed (first thread does not seed) internal::ThreadScanInclusive(input, output, scan_op, thread_prefix, (linear_tid != 0)); } } /** * \brief Computes an inclusive block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. * * \par * - The \p block_prefix_callback_op functor must implement a member function <tt>T operator()(T block_aggregate)</tt>. * The functor's input parameter \p block_aggregate is the same value also returned by the scan operation. * The functor will be invoked by the first warp of threads in the block, however only the return value from * <em>lane</em><sub>0</sub> is applied as the block-wide prefix. Can be stateful. * - Supports non-commutative scan operators. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates a single thread block that progressively * computes an inclusive prefix max scan over multiple "tiles" of input using a * prefix functor to maintain a running total between block-wide scans. Each tile consists * of 128 integer items that are partitioned across 128 threads. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_scan.cuh> * * // A stateful callback functor that maintains a running prefix to be applied * // during consecutive scan operations. * struct BlockPrefixCallbackOp * { * // Running prefix * int running_total; * * // Constructor * __device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {} * * // Callback operator to be entered by the first warp of threads in the block. * // Thread-0 is responsible for returning a value for seeding the block-wide scan. * __device__ int operator()(int block_aggregate) * { * int old_prefix = running_total; * running_total = (block_aggregate > old_prefix) ? block_aggregate : old_prefix; * return old_prefix; * } * }; * * __global__ void ExampleKernel(int *d_data, int num_items, ...) 
* { * // Specialize BlockLoad, BlockStore, and BlockScan for a 1D block of 128 threads, 4 ints per thread * typedef cub::BlockLoad<int*, 128, 4, BLOCK_LOAD_TRANSPOSE> BlockLoad; * typedef cub::BlockStore<int, 128, 4, BLOCK_STORE_TRANSPOSE> BlockStore; * typedef cub::BlockScan<int, 128> BlockScan; * * // Allocate aliased shared memory for BlockLoad, BlockStore, and BlockScan * __shared__ union { * typename BlockLoad::TempStorage load; * typename BlockScan::TempStorage scan; * typename BlockStore::TempStorage store; * } temp_storage; * * // Initialize running total * BlockPrefixCallbackOp prefix_op(0); * * // Have the block iterate over segments of items * for (int block_offset = 0; block_offset < num_items; block_offset += 128 * 4) * { * // Load a segment of consecutive items that are blocked across threads * int thread_data[4]; * BlockLoad(temp_storage.load).Load(d_data + block_offset, thread_data); * CTA_SYNC(); * * // Collectively compute the block-wide inclusive prefix max scan * BlockScan(temp_storage.scan).InclusiveScan( * thread_data, thread_data, cub::Max(), prefix_op); * CTA_SYNC(); * * // Store scanned items to output segment * BlockStore(temp_storage.store).Store(d_data + block_offset, thread_data); * CTA_SYNC(); * } * \endcode * \par * Suppose the input \p d_data is <tt>0, -1, 2, -3, 4, -5, ...</tt>. * The corresponding output for the first segment will be <tt>0, 0, 2, 2, 4, 4, ..., 510, 510</tt>. * The output for the second segment will be <tt>512, 512, 514, 514, 516, 516, ..., 1022, 1022</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam BlockPrefixCallbackOp <b>[inferred]</b> Call-back functor type having member <tt>T operator()(T block_aggregate)</tt> */ template < int ITEMS_PER_THREAD, typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan functor BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide prefix to be applied to the logical input sequence. { if (ITEMS_PER_THREAD == 1) { InclusiveScan(input[0], output[0], scan_op, block_prefix_callback_op); } else { // Reduce consecutive thread items in registers T thread_prefix = internal::ThreadReduce(input, scan_op); // Exclusive thread block-scan ExclusiveScan(thread_prefix, thread_prefix, scan_op, block_prefix_callback_op); // Inclusive scan in registers with prefix as seed internal::ThreadScanInclusive(input, output, scan_op, thread_prefix); } } //@} end member group }; /** * \example example_block_scan.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
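/*
 * A minimal end-to-end sketch (illustrative kernel and buffer names, assuming a
 * 1D block of 128 threads with 4 items per thread) that ties the inclusive-scan
 * overloads above together: it loads a blocked 512-item tile, computes a
 * block-wide inclusive max-scan, and also retrieves the block-wide aggregate.
 *
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/block/block_scan.cuh>
 *
 * __global__ void SketchInclusiveMaxScanKernel(int *d_in, int *d_out, int *d_aggregate)
 * {
 *     // Specialize BlockScan for a 1D block of 128 threads on type int
 *     typedef cub::BlockScan<int, 128> BlockScan;
 *
 *     // Allocate shared memory for BlockScan
 *     __shared__ typename BlockScan::TempStorage temp_storage;
 *
 *     // Each thread owns 4 consecutive items of the 512-item tile (blocked arrangement)
 *     int thread_data[4];
 *     for (int i = 0; i < 4; ++i)
 *         thread_data[i] = d_in[(threadIdx.x * 4) + i];
 *
 *     // Collectively compute the block-wide inclusive prefix max scan,
 *     // also obtaining the block-wide aggregate (the tile's maximum)
 *     int block_aggregate;
 *     BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, cub::Max(), block_aggregate);
 *
 *     // Write the scanned items and the aggregate back to global memory
 *     for (int i = 0; i < 4; ++i)
 *         d_out[(threadIdx.x * 4) + i] = thread_data[i];
 *     if (threadIdx.x == 0)
 *         *d_aggregate = block_aggregate;
 * }
 * \endcode
 */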
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_shuffle.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::BlockShuffle class provides [<em>collective</em>](index.html#sec0) methods for shuffling data partitioned across a CUDA thread block. */ #pragma once #include "../util_arch.cuh" #include "../util_ptx.cuh" #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockShuffle class provides [<em>collective</em>](index.html#sec0) methods for shuffling data partitioned across a CUDA thread block. * \ingroup BlockModule * * \tparam T The data type to be exchanged. * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * It is commonplace for blocks of threads to rearrange data items between * threads. The BlockShuffle abstraction allows threads to efficiently shift items * either (a) up to their successor or (b) down to their predecessor. 
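 *
 * \par A Simple Example
 * A minimal sketch (hypothetical kernel name, assuming a 1D block of 128 threads)
 * in which each thread obtains the item held by its predecessor via Offset():
 * \par
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/block/block_shuffle.cuh>
 *
 * __global__ void SketchShuffleKernel(int *d_data)
 * {
 *     // Specialize BlockShuffle for a 1D block of 128 threads on type int
 *     typedef cub::BlockShuffle<int, 128> BlockShuffle;
 *
 *     // Allocate shared memory for BlockShuffle
 *     __shared__ typename BlockShuffle::TempStorage temp_storage;
 *
 *     // Each thread obtains one item
 *     int thread_data = d_data[threadIdx.x];
 *
 *     // Thread i receives the item of thread i-1; thread0's output is left
 *     // unchanged because it has no predecessor
 *     int shifted = thread_data;
 *     BlockShuffle(temp_storage).Offset(thread_data, shifted, -1);
 *
 *     d_data[threadIdx.x] = shifted;
 * }
 * \endcode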
* */ template < typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockShuffle { private: /****************************************************************************** * Constants ******************************************************************************/ enum { BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, LOG_WARP_THREADS = CUB_LOG_WARP_THREADS(PTX_ARCH), WARP_THREADS = 1 << LOG_WARP_THREADS, WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, }; /****************************************************************************** * Type definitions ******************************************************************************/ /// Shared memory storage layout type (last element from each thread's input) struct _TempStorage { T prev[BLOCK_THREADS]; T next[BLOCK_THREADS]; }; public: /// \smemstorage{BlockShuffle} struct TempStorage : Uninitialized<_TempStorage> {}; private: /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } public: /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockShuffle() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockShuffle( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Shuffle movement *********************************************************************/ //@{ /** * \brief Each <em>thread<sub>i</sub></em> obtains the \p input provided by <em>thread</em><sub><em>i</em>+<tt>distance</tt></sub>. The offset \p distance may be negative. * * \par * - \smemreuse */ __device__ __forceinline__ void Offset( T input, ///< [in] The input item from the calling thread (<em>thread<sub>i</sub></em>) T& output, ///< [out] The \p input item from the successor (or predecessor) thread <em>thread</em><sub><em>i</em>+<tt>distance</tt></sub> (may be aliased to \p input). 
This value is only updated for <em>thread<sub>i</sub></em> when 0 <= (<em>i</em> + \p distance) < <tt>BLOCK_THREADS</tt>
        int distance = 1)   ///< [in] Offset distance (may be negative)
    {
        temp_storage.prev[linear_tid] = input;

        CTA_SYNC();

        if ((linear_tid + distance >= 0) && (linear_tid + distance < BLOCK_THREADS))
            output = temp_storage.prev[linear_tid + distance];
    }


    /**
     * \brief Each <em>thread<sub>i</sub></em> obtains the \p input provided by <em>thread</em><sub>(<em>i</em>+<tt>distance</tt>)%<tt>BLOCK_THREADS</tt></sub>.
     *
     * \par
     * - \smemreuse
     */
    __device__ __forceinline__ void Rotate(
        T               input,          ///< [in] The calling thread's input item
        T&              output,         ///< [out] The \p input item from thread <em>thread</em><sub>(<em>i</em>+<tt>distance</tt>)%<tt>BLOCK_THREADS</tt></sub> (may be aliased to \p input)
        unsigned int    distance = 1)   ///< [in] Offset distance (0 < \p distance < <tt>BLOCK_THREADS</tt>)
    {
        temp_storage.prev[linear_tid] = input;

        CTA_SYNC();

        unsigned int offset = linear_tid + distance;
        if (offset >= BLOCK_THREADS)
            offset -= BLOCK_THREADS;

        output = temp_storage.prev[offset];
    }


    /**
     * \brief The thread block rotates its [<em>blocked arrangement</em>](index.html#sec5sec3) of \p input items, shifting it up by one item
     *
     * \par
     * - \blocked
     * - \granularity
     * - \smemreuse
     */
    template <int ITEMS_PER_THREAD>
    __device__ __forceinline__ void Up(
        T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
        T (&prev)[ITEMS_PER_THREAD])    ///< [out] The corresponding predecessor items (may be aliased to \p input).  The item \p prev[0] is not updated for <em>thread</em><sub>0</sub>.
    {
        temp_storage.prev[linear_tid] = input[ITEMS_PER_THREAD - 1];

        CTA_SYNC();

        #pragma unroll
        for (int ITEM = ITEMS_PER_THREAD - 1; ITEM > 0; --ITEM)
            prev[ITEM] = input[ITEM - 1];

        if (linear_tid > 0)
            prev[0] = temp_storage.prev[linear_tid - 1];
    }


    /**
     * \brief The thread block rotates its [<em>blocked arrangement</em>](index.html#sec5sec3) of \p input items, shifting it up by one item.  All threads receive the \p input provided by <em>thread</em><sub><tt>BLOCK_THREADS-1</tt></sub>.
     *
     * \par
     * - \blocked
     * - \granularity
     * - \smemreuse
     */
    template <int ITEMS_PER_THREAD>
    __device__ __forceinline__ void Up(
        T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
        T (&prev)[ITEMS_PER_THREAD],    ///< [out] The corresponding predecessor items (may be aliased to \p input).  The item \p prev[0] is not updated for <em>thread</em><sub>0</sub>.
        T &block_suffix)                ///< [out] The item \p input[ITEMS_PER_THREAD-1] from <em>thread</em><sub><tt>BLOCK_THREADS-1</tt></sub>, provided to all threads
    {
        Up(input, prev);
        block_suffix = temp_storage.prev[BLOCK_THREADS - 1];
    }


    /**
     * \brief The thread block rotates its [<em>blocked arrangement</em>](index.html#sec5sec3) of \p input items, shifting it down by one item
     *
     * \par
     * - \blocked
     * - \granularity
     * - \smemreuse
     */
    template <int ITEMS_PER_THREAD>
    __device__ __forceinline__ void Down(
        T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
        T (&prev)[ITEMS_PER_THREAD])    ///< [out] The corresponding successor items (may be aliased to \p input).  The item \p prev[ITEMS_PER_THREAD-1] is not updated for <em>thread</em><sub>BLOCK_THREADS-1</sub>.
    {
        temp_storage.next[linear_tid] = input[0];

        CTA_SYNC();

        #pragma unroll
        for (int ITEM = 0; ITEM < ITEMS_PER_THREAD - 1; ++ITEM)
            prev[ITEM] = input[ITEM + 1];

        if (linear_tid < BLOCK_THREADS - 1)
            prev[ITEMS_PER_THREAD - 1] = temp_storage.next[linear_tid + 1];
    }


    /**
     * \brief The thread block rotates its [<em>blocked arrangement</em>](index.html#sec5sec3) of input items, shifting it down by one item.  All threads receive \p input[0] provided by <em>thread</em><sub><tt>0</tt></sub>.
     *
     * \par
     * - \blocked
     * - \granularity
     * - \smemreuse
     */
    template <int ITEMS_PER_THREAD>
    __device__ __forceinline__ void Down(
        T (&input)[ITEMS_PER_THREAD],   ///< [in] The calling thread's input items
        T (&prev)[ITEMS_PER_THREAD],    ///< [out] The corresponding successor items (may be aliased to \p input).  The item \p prev[ITEMS_PER_THREAD-1] is not updated for <em>thread</em><sub>BLOCK_THREADS-1</sub>.
        T &block_prefix)                ///< [out] The item \p input[0] from <em>thread</em><sub><tt>0</tt></sub>, provided to all threads
    {
        Down(input, prev);
        block_prefix = temp_storage.next[0];
    }

    //@}  end member group


};

}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
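/*
 * A minimal usage sketch (illustrative kernel and buffer names, assuming a 1D
 * block of 128 threads with 4 items per thread) showing how Up() supplies each
 * item's predecessor, e.g. for computing adjacent differences within a tile:
 *
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/block/block_shuffle.cuh>
 *
 * __global__ void SketchAdjacentDifferenceKernel(int *d_in, int *d_out)
 * {
 *     // Specialize BlockShuffle for a 1D block of 128 threads on type int
 *     typedef cub::BlockShuffle<int, 128> BlockShuffle;
 *
 *     // Allocate shared memory for BlockShuffle
 *     __shared__ typename BlockShuffle::TempStorage temp_storage;
 *
 *     // Each thread owns 4 consecutive items of the 512-item tile (blocked arrangement)
 *     int thread_data[4];
 *     for (int i = 0; i < 4; ++i)
 *         thread_data[i] = d_in[(threadIdx.x * 4) + i];
 *
 *     // Obtain each item's predecessor; prev[0] of thread0 is not written by Up(),
 *     // so seed it with zero beforehand
 *     int prev[4];
 *     prev[0] = 0;
 *     BlockShuffle(temp_storage).Up(thread_data, prev);
 *
 *     // Adjacent differences (the tile's first item is differenced against 0)
 *     for (int i = 0; i < 4; ++i)
 *         d_out[(threadIdx.x * 4) + i] = thread_data[i] - prev[i];
 * }
 * \endcode
 */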
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/block/block_discontinuity.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::BlockDiscontinuity class provides [<em>collective</em>](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. */ #pragma once #include "../util_type.cuh" #include "../util_ptx.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockDiscontinuity class provides [<em>collective</em>](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. ![](discont_logo.png) * \ingroup BlockModule * * \tparam T The data type to be flagged. * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension * \tparam BLOCK_DIM_Y <b>[optional]</b> The thread block length in threads along the Y dimension (default: 1) * \tparam BLOCK_DIM_Z <b>[optional]</b> The thread block length in threads along the Z dimension (default: 1) * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - A set of "head flags" (or "tail flags") is often used to indicate corresponding items * that differ from their predecessors (or successors). For example, head flags are convenient * for demarcating disjoint data segments as part of a segmented scan or reduction. * - \blocked * * \par Performance Considerations * - \granularity * * \par A Simple Example * \blockcollective{BlockDiscontinuity} * \par * The code snippet below illustrates the head flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute head flags for discontinuities in the segment * int head_flags[4]; * BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }</tt>. * The corresponding output \p head_flags in those threads will be * <tt>{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>. * * \par Performance Considerations * - Incurs zero bank conflicts for most types * */ template < typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockDiscontinuity { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /// Shared memory storage layout type (last element from each thread's input) struct _TempStorage { T first_items[BLOCK_THREADS]; T last_items[BLOCK_THREADS]; }; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /// Specialization for when FlagOp has third index param template <typename FlagOp, bool HAS_PARAM = BinaryOpHasIdxParam<T, FlagOp>::HAS_PARAM> struct ApplyOp { // Apply flag operator static __device__ __forceinline__ bool FlagT(FlagOp flag_op, const T &a, const T &b, int idx) { return flag_op(a, b, idx); } }; /// Specialization for when FlagOp does not have a third index param template <typename FlagOp> struct ApplyOp<FlagOp, false> { // Apply flag operator static __device__ __forceinline__ bool FlagT(FlagOp flag_op, const T &a, const T &b, int /*idx*/) { return flag_op(a, b); } }; /// Templated unrolling of item comparison (inductive case) template <int ITERATION, int MAX_ITERATIONS> struct Iterate { // Head flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagHeads( int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op) ///< [in] Binary boolean flag predicate { preds[ITERATION] = input[ITERATION - 1]; flags[ITERATION] = ApplyOp<FlagOp>::FlagT( flag_op, preds[ITERATION], input[ITERATION], (linear_tid * ITEMS_PER_THREAD) + ITERATION); Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagHeads(linear_tid, flags, input, preds, flag_op); } // Tail flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagTails( int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T 
(&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { flags[ITERATION] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITERATION], input[ITERATION + 1], (linear_tid * ITEMS_PER_THREAD) + ITERATION + 1); Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagTails(linear_tid, flags, input, flag_op); } }; /// Templated unrolling of item comparison (termination case) template <int MAX_ITERATIONS> struct Iterate<MAX_ITERATIONS, MAX_ITERATIONS> { // Head flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagHeads( int /*linear_tid*/, FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&/*preds*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate {} // Tail flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagTails( int /*linear_tid*/, FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate {} }; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; public: /// \smemstorage{BlockDiscontinuity} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockDiscontinuity() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. 
*/ __device__ __forceinline__ BlockDiscontinuity( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Head flag operations *********************************************************************/ //@{ #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share last item temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); if (linear_tid == 0) { // Set flag for first thread-item (preds[0] is undefined) head_flags[0] = 1; } else { preds[0] = temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_predecessor_item) ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). { // Share last item temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); // Set flag for first thread-item preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); } #endif // DOXYGEN_SHOULD_SKIP_THIS /** * \brief Sets head flags indicating discontinuities between items partitioned across the thread block, for which the first item has no reference and is always flagged. * * \par * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt> * returns \p true (where <em>previous-item</em> is either the preceding item * in the same thread or the last item in the previous thread). * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is always flagged. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the head-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute head flags for discontinuities in the segment * int head_flags[4]; * BlockDiscontinuity(temp_storage).FlagHeads(head_flags, thread_data, cub::Inequality()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }</tt>. * The corresponding output \p head_flags in those threads will be * <tt>{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. */ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { T preds[ITEMS_PER_THREAD]; FlagHeads(head_flags, input, preds, flag_op); } /** * \brief Sets head flags indicating discontinuities between items partitioned across the thread block. * * \par * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt> * returns \p true (where <em>previous-item</em> is either the preceding item * in the same thread or the last item in the previous thread). * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is compared * against \p tile_predecessor_item. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the head-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Have thread0 obtain the predecessor item for the entire tile * int tile_predecessor_item; * if (threadIdx.x == 0) tile_predecessor_item == ... 
* * // Collectively compute head flags for discontinuities in the segment * int head_flags[4]; * BlockDiscontinuity(temp_storage).FlagHeads( * head_flags, thread_data, cub::Inequality(), tile_predecessor_item); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], [3,4,4,4], ... }</tt>, * and that \p tile_predecessor_item is \p 0. The corresponding output \p head_flags in those threads will be * <tt>{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. */ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_predecessor_item) ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). { T preds[ITEMS_PER_THREAD]; FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item); } //@} end member group /******************************************************************//** * \name Tail flag operations *********************************************************************/ //@{ /** * \brief Sets tail flags indicating discontinuities between items partitioned across the thread block, for which the last item has no reference and is always flagged. * * \par * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt> * returns \p true (where <em>next-item</em> is either the next item * in the same thread or the first item in the next thread). * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item * <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is always flagged. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the tail-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... 
* * // Collectively compute tail flags for discontinuities in the segment * int tail_flags[4]; * BlockDiscontinuity(temp_storage).FlagTails(tail_flags, thread_data, cub::Inequality()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt>. * The corresponding output \p tail_flags in those threads will be * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. */ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagTails( FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first item temp_storage.first_items[linear_tid] = input[0]; CTA_SYNC(); // Set flag for last thread-item tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1], (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } /** * \brief Sets tail flags indicating discontinuities between items partitioned across the thread block. * * \par * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt> * returns \p true (where <em>next-item</em> is either the next item * in the same thread or the first item in the next thread). * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item * <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is compared * against \p tile_successor_item. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the tail-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Have thread127 obtain the successor item for the entire tile * int tile_successor_item; * if (threadIdx.x == 127) tile_successor_item == ... 
* * // Collectively compute tail flags for discontinuities in the segment * int tail_flags[4]; * BlockDiscontinuity(temp_storage).FlagTails( * tail_flags, thread_data, cub::Inequality(), tile_successor_item); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt> * and that \p tile_successor_item is \p 125. The corresponding output \p tail_flags in those threads will be * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. */ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagTails( FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_successor_item) ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). { // Share first item temp_storage.first_items[linear_tid] = input[0]; CTA_SYNC(); // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } //@} end member group /******************************************************************//** * \name Head & tail flag operations *********************************************************************/ //@{ /** * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. * * \par * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt> * returns \p true (where <em>previous-item</em> is either the preceding item * in the same thread or the last item in the previous thread). * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is always flagged. * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt> * returns \p true (where <em>next-item</em> is either the next item * in the same thread or the first item in the next thread). * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item * <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is always flagged. 
* - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the head- and tail-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Collectively compute head and flags for discontinuities in the segment * int head_flags[4]; * int tail_flags[4]; * BlockDiscontinuity(temp_storage).FlagTails( * head_flags, tail_flags, thread_data, cub::Inequality()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt> * and that the tile_successor_item is \p 125. The corresponding output \p head_flags * in those threads will be <tt>{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>. * and the corresponding output \p tail_flags in those threads will be * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. */ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item preds[0] = temp_storage.last_items[linear_tid - 1]; if (linear_tid == 0) { head_flags[0] = 1; } else { head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set flag for last thread-item tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1], (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } /** * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. 
* * \par * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt> * returns \p true (where <em>previous-item</em> is either the preceding item * in the same thread or the last item in the previous thread). * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is always flagged. * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt> * returns \p true (where <em>next-item</em> is either the next item * in the same thread or the first item in the next thread). * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item * <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is compared * against \p tile_predecessor_item. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the head- and tail-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... * * // Have thread127 obtain the successor item for the entire tile * int tile_successor_item; * if (threadIdx.x == 127) tile_successor_item == ... * * // Collectively compute head and flags for discontinuities in the segment * int head_flags[4]; * int tail_flags[4]; * BlockDiscontinuity(temp_storage).FlagTails( * head_flags, tail_flags, tile_successor_item, thread_data, cub::Inequality()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt> * and that the tile_successor_item is \p 125. The corresponding output \p head_flags * in those threads will be <tt>{ [1,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>. * and the corresponding output \p tail_flags in those threads will be * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }</tt>. * * \tparam ITEMS_PER_THREAD <b>[inferred]</b> The number of consecutive items partitioned onto each thread. * \tparam FlagT <b>[inferred]</b> The flag type (must be an integer type) * \tparam FlagOp <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false. \p b_index is the rank of b in the aggregate tile of data. 
*/ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item if (linear_tid == 0) { head_flags[0] = 1; } else { preds[0] = temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } /** * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block. * * \par * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt> * returns \p true (where <em>previous-item</em> is either the preceding item * in the same thread or the last item in the previous thread). * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is compared * against \p tile_predecessor_item. * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item * <tt>input<sub><em>i</em></sub></tt> when * <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt> * returns \p true (where <em>next-item</em> is either the next item * in the same thread or the first item in the next thread). * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item * <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is always flagged. * - \blocked * - \granularity * - \smemreuse * * \par Snippet * The code snippet below illustrates the head- and tail-flagging of 512 integer items that * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads * where each thread owns 4 consecutive items. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/block/block_discontinuity.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int * typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity; * * // Allocate shared memory for BlockDiscontinuity * __shared__ typename BlockDiscontinuity::TempStorage temp_storage; * * // Obtain a segment of consecutive items that are blocked across threads * int thread_data[4]; * ... 
 *
 *     // Have thread0 obtain the predecessor item for the entire tile
 *     int tile_predecessor_item;
 *     if (threadIdx.x == 0) tile_predecessor_item = ...
 *
 *     // Collectively compute head and tail flags for discontinuities in the segment
 *     int head_flags[4];
 *     int tail_flags[4];
 *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
 *         head_flags, tile_predecessor_item, tail_flags,
 *         thread_data, cub::Inequality());
 *
 * \endcode
 * \par
 * Suppose the set of input \p thread_data across the block of threads is
 * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt>,
 * and that the \p tile_predecessor_item is \p 0.  The corresponding output \p head_flags
 * in those threads will be <tt>{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>,
 * and the corresponding output \p tail_flags in those threads will be
 * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,1] }</tt>.
 *
 * \tparam ITEMS_PER_THREAD    <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
 * \tparam FlagT               <b>[inferred]</b> The flag type (must be an integer type)
 * \tparam FlagOp              <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false.  \p b_index is the rank of b in the aggregate tile of data.
 */
template <
    int             ITEMS_PER_THREAD,
    typename        FlagT,
    typename        FlagOp>
__device__ __forceinline__ void FlagHeadsAndTails(
    FlagT           (&head_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity head_flags
    T               tile_predecessor_item,              ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>).
    FlagT           (&tail_flags)[ITEMS_PER_THREAD],    ///< [out] Calling thread's discontinuity tail_flags
    T               (&input)[ITEMS_PER_THREAD],         ///< [in] Calling thread's input items
    FlagOp          flag_op)                            ///< [in] Binary boolean flag predicate
{
    // Share first and last items
    temp_storage.first_items[linear_tid] = input[0];
    temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1];

    CTA_SYNC();

    T preds[ITEMS_PER_THREAD];

    // Set flag for first thread-item
    preds[0] = (linear_tid == 0) ?
        tile_predecessor_item :              // First thread
        temp_storage.last_items[linear_tid - 1];

    head_flags[0] = ApplyOp<FlagOp>::FlagT(
        flag_op,
        preds[0],
        input[0],
        linear_tid * ITEMS_PER_THREAD);

    // Set flag for last thread-item
    tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ?
        1 :                             // Last thread
        ApplyOp<FlagOp>::FlagT(
            flag_op,
            input[ITEMS_PER_THREAD - 1],
            temp_storage.first_items[linear_tid + 1],
            (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD);

    // Set head_flags for remaining items
    Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op);

    // Set tail_flags for remaining items
    Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op);
}


/**
 * \brief Sets both head and tail flags indicating discontinuities between items partitioned across the thread block.
 *
 * \par
 * - The flag <tt>head_flags<sub><em>i</em></sub></tt> is set for item
 *   <tt>input<sub><em>i</em></sub></tt> when
 *   <tt>flag_op(</tt><em>previous-item</em><tt>, input<sub><em>i</em></sub>)</tt>
 *   returns \p true (where <em>previous-item</em> is either the preceding item
 *   in the same thread or the last item in the previous thread).
 * - For <em>thread</em><sub>0</sub>, item <tt>input<sub>0</sub></tt> is compared
 *   against \p tile_predecessor_item.
 * - The flag <tt>tail_flags<sub><em>i</em></sub></tt> is set for item
 *   <tt>input<sub><em>i</em></sub></tt> when
 *   <tt>flag_op(input<sub><em>i</em></sub>, </tt><em>next-item</em><tt>)</tt>
 *   returns \p true (where <em>next-item</em> is either the next item
 *   in the same thread or the first item in the next thread).
 * - For <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>, item
 *   <tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> is compared
 *   against \p tile_successor_item.
 * - \blocked
 * - \granularity
 * - \smemreuse
 *
 * \par Snippet
 * The code snippet below illustrates the head- and tail-flagging of 512 integer items that
 * are partitioned in a [<em>blocked arrangement</em>](index.html#sec5sec3) across 128 threads
 * where each thread owns 4 consecutive items.
 * \par
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/block/block_discontinuity.cuh>
 *
 * __global__ void ExampleKernel(...)
 * {
 *     // Specialize BlockDiscontinuity for a 1D block of 128 threads on type int
 *     typedef cub::BlockDiscontinuity<int, 128> BlockDiscontinuity;
 *
 *     // Allocate shared memory for BlockDiscontinuity
 *     __shared__ typename BlockDiscontinuity::TempStorage temp_storage;
 *
 *     // Obtain a segment of consecutive items that are blocked across threads
 *     int thread_data[4];
 *     ...
 *
 *     // Have thread0 obtain the predecessor item for the entire tile
 *     int tile_predecessor_item;
 *     if (threadIdx.x == 0) tile_predecessor_item = ...
 *
 *     // Have thread127 obtain the successor item for the entire tile
 *     int tile_successor_item;
 *     if (threadIdx.x == 127) tile_successor_item = ...
 *
 *     // Collectively compute head and tail flags for discontinuities in the segment
 *     int head_flags[4];
 *     int tail_flags[4];
 *     BlockDiscontinuity(temp_storage).FlagHeadsAndTails(
 *         head_flags, tile_predecessor_item, tail_flags, tile_successor_item,
 *         thread_data, cub::Inequality());
 *
 * \endcode
 * \par
 * Suppose the set of input \p thread_data across the block of threads is
 * <tt>{ [0,0,1,1], [1,1,1,1], [2,3,3,3], ..., [124,125,125,125] }</tt>,
 * that the \p tile_predecessor_item is \p 0, and that the
 * \p tile_successor_item is \p 125.  The corresponding output \p head_flags
 * in those threads will be <tt>{ [0,0,1,0], [0,0,0,0], [1,1,0,0], [0,1,0,0], ... }</tt>,
 * and the corresponding output \p tail_flags in those threads will be
 * <tt>{ [0,1,0,0], [0,0,0,1], [1,0,0,...], ..., [1,0,0,0] }</tt>.
 *
 * \tparam ITEMS_PER_THREAD    <b>[inferred]</b> The number of consecutive items partitioned onto each thread.
 * \tparam FlagT               <b>[inferred]</b> The flag type (must be an integer type)
 * \tparam FlagOp              <b>[inferred]</b> Binary predicate functor type having member <tt>T operator()(const T &a, const T &b)</tt> or member <tt>T operator()(const T &a, const T &b, unsigned int b_index)</tt>, and returning \p true if a discontinuity exists between \p a and \p b, otherwise \p false.  \p b_index is the rank of b in the aggregate tile of data.
*/ template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T tile_predecessor_item, ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } //@} end member group }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
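The FlagHeadsAndTails overloads above are normally driven from a kernel that also supplies the items adjacent to the current tile, so that segment boundaries are neither lost nor spuriously flagged at tile edges. The sketch below is illustrative only and is not part of the vendored library: the kernel name, buffer names, sentinel values, and the assumption that the input is a whole number of tiles are editorial choices; only the cub::BlockDiscontinuity interface documented above is taken from the source.

#include <climits>
#include <cub/cub.cuh>

template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void FlagSegmentBoundaries(
    const int *d_in,            // input values (assumed a whole number of tiles)
    int       *d_head_flags,    // one flag per input item
    int       *d_tail_flags,    // one flag per input item
    int        num_items)
{
    typedef cub::BlockDiscontinuity<int, BLOCK_THREADS> BlockDiscontinuityT;
    __shared__ typename BlockDiscontinuityT::TempStorage temp_storage;

    // Blocked arrangement: thread t owns items [t*ITEMS_PER_THREAD, (t+1)*ITEMS_PER_THREAD)
    const int TILE_ITEMS  = BLOCK_THREADS * ITEMS_PER_THREAD;
    int       tile_base   = blockIdx.x * TILE_ITEMS;
    int       thread_base = tile_base + (threadIdx.x * ITEMS_PER_THREAD);

    int thread_data[ITEMS_PER_THREAD];
    #pragma unroll
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        thread_data[i] = d_in[thread_base + i];

    // Only thread0 / thread(BLOCK_THREADS-1) need meaningful neighbor items.
    // INT_MIN / INT_MAX are illustrative sentinels that force the very first head
    // and the very last tail to be flagged (assumes inputs avoid those values).
    int tile_predecessor_item = 0;
    int tile_successor_item   = 0;
    if (threadIdx.x == 0)
        tile_predecessor_item = (tile_base == 0) ? INT_MIN : d_in[tile_base - 1];
    if (threadIdx.x == BLOCK_THREADS - 1)
        tile_successor_item = (tile_base + TILE_ITEMS < num_items) ?
            d_in[tile_base + TILE_ITEMS] : INT_MAX;

    // Flag discontinuities against both in-tile and out-of-tile neighbors
    int head_flags[ITEMS_PER_THREAD];
    int tail_flags[ITEMS_PER_THREAD];
    BlockDiscontinuityT(temp_storage).FlagHeadsAndTails(
        head_flags, tile_predecessor_item,
        tail_flags, tile_successor_item,
        thread_data, cub::Inequality());

    // Store flags in the same blocked arrangement
    #pragma unroll
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
    {
        d_head_flags[thread_base + i] = head_flags[i];
        d_tail_flags[thread_base + i] = tail_flags[i];
    }
}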
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_scan_warp_scans3.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. */ #pragma once #include "../../util_arch.cuh" #include "../../util_ptx.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. 
*/ template < typename T, int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanWarpScans { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, /// Number of warp threads INNER_WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), OUTER_WARP_THREADS = BLOCK_THREADS / INNER_WARP_THREADS, /// Number of outer scan warps OUTER_WARPS = INNER_WARP_THREADS }; /// Outer WarpScan utility type typedef WarpScan<T, OUTER_WARP_THREADS, PTX_ARCH> OuterWarpScanT; /// Inner WarpScan utility type typedef WarpScan<T, INNER_WARP_THREADS, PTX_ARCH> InnerWarpScanT; typedef typename OuterWarpScanT::TempStorage OuterScanArray[OUTER_WARPS]; /// Shared memory storage layout type struct _TempStorage { union Aliasable { Uninitialized<OuterScanArray> outer_warp_scan; ///< Buffer for warp-synchronous outer scans typename InnerWarpScanT::TempStorage inner_warp_scan; ///< Buffer for warp-synchronous inner scan } aliasable; T warp_aggregates[OUTER_WARPS]; T block_aggregate; ///< Shared prefix for the entire thread block }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; unsigned int warp_id; unsigned int lane_id; //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanWarpScans( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((OUTER_WARPS == 1) ? 0 : linear_tid / OUTER_WARP_THREADS), lane_id((OUTER_WARPS == 1) ? linear_tid : linear_tid % OUTER_WARP_THREADS) {} //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); if (warp_id != 0) { // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) { temp_storage.warp_aggregates[warp_id] = inclusive_output; } CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, initial_value, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. The call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).Scan( input, inclusive_output, exclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { InnerWarpScanT inner_scan(temp_storage.aliasable.inner_warp_scan); T upsweep = temp_storage.warp_aggregates[linear_tid]; T downsweep_prefix, block_aggregate; inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); // Use callback functor to get block prefix in lane0 and then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = inner_scan.Broadcast(block_prefix, 0); downsweep_prefix = scan_op(block_prefix, downsweep_prefix); if (linear_tid == 0) downsweep_prefix = block_prefix; temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; } CTA_SYNC(); // Apply warp prefix to our lane's partial (or assign it if partial is invalid) T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; exclusive_output = scan_op(outer_warp_exclusive, exclusive_output); if (lane_id == 0) exclusive_output = outer_warp_exclusive; } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. 
template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; InclusiveScan(input, inclusive_output, scan_op, block_aggregate); } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).InclusiveScan( input, inclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { T outer_warp_input = temp_storage.warp_aggregates[linear_tid]; T outer_warp_exclusive; InnerWarpScanT(temp_storage.aliasable.inner_warp_scan).ExclusiveScan( outer_warp_input, outer_warp_exclusive, scan_op, block_aggregate); temp_storage.block_aggregate = block_aggregate; temp_storage.warp_aggregates[linear_tid] = outer_warp_exclusive; } CTA_SYNC(); if (warp_id != 0) { // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
OuterWarpScanT(temp_storage.aliasable.outer_warp_scan.Alias()[warp_id]).InclusiveScan( input, inclusive_output, scan_op); // Share outer warp total if (lane_id == OUTER_WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); if (linear_tid < INNER_WARP_THREADS) { InnerWarpScanT inner_scan(temp_storage.aliasable.inner_warp_scan); T upsweep = temp_storage.warp_aggregates[linear_tid]; T downsweep_prefix, block_aggregate; inner_scan.ExclusiveScan(upsweep, downsweep_prefix, scan_op, block_aggregate); // Use callback functor to get block prefix in lane0 and then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = inner_scan.Broadcast(block_prefix, 0); downsweep_prefix = scan_op(block_prefix, downsweep_prefix); if (linear_tid == 0) downsweep_prefix = block_prefix; temp_storage.warp_aggregates[linear_tid] = downsweep_prefix; } CTA_SYNC(); // Apply warp prefix to our lane's partial T outer_warp_exclusive = temp_storage.warp_aggregates[warp_id]; inclusive_output = scan_op(outer_warp_exclusive, inclusive_output); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
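BlockScanWarpScans is an internal specialization: user code does not instantiate it directly, but reaches it through the public cub::BlockScan wrapper by requesting the BLOCK_SCAN_WARP_SCANS algorithm (which concrete warp-scans variant is compiled depends on the CUB version and target architecture). A minimal sketch of that public path follows; the kernel and buffer names are assumptions, and each block is assumed to own exactly one 128-item tile.

#include <cub/cub.cuh>

__global__ void TileExclusiveSum(const int *d_in, int *d_out, int *d_tile_totals)
{
    // Request the warpscan-based block-scan algorithm for a 1D block of 128 threads
    typedef cub::BlockScan<int, 128, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    int idx         = blockIdx.x * 128 + threadIdx.x;
    int thread_data = d_in[idx];

    // Block-wide exclusive prefix sum; block_aggregate receives the tile total
    int block_aggregate;
    BlockScanT(temp_storage).ExclusiveSum(thread_data, thread_data, block_aggregate);

    d_out[idx] = thread_data;
    if (threadIdx.x == 0)
        d_tile_totals[blockIdx.x] = block_aggregate;
}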
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_scan_warp_scans2.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. */ #pragma once #include "../../util_arch.cuh" #include "../../util_ptx.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. 
*/ template < typename T, int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanWarpScans { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// Number of warp threads WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, /// Number of active warps WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, }; /// WarpScan utility type typedef WarpScan<T, WARP_THREADS, PTX_ARCH> WarpScanT; /// WarpScan utility type typedef WarpScan<T, WARPS, PTX_ARCH> WarpAggregateScanT; /// Shared memory storage layout type struct _TempStorage { typename WarpAggregateScanT::TempStorage inner_scan[WARPS]; ///< Buffer for warp-synchronous scans typename WarpScanT::TempStorage warp_scan[WARPS]; ///< Buffer for warp-synchronous scans T warp_aggregates[WARPS]; T block_prefix; ///< Shared prefix for the entire thread block }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; unsigned int warp_id; unsigned int lane_id; //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanWarpScans( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), lane_id(LaneId()) {} //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- template <typename ScanOp, int WARP> __device__ __forceinline__ void ApplyWarpAggregates( T &warp_prefix, ///< [out] The calling thread's partial reduction ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items Int2Type<WARP> addend_warp) { if (warp_id == WARP) warp_prefix = block_aggregate; T addend = temp_storage.warp_aggregates[WARP]; block_aggregate = scan_op(block_aggregate, addend); ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<WARP + 1>()); } template <typename ScanOp> __device__ __forceinline__ void ApplyWarpAggregates( T &warp_prefix, ///< [out] The calling thread's partial reduction ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items Int2Type<WARPS> addend_warp) {} /// Use the warp-wide aggregates to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. 
template <typename ScanOp> __device__ __forceinline__ T ComputeWarpPrefix( ScanOp scan_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>WARP_THREADS - 1</sub> only]</b> Warp-wide aggregate reduction of input items T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Last lane in each warp shares its warp-aggregate if (lane_id == WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = warp_aggregate; CTA_SYNC(); // Accumulate block aggregates and save the one that is our warp's prefix T warp_prefix; block_aggregate = temp_storage.warp_aggregates[0]; // Use template unrolling (since the PTX backend can't handle unrolling it for SM1x) ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<1>()); /* #pragma unroll for (int WARP = 1; WARP < WARPS; ++WARP) { if (warp_id == WARP) warp_prefix = block_aggregate; T addend = temp_storage.warp_aggregates[WARP]; block_aggregate = scan_op(block_aggregate, addend); } */ return warp_prefix; } /// Use the warp-wide aggregates and initial-value to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. template <typename ScanOp> __device__ __forceinline__ T ComputeWarpPrefix( ScanOp scan_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>WARP_THREADS - 1</sub> only]</b> Warp-wide aggregate reduction of input items T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items const T &initial_value) ///< [in] Initial value to seed the exclusive scan { T warp_prefix = ComputeWarpPrefix(scan_op, warp_aggregate, block_aggregate); warp_prefix = scan_op(initial_value, warp_prefix); if (warp_id == 0) warp_prefix = initial_value; return warp_prefix; } //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { WarpScanT my_warp_scan(temp_storage.warp_scan[warp_id]); // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; my_warp_scan.Scan(input, inclusive_output, exclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. // T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); //-------------------------------------------------- // Last lane in each warp shares its warp-aggregate if (lane_id == WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); // Get the warp scan partial T warp_inclusive, warp_prefix; if (lane_id < WARPS) { // Scan the warpscan partials T warp_val = temp_storage.warp_aggregates[lane_id]; WarpAggregateScanT(temp_storage.inner_scan[warp_id]).Scan(warp_val, warp_inclusive, warp_prefix, scan_op); } warp_prefix = my_warp_scan.Broadcast(warp_prefix, warp_id); block_aggregate = my_warp_scan.Broadcast(warp_inclusive, WARPS - 1); //-------------------------------------------------- // Apply warp prefix to our lane's partial if (warp_id != 0) { exclusive_output = scan_op(warp_prefix, exclusive_output); if (lane_id == 0) exclusive_output = warp_prefix; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { WarpScanT my_warp_scan(temp_storage.warp_scan[warp_id]); // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. 
T inclusive_output; my_warp_scan.Scan(input, inclusive_output, exclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp // T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate, initial_value); //-------------------------------------------------- // Last lane in each warp shares its warp-aggregate if (lane_id == WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = inclusive_output; CTA_SYNC(); // Get the warp scan partial T warp_inclusive, warp_prefix; if (lane_id < WARPS) { // Scan the warpscan partials T warp_val = temp_storage.warp_aggregates[lane_id]; WarpAggregateScanT(temp_storage.inner_scan[warp_id]).Scan(warp_val, warp_inclusive, warp_prefix, initial_value, scan_op); } warp_prefix = my_warp_scan.Broadcast(warp_prefix, warp_id); block_aggregate = my_warp_scan.Broadcast(warp_inclusive, WARPS - 1); //-------------------------------------------------- // Apply warp prefix to our lane's partial exclusive_output = scan_op(warp_prefix, exclusive_output); if (lane_id == 0) exclusive_output = warp_prefix; } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); // Use the first warp to determine the thread block prefix, returning the result in lane0 if (warp_id == 0) { T block_prefix = block_prefix_callback_op(block_aggregate); if (lane_id == 0) { // Share the prefix with all threads temp_storage.block_prefix = block_prefix; exclusive_output = block_prefix; // The block prefix is the exclusive output for tid0 } } CTA_SYNC(); // Incorporate thread block prefix into outputs T block_prefix = temp_storage.block_prefix; if (linear_tid > 0) { exclusive_output = scan_op(block_prefix, exclusive_output); } } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; InclusiveScan(input, inclusive_output, scan_op, block_aggregate); } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. 
Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { WarpScanT(temp_storage.warp_scan[warp_id]).InclusiveScan(input, inclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); // Apply warp prefix to our lane's partial if (warp_id != 0) { inclusive_output = scan_op(warp_prefix, inclusive_output); } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { T block_aggregate; InclusiveScan(input, exclusive_output, scan_op, block_aggregate); // Use the first warp to determine the thread block prefix, returning the result in lane0 if (warp_id == 0) { T block_prefix = block_prefix_callback_op(block_aggregate); if (lane_id == 0) { // Share the prefix with all threads temp_storage.block_prefix = block_prefix; } } CTA_SYNC(); // Incorporate thread block prefix into outputs T block_prefix = temp_storage.block_prefix; exclusive_output = scan_op(block_prefix, exclusive_output); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
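The ExclusiveScan/InclusiveScan overloads that take a block_prefix_callback_op implement CUB's running-prefix pattern: the first warp invokes the functor once per tile, and the value returned by lane0 seeds that tile's scan. The sketch below drives this through the public cub::BlockScan interface rather than the internal specialization above; it assumes a single-block launch (e.g. <<<1, 128>>>) and uses illustrative names.

#include <cub/cub.cuh>

// Stateful functor that returns the running total as each tile's prefix.
// BlockScan invokes it in the first warp only; lane0's result is broadcast.
struct RunningPrefixOp
{
    int running_total;

    __device__ RunningPrefixOp(int initial) : running_total(initial) {}

    __device__ int operator()(int block_aggregate)
    {
        int old_prefix = running_total;
        running_total += block_aggregate;
        return old_prefix;          // exclusive prefix for the current tile
    }
};

__global__ void SingleBlockExclusiveSum(int *d_data, int num_items)
{
    typedef cub::BlockScan<int, 128> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    RunningPrefixOp prefix_op(0);

    // One 128-item tile per iteration, each seeded by the running prefix
    for (int tile_base = 0; tile_base < num_items; tile_base += 128)
    {
        int idx  = tile_base + threadIdx.x;
        int item = (idx < num_items) ? d_data[idx] : 0;

        BlockScanT(temp_storage).ExclusiveSum(item, item, prefix_op);

        __syncthreads();    // temp_storage is reused by the next iteration

        if (idx < num_items)
            d_data[idx] = item;
    }
}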
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_scan_warp_scans.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockScanWarpscans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. */ #pragma once #include "../../util_arch.cuh" #include "../../util_ptx.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanWarpScans provides warpscan-based variants of parallel prefix scan across a CUDA thread block. 
*/ template < typename T, int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanWarpScans { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// Number of warp threads WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, /// Number of active warps WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, }; /// WarpScan utility type typedef WarpScan<T, WARP_THREADS, PTX_ARCH> WarpScanT; /// WarpScan utility type typedef WarpScan<T, WARPS, PTX_ARCH> WarpAggregateScan; /// Shared memory storage layout type struct __align__(32) _TempStorage { T warp_aggregates[WARPS]; typename WarpScanT::TempStorage warp_scan[WARPS]; ///< Buffer for warp-synchronous scans T block_prefix; ///< Shared prefix for the entire thread block }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; unsigned int warp_id; unsigned int lane_id; //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanWarpScans( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), lane_id(LaneId()) {} //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- template <typename ScanOp, int WARP> __device__ __forceinline__ void ApplyWarpAggregates( T &warp_prefix, ///< [out] The calling thread's partial reduction ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items Int2Type<WARP> /*addend_warp*/) { if (warp_id == WARP) warp_prefix = block_aggregate; T addend = temp_storage.warp_aggregates[WARP]; block_aggregate = scan_op(block_aggregate, addend); ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<WARP + 1>()); } template <typename ScanOp> __device__ __forceinline__ void ApplyWarpAggregates( T &/*warp_prefix*/, ///< [out] The calling thread's partial reduction ScanOp /*scan_op*/, ///< [in] Binary scan operator T &/*block_aggregate*/, ///< [out] Threadblock-wide aggregate reduction of input items Int2Type<WARPS> /*addend_warp*/) {} /// Use the warp-wide aggregates to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. 
template <typename ScanOp> __device__ __forceinline__ T ComputeWarpPrefix( ScanOp scan_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>WARP_THREADS - 1</sub> only]</b> Warp-wide aggregate reduction of input items T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Last lane in each warp shares its warp-aggregate if (lane_id == WARP_THREADS - 1) temp_storage.warp_aggregates[warp_id] = warp_aggregate; CTA_SYNC(); // Accumulate block aggregates and save the one that is our warp's prefix T warp_prefix; block_aggregate = temp_storage.warp_aggregates[0]; // Use template unrolling (since the PTX backend can't handle unrolling it for SM1x) ApplyWarpAggregates(warp_prefix, scan_op, block_aggregate, Int2Type<1>()); /* #pragma unroll for (int WARP = 1; WARP < WARPS; ++WARP) { if (warp_id == WARP) warp_prefix = block_aggregate; T addend = temp_storage.warp_aggregates[WARP]; block_aggregate = scan_op(block_aggregate, addend); } */ return warp_prefix; } /// Use the warp-wide aggregates and initial-value to compute the calling warp's prefix. Also returns block-wide aggregate in all threads. template <typename ScanOp> __device__ __forceinline__ T ComputeWarpPrefix( ScanOp scan_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>WARP_THREADS - 1</sub> only]</b> Warp-wide aggregate reduction of input items T &block_aggregate, ///< [out] Threadblock-wide aggregate reduction of input items const T &initial_value) ///< [in] Initial value to seed the exclusive scan { T warp_prefix = ComputeWarpPrefix(scan_op, warp_aggregate, block_aggregate); warp_prefix = scan_op(initial_value, warp_prefix); if (warp_id == 0) warp_prefix = initial_value; return warp_prefix; } //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; ExclusiveScan(input, exclusive_output, initial_value, scan_op, block_aggregate); } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); // Apply warp prefix to our lane's partial if (warp_id != 0) { exclusive_output = scan_op(warp_prefix, exclusive_output); if (lane_id == 0) exclusive_output = warp_prefix; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &exclusive_output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { // Compute warp scan in each warp. The exclusive output from each lane0 is invalid. T inclusive_output; WarpScanT(temp_storage.warp_scan[warp_id]).Scan(input, inclusive_output, exclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate, initial_value); // Apply warp prefix to our lane's partial exclusive_output = scan_op(warp_prefix, exclusive_output); if (lane_id == 0) exclusive_output = warp_prefix; } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. { // Compute block-wide exclusive scan. The exclusive output from tid0 is invalid. 
T block_aggregate; ExclusiveScan(input, exclusive_output, scan_op, block_aggregate); // Use the first warp to determine the thread block prefix, returning the result in lane0 if (warp_id == 0) { T block_prefix = block_prefix_callback_op(block_aggregate); if (lane_id == 0) { // Share the prefix with all threads temp_storage.block_prefix = block_prefix; exclusive_output = block_prefix; // The block prefix is the exclusive output for tid0 } } CTA_SYNC(); // Incorporate thread block prefix into outputs T block_prefix = temp_storage.block_prefix; if (linear_tid > 0) { exclusive_output = scan_op(block_prefix, exclusive_output); } } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { T block_aggregate; InclusiveScan(input, inclusive_output, scan_op, block_aggregate); } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &inclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { WarpScanT(temp_storage.warp_scan[warp_id]).InclusiveScan(input, inclusive_output, scan_op); // Compute the warp-wide prefix and block-wide aggregate for each warp. Warp prefix for warp0 is invalid. T warp_prefix = ComputeWarpPrefix(scan_op, inclusive_output, block_aggregate); // Apply warp prefix to our lane's partial if (warp_id != 0) { inclusive_output = scan_op(warp_prefix, inclusive_output); } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. 
{ T block_aggregate; InclusiveScan(input, exclusive_output, scan_op, block_aggregate); // Use the first warp to determine the thread block prefix, returning the result in lane0 if (warp_id == 0) { T block_prefix = block_prefix_callback_op(block_aggregate); if (lane_id == 0) { // Share the prefix with all threads temp_storage.block_prefix = block_prefix; } } CTA_SYNC(); // Incorporate thread block prefix into outputs T block_prefix = temp_storage.block_prefix; exclusive_output = scan_op(block_prefix, exclusive_output); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
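// A minimal usage sketch, assuming the standard cub::BlockScan public wrapper is available
// alongside this specialization (the include path shown follows stock CUB, not necessarily
// this vendored tree): requesting BLOCK_SCAN_WARP_SCANS routes block-wide scans through the
// warp-scans specialization above. Kernel and pointer names (ExampleWarpScanKernel, d_in,
// d_out) are illustrative assumptions.
#include <cub/block/block_scan.cuh>

__global__ void ExampleWarpScanKernel(const int *d_in, int *d_out)
{
    // 128 threads per block, one item per thread, warp-scans-based specialization
    typedef cub::BlockScan<int, 128, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    int item = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // Block-wide exclusive prefix sum (thread0 receives the identity, 0)
    BlockScanT(temp_storage).ExclusiveSum(item, item);

    d_out[blockIdx.x * blockDim.x + threadIdx.x] = item;
}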
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_reduce_warp_reductions.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. */ #pragma once #include "../../warp/warp_reduce.cuh" #include "../../util_ptx.cuh" #include "../../util_arch.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockReduceWarpReductions provides variants of warp-reduction-based parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. 
*/ template < typename T, ///< Data type being reduced int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockReduceWarpReductions { /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, /// Number of warp threads WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), /// Number of active warps WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, /// The logical warp size for warp reductions LOGICAL_WARP_SIZE = CUB_MIN(BLOCK_THREADS, WARP_THREADS), /// Whether or not the logical warp size evenly divides the thread block size EVEN_WARP_MULTIPLE = (BLOCK_THREADS % LOGICAL_WARP_SIZE == 0) }; /// WarpReduce utility type typedef typename WarpReduce<T, LOGICAL_WARP_SIZE, PTX_ARCH>::InternalWarpReduce WarpReduce; /// Shared memory storage layout type struct _TempStorage { typename WarpReduce::TempStorage warp_reduce[WARPS]; ///< Buffer for warp-synchronous scan T warp_aggregates[WARPS]; ///< Shared totals from each warp-synchronous scan T block_prefix; ///< Shared prefix for the entire thread block }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; unsigned int warp_id; unsigned int lane_id; /// Constructor __device__ __forceinline__ BlockReduceWarpReductions( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)), warp_id((WARPS == 1) ? 0 : linear_tid / WARP_THREADS), lane_id(LaneId()) {} template <bool FULL_TILE, typename ReductionOp, int SUCCESSOR_WARP> __device__ __forceinline__ T ApplyWarpAggregates( ReductionOp reduction_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>0</sub> only]</b> Warp-wide aggregate reduction of input items int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) Int2Type<SUCCESSOR_WARP> /*successor_warp*/) { if (FULL_TILE || (SUCCESSOR_WARP * LOGICAL_WARP_SIZE < num_valid)) { T addend = temp_storage.warp_aggregates[SUCCESSOR_WARP]; warp_aggregate = reduction_op(warp_aggregate, addend); } return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid, Int2Type<SUCCESSOR_WARP + 1>()); } template <bool FULL_TILE, typename ReductionOp> __device__ __forceinline__ T ApplyWarpAggregates( ReductionOp /*reduction_op*/, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>0</sub> only]</b> Warp-wide aggregate reduction of input items int /*num_valid*/, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) Int2Type<WARPS> /*successor_warp*/) { return warp_aggregate; } /// Returns block-wide aggregate in <em>thread</em><sub>0</sub>. 
template < bool FULL_TILE, typename ReductionOp> __device__ __forceinline__ T ApplyWarpAggregates( ReductionOp reduction_op, ///< [in] Binary scan operator T warp_aggregate, ///< [in] <b>[<em>lane</em><sub>0</sub> only]</b> Warp-wide aggregate reduction of input items int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) { // Share lane aggregates if (lane_id == 0) { temp_storage.warp_aggregates[warp_id] = warp_aggregate; } CTA_SYNC(); // Update total aggregate in warp 0, lane 0 if (linear_tid == 0) { warp_aggregate = ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid, Int2Type<1>()); } return warp_aggregate; } /// Computes a thread block-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. template <bool FULL_TILE> __device__ __forceinline__ T Sum( T input, ///< [in] Calling thread's input partial reductions int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) { cub::Sum reduction_op; unsigned int warp_offset = warp_id * LOGICAL_WARP_SIZE; unsigned int warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ? LOGICAL_WARP_SIZE : (warp_offset < num_valid) ? num_valid - warp_offset : 0; // Warp reduction in every warp T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>( input, warp_num_valid, cub::Sum()); // Update outputs and block_aggregate with warp-wide aggregates from lane-0s return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid); } /// Computes a thread block-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. template < bool FULL_TILE, typename ReductionOp> __device__ __forceinline__ T Reduce( T input, ///< [in] Calling thread's input partial reductions int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) ReductionOp reduction_op) ///< [in] Binary reduction operator { unsigned int warp_offset = warp_id * LOGICAL_WARP_SIZE; unsigned int warp_num_valid = (FULL_TILE && EVEN_WARP_MULTIPLE) ? LOGICAL_WARP_SIZE : (warp_offset < static_cast<unsigned int>(num_valid)) ? num_valid - warp_offset : 0; // Warp reduction in every warp T warp_aggregate = WarpReduce(temp_storage.warp_reduce[warp_id]).template Reduce<(FULL_TILE && EVEN_WARP_MULTIPLE), 1>( input, warp_num_valid, reduction_op); // Update outputs and block_aggregate with warp-wide aggregates from lane-0s return ApplyWarpAggregates<FULL_TILE>(reduction_op, warp_aggregate, num_valid); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
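// A minimal usage sketch, assuming the standard cub::BlockReduce public wrapper is available:
// requesting BLOCK_REDUCE_WARP_REDUCTIONS routes block-wide reductions through the
// BlockReduceWarpReductions specialization above. Kernel and pointer names
// (ExampleWarpReduceKernel, d_in, d_block_sums) are illustrative assumptions.
#include <cub/block/block_reduce.cuh>

__global__ void ExampleWarpReduceKernel(const float *d_in, float *d_block_sums)
{
    typedef cub::BlockReduce<float, 256, cub::BLOCK_REDUCE_WARP_REDUCTIONS> BlockReduceT;
    __shared__ typename BlockReduceT::TempStorage temp_storage;

    float item = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // Block-wide sum of one item per thread; the aggregate is only valid in thread0
    float block_sum = BlockReduceT(temp_storage).Sum(item);

    if (threadIdx.x == 0)
        d_block_sums[blockIdx.x] = block_sum;
}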
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_histogram_sort.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::BlockHistogramSort class provides sorting-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. */ #pragma once #include "../../block/block_radix_sort.cuh" #include "../../block/block_discontinuity.cuh" #include "../../util_ptx.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockHistogramSort class provides sorting-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. */ template < typename T, ///< Sample type int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int ITEMS_PER_THREAD, ///< The number of samples per thread int BINS, ///< The number of bins into which histogram samples may fall int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockHistogramSort { /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; // Parameterize BlockRadixSort type for our thread block typedef BlockRadixSort< T, BLOCK_DIM_X, ITEMS_PER_THREAD, NullType, 4, (PTX_ARCH >= 350) ? 
true : false, BLOCK_SCAN_WARP_SCANS, cudaSharedMemBankSizeFourByte, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockRadixSortT; // Parameterize BlockDiscontinuity type for our thread block typedef BlockDiscontinuity< T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> BlockDiscontinuityT; /// Shared memory union _TempStorage { // Storage for sorting bin values typename BlockRadixSortT::TempStorage sort; struct { // Storage for detecting discontinuities in the tile of sorted bin values typename BlockDiscontinuityT::TempStorage flag; // Storage for noting begin/end offsets of bin runs in the tile of sorted bin values unsigned int run_begin[BINS]; unsigned int run_end[BINS]; }; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; /// Constructor __device__ __forceinline__ BlockHistogramSort( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} // Discontinuity functor struct DiscontinuityOp { // Reference to temp_storage _TempStorage &temp_storage; // Constructor __device__ __forceinline__ DiscontinuityOp(_TempStorage &temp_storage) : temp_storage(temp_storage) {} // Discontinuity predicate __device__ __forceinline__ bool operator()(const T &a, const T &b, int b_index) { if (a != b) { // Note the begin/end offsets in shared storage temp_storage.run_begin[b] = b_index; temp_storage.run_end[a] = b_index; return true; } else { return false; } } }; // Composite data onto an existing histogram template < typename CounterT > __device__ __forceinline__ void Composite( T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram { enum { TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD }; // Sort bytes in blocked arrangement BlockRadixSortT(temp_storage.sort).Sort(items); CTA_SYNC(); // Initialize the shared memory's run_begin and run_end for each bin int histo_offset = 0; #pragma unroll for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) { temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE; temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE; } // Finish up with guarded initialization if necessary if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) { temp_storage.run_begin[histo_offset + linear_tid] = TILE_SIZE; temp_storage.run_end[histo_offset + linear_tid] = TILE_SIZE; } CTA_SYNC(); int flags[ITEMS_PER_THREAD]; // unused // Compute head flags to demarcate contiguous runs of the same bin in the sorted tile DiscontinuityOp flag_op(temp_storage); BlockDiscontinuityT(temp_storage.flag).FlagHeads(flags, items, flag_op); // Update begin for first item if (linear_tid == 0) temp_storage.run_begin[items[0]] = 0; CTA_SYNC(); // Composite into histogram histo_offset = 0; #pragma unroll for(; histo_offset + BLOCK_THREADS <= BINS; histo_offset += BLOCK_THREADS) { int thread_offset = histo_offset + linear_tid; CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset]; histogram[thread_offset] += count; } // Finish up with guarded composition if necessary if ((BINS % BLOCK_THREADS != 0) && (histo_offset + linear_tid < BINS)) { int thread_offset = histo_offset + linear_tid; CounterT count = temp_storage.run_end[thread_offset] - temp_storage.run_begin[thread_offset]; histogram[thread_offset] += count; } } }; } // CUB 
namespace CUB_NS_POSTFIX // Optional outer namespace(s)
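// A minimal usage sketch, assuming the standard cub::BlockHistogram public wrapper is
// available: requesting BLOCK_HISTO_SORT selects the sorting-based BlockHistogramSort
// specialization above. The kernel name (ExampleSortHistogramKernel) and pointers
// (d_samples, d_histograms) are illustrative assumptions.
#include <cub/block/block_histogram.cuh>

__global__ void ExampleSortHistogramKernel(const unsigned char *d_samples, unsigned int *d_histograms)
{
    // 128 threads, 4 samples per thread, 256 bins, sorting-based composition
    typedef cub::BlockHistogram<unsigned char, 128, 4, 256, cub::BLOCK_HISTO_SORT> BlockHistogramT;
    __shared__ typename BlockHistogramT::TempStorage temp_storage;
    __shared__ unsigned int smem_histogram[256];

    // Load a blocked arrangement of samples (tile of 128 * 4 items per block)
    unsigned char samples[4];
    for (int i = 0; i < 4; ++i)
        samples[i] = d_samples[(blockIdx.x * 128 + threadIdx.x) * 4 + i];

    // Zero-initialize the shared-memory bins and composite this tile into them
    BlockHistogramT(temp_storage).Histogram(samples, smem_histogram);

    __syncthreads();

    // Write this block's histogram to global memory
    for (int bin = threadIdx.x; bin < 256; bin += 128)
        d_histograms[blockIdx.x * 256 + bin] = smem_histogram[bin];
}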
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_scan_raking.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA thread block. */ #pragma once #include "../../util_ptx.cuh" #include "../../util_arch.cuh" #include "../../block/block_raking_layout.cuh" #include "../../thread/thread_reduce.cuh" #include "../../thread/thread_scan.cuh" #include "../../warp/warp_scan.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockScanRaking provides variants of raking-based parallel prefix scan across a CUDA thread block. 
*/ template < typename T, ///< Data type being scanned int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension bool MEMOIZE, ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockScanRaking { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /// Layout type for padded thread block raking grid typedef BlockRakingLayout<T, BLOCK_THREADS, PTX_ARCH> BlockRakingLayout; /// Constants enum { /// Number of raking threads RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, /// Number of raking elements per warp synchronous raking thread SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, /// Cooperative work can be entirely warp synchronous WARP_SYNCHRONOUS = (BLOCK_THREADS == RAKING_THREADS), }; /// WarpScan utility type typedef WarpScan<T, RAKING_THREADS, PTX_ARCH> WarpScan; /// Shared memory storage layout type struct _TempStorage { typename WarpScan::TempStorage warp_scan; ///< Buffer for warp-synchronous scan typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid T block_aggregate; ///< Block aggregate }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; //--------------------------------------------------------------------- // Per-thread fields //--------------------------------------------------------------------- // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; T cached_segment[SEGMENT_LENGTH]; //--------------------------------------------------------------------- // Utility methods //--------------------------------------------------------------------- /// Templated reduction template <int ITERATION, typename ScanOp> __device__ __forceinline__ T GuardedReduce( T* raking_ptr, ///< [in] Input array ScanOp scan_op, ///< [in] Binary reduction operator T raking_partial, ///< [in] Prefix to seed reduction with Int2Type<ITERATION> /*iteration*/) { if ((BlockRakingLayout::UNGUARDED) || (((linear_tid * SEGMENT_LENGTH) + ITERATION) < BLOCK_THREADS)) { T addend = raking_ptr[ITERATION]; raking_partial = scan_op(raking_partial, addend); } return GuardedReduce(raking_ptr, scan_op, raking_partial, Int2Type<ITERATION + 1>()); } /// Templated reduction (base case) template <typename ScanOp> __device__ __forceinline__ T GuardedReduce( T* /*raking_ptr*/, ///< [in] Input array ScanOp /*scan_op*/, ///< [in] Binary reduction operator T raking_partial, ///< [in] Prefix to seed reduction with Int2Type<SEGMENT_LENGTH> /*iteration*/) { return raking_partial; } /// Templated copy template <int ITERATION> __device__ __forceinline__ void CopySegment( T* out, ///< [out] Out array T* in, ///< [in] Input array Int2Type<ITERATION> /*iteration*/) { out[ITERATION] = in[ITERATION]; CopySegment(out, in, Int2Type<ITERATION + 1>()); } /// Templated copy (base case) __device__ __forceinline__ void CopySegment( T* /*out*/, ///< [out] Out array T* /*in*/, ///< [in] Input array Int2Type<SEGMENT_LENGTH> /*iteration*/) {} /// Performs 
upsweep raking reduction, returning the aggregate template <typename ScanOp> __device__ __forceinline__ T Upsweep( ScanOp scan_op) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data into registers CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); T raking_partial = cached_segment[0]; return GuardedReduce(cached_segment, scan_op, raking_partial, Int2Type<1>()); } /// Performs exclusive downsweep raking scan template <typename ScanOp> __device__ __forceinline__ void ExclusiveDownsweep( ScanOp scan_op, T raking_partial, bool apply_prefix = true) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data back into registers if (!MEMOIZE) { CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); } internal::ThreadScanExclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); // Write data back to smem CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); } /// Performs inclusive downsweep raking scan template <typename ScanOp> __device__ __forceinline__ void InclusiveDownsweep( ScanOp scan_op, T raking_partial, bool apply_prefix = true) { T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); // Read data back into registers if (!MEMOIZE) { CopySegment(cached_segment, smem_raking_ptr, Int2Type<0>()); } internal::ThreadScanInclusive(cached_segment, cached_segment, scan_op, raking_partial, apply_prefix); // Write data back to smem CopySegment(smem_raking_ptr, cached_segment, Int2Type<0>()); } //--------------------------------------------------------------------- // Constructors //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ BlockScanRaking( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //--------------------------------------------------------------------- // Exclusive scans //--------------------------------------------------------------------- /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, exclusive_output, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); } CTA_SYNC(); // Grab thread prefix from shared memory exclusive_output = *placement_ptr; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. 
Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Exclusive Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial); } CTA_SYNC(); // Grab exclusive partial from shared memory output = *placement_ptr; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. With no initial value, the output computed for <em>thread</em><sub>0</sub> is undefined. template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial= Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); // Broadcast aggregate to all threads if (linear_tid == RAKING_THREADS - 1) temp_storage.block_aggregate = inclusive_partial; } CTA_SYNC(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input items T &output, ///< [out] Calling thread's output items (may be aliased to \p input) const T &initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).ExclusiveScan(input, output, initial_value, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, initial_value, scan_op, block_aggregate); // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, exclusive_partial); // Broadcast aggregate to other threads if (linear_tid == 0) temp_storage.block_aggregate = block_aggregate; } CTA_SYNC(); // Grab exclusive partial from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an exclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. 
{ if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan T block_aggregate; WarpScan warp_scan(temp_storage.warp_scan); warp_scan.ExclusiveScan(input, output, scan_op, block_aggregate); // Obtain warp-wide prefix in lane0, then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = warp_scan.Broadcast(block_prefix, 0); output = scan_op(block_prefix, output); if (linear_tid == 0) output = block_prefix; } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { WarpScan warp_scan(temp_storage.warp_scan); // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T exclusive_partial, block_aggregate; warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); // Obtain block-wide prefix in lane0, then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = warp_scan.Broadcast(block_prefix, 0); // Update prefix with warpscan exclusive partial T downsweep_prefix = scan_op(block_prefix, exclusive_partial); if (linear_tid == 0) downsweep_prefix = block_prefix; // Exclusive raking downsweep scan ExclusiveDownsweep(scan_op, downsweep_prefix); } CTA_SYNC(); // Grab thread prefix from shared memory output = *placement_ptr; } } //--------------------------------------------------------------------- // Inclusive scans //--------------------------------------------------------------------- /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Exclusive Warp-synchronous scan T exclusive_partial; WarpScan(temp_storage.warp_scan).ExclusiveScan(upsweep_partial, exclusive_partial, scan_op); // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); } CTA_SYNC(); // Grab thread prefix from shared memory output = *placement_ptr; } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. Also provides every thread with the block-wide \p block_aggregate of all inputs. 
template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T &block_aggregate) ///< [out] Threadblock-wide aggregate reduction of input items { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan WarpScan(temp_storage.warp_scan).InclusiveScan(input, output, scan_op, block_aggregate); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T inclusive_partial; T exclusive_partial; WarpScan(temp_storage.warp_scan).Scan(upsweep_partial, inclusive_partial, exclusive_partial, scan_op); // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, exclusive_partial, (linear_tid != 0)); // Broadcast aggregate to all threads if (linear_tid == RAKING_THREADS - 1) temp_storage.block_aggregate = inclusive_partial; } CTA_SYNC(); // Grab thread prefix from shared memory output = *placement_ptr; // Retrieve block aggregate block_aggregate = temp_storage.block_aggregate; } } /// Computes an inclusive thread block-wide prefix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_prefix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically prefixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs. template < typename ScanOp, typename BlockPrefixCallbackOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item T &output, ///< [out] Calling thread's output item (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator BlockPrefixCallbackOp &block_prefix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide prefix to be applied to all inputs. 
{ if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp-synchronous scan T block_aggregate; WarpScan warp_scan(temp_storage.warp_scan); warp_scan.InclusiveScan(input, output, scan_op, block_aggregate); // Obtain warp-wide prefix in lane0, then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = warp_scan.Broadcast(block_prefix, 0); // Update prefix with exclusive warpscan partial output = scan_op(block_prefix, output); } else { // Place thread partial into shared memory raking grid T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid); *placement_ptr = input; CTA_SYNC(); // Reduce parallelism down to just raking threads if (linear_tid < RAKING_THREADS) { WarpScan warp_scan(temp_storage.warp_scan); // Raking upsweep reduction across shared partials T upsweep_partial = Upsweep(scan_op); // Warp-synchronous scan T exclusive_partial, block_aggregate; warp_scan.ExclusiveScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate); // Obtain block-wide prefix in lane0, then broadcast to other lanes T block_prefix = block_prefix_callback_op(block_aggregate); block_prefix = warp_scan.Broadcast(block_prefix, 0); // Update prefix with warpscan exclusive partial T downsweep_prefix = scan_op(block_prefix, exclusive_partial); if (linear_tid == 0) downsweep_prefix = block_prefix; // Inclusive raking downsweep scan InclusiveDownsweep(scan_op, downsweep_prefix); } CTA_SYNC(); // Grab thread prefix from shared memory output = *placement_ptr; } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
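// A minimal usage sketch, assuming the standard cub::BlockScan public wrapper is available:
// requesting BLOCK_SCAN_RAKING (or BLOCK_SCAN_RAKING_MEMOIZE, which enables the MEMOIZE
// flag above) selects this raking specialization. Kernel and pointer names
// (ExampleRakingScanKernel, d_in, d_out) are illustrative assumptions.
#include <cub/block/block_scan.cuh>

__global__ void ExampleRakingScanKernel(const int *d_in, int *d_out)
{
    typedef cub::BlockScan<int, 128, cub::BLOCK_SCAN_RAKING> BlockScanT;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    int item = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // Block-wide inclusive scan with a user-supplied binary operator (running maximum)
    BlockScanT(temp_storage).InclusiveScan(item, item, cub::Max());

    d_out[blockIdx.x * blockDim.x + threadIdx.x] = item;
}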
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_histogram_atomic.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::BlockHistogramAtomic class provides atomic-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. */ #pragma once #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief The BlockHistogramAtomic class provides atomic-based methods for constructing block-wide histograms from data samples partitioned across a CUDA thread block. */ template <int BINS> struct BlockHistogramAtomic { /// Shared memory storage layout type struct TempStorage {}; /// Constructor __device__ __forceinline__ BlockHistogramAtomic( TempStorage &temp_storage) {} /// Composite data onto an existing histogram template < typename T, typename CounterT, int ITEMS_PER_THREAD> __device__ __forceinline__ void Composite( T (&items)[ITEMS_PER_THREAD], ///< [in] Calling thread's input values to histogram CounterT histogram[BINS]) ///< [out] Reference to shared/device-accessible memory histogram { // Update histogram #pragma unroll for (int i = 0; i < ITEMS_PER_THREAD; ++i) { atomicAdd(histogram + items[i], 1); } } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
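// A minimal usage sketch, assuming the standard cub::BlockHistogram public wrapper is
// available: requesting BLOCK_HISTO_ATOMIC selects the BlockHistogramAtomic specialization
// above, which composites each sample with an atomicAdd. Names such as
// ExampleAtomicHistogramKernel, d_samples, and d_histogram are illustrative assumptions.
#include <cub/block/block_histogram.cuh>

__global__ void ExampleAtomicHistogramKernel(const unsigned char *d_samples, unsigned int *d_histogram)
{
    // 64 threads, 4 samples per thread, 256 bins, atomic composition
    typedef cub::BlockHistogram<unsigned char, 64, 4, 256, cub::BLOCK_HISTO_ATOMIC> BlockHistogramT;
    __shared__ typename BlockHistogramT::TempStorage temp_storage;  // empty for the atomic variant
    __shared__ unsigned int smem_histogram[256];

    // Zero the shared-memory bins before compositing
    BlockHistogramT(temp_storage).InitHistogram(smem_histogram);
    __syncthreads();

    unsigned char samples[4];
    for (int i = 0; i < 4; ++i)
        samples[i] = d_samples[(blockIdx.x * 64 + threadIdx.x) * 4 + i];

    // Each sample is accumulated into its bin with an atomicAdd
    BlockHistogramT(temp_storage).Composite(samples, smem_histogram);
    __syncthreads();

    // Accumulate this block's bins into a single global histogram
    for (int bin = threadIdx.x; bin < 256; bin += 64)
        atomicAdd(d_histogram + bin, smem_histogram[bin]);
}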
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_reduce_raking.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. */ #pragma once #include "../../block/block_raking_layout.cuh" #include "../../warp/warp_reduce.cuh" #include "../../thread/thread_reduce.cuh" #include "../../util_ptx.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockReduceRaking provides raking-based methods of parallel reduction across a CUDA thread block. Supports non-commutative reduction operators. * * Supports non-commutative binary reduction operators. Unlike commutative * reduction operators (e.g., addition), the application of a non-commutative * reduction operator (e.g., string concatenation) across a sequence of inputs must * honor the relative ordering of items and partial reductions when applying the * reduction operator. * * Compared to the implementation of BlockReduceRakingCommutativeOnly (which does not support * non-commutative operators), this implementation requires a few extra * rounds of inter-thread communication. 
*/ template < typename T, ///< Data type being reduced int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockReduceRaking { /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /// Layout type for padded thread block raking grid typedef BlockRakingLayout<T, BLOCK_THREADS, PTX_ARCH> BlockRakingLayout; /// WarpReduce utility type typedef typename WarpReduce<T, BlockRakingLayout::RAKING_THREADS, PTX_ARCH>::InternalWarpReduce WarpReduce; /// Constants enum { /// Number of raking threads RAKING_THREADS = BlockRakingLayout::RAKING_THREADS, /// Number of raking elements per warp synchronous raking thread SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH, /// Cooperative work can be entirely warp synchronous WARP_SYNCHRONOUS = (RAKING_THREADS == BLOCK_THREADS), /// Whether or not warp-synchronous reduction should be unguarded (i.e., the warp-reduction elements is a power of two WARP_SYNCHRONOUS_UNGUARDED = PowerOfTwo<RAKING_THREADS>::VALUE, /// Whether or not accesses into smem are unguarded RAKING_UNGUARDED = BlockRakingLayout::UNGUARDED, }; /// Shared memory storage layout type union _TempStorage { typename WarpReduce::TempStorage warp_storage; ///< Storage for warp-synchronous reduction typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; /// Constructor __device__ __forceinline__ BlockReduceRaking( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} template <bool IS_FULL_TILE, typename ReductionOp, int ITERATION> __device__ __forceinline__ T RakingReduction( ReductionOp reduction_op, ///< [in] Binary scan operator T *raking_segment, T partial, ///< [in] <b>[<em>lane</em><sub>0</sub> only]</b> Warp-wide aggregate reduction of input items int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) Int2Type<ITERATION> /*iteration*/) { // Update partial if addend is in range if ((IS_FULL_TILE && RAKING_UNGUARDED) || ((linear_tid * SEGMENT_LENGTH) + ITERATION < num_valid)) { T addend = raking_segment[ITERATION]; partial = reduction_op(partial, addend); } return RakingReduction<IS_FULL_TILE>(reduction_op, raking_segment, partial, num_valid, Int2Type<ITERATION + 1>()); } template <bool IS_FULL_TILE, typename ReductionOp> __device__ __forceinline__ T RakingReduction( ReductionOp /*reduction_op*/, ///< [in] Binary scan operator T * /*raking_segment*/, T partial, ///< [in] <b>[<em>lane</em><sub>0</sub> only]</b> Warp-wide aggregate reduction of input items int /*num_valid*/, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) Int2Type<SEGMENT_LENGTH> /*iteration*/) { return partial; } /// Computes a thread block-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. 
template < bool IS_FULL_TILE, typename ReductionOp> __device__ __forceinline__ T Reduce( T partial, ///< [in] Calling thread's input partial reductions int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) ReductionOp reduction_op) ///< [in] Binary reduction operator { if (WARP_SYNCHRONOUS) { // Short-circuit directly to warp synchronous reduction (unguarded if active threads is a power-of-two) partial = WarpReduce(temp_storage.warp_storage).template Reduce<IS_FULL_TILE, SEGMENT_LENGTH>( partial, num_valid, reduction_op); } else { // Place partial into shared memory grid. *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid) = partial; CTA_SYNC(); // Reduce parallelism to one warp if (linear_tid < RAKING_THREADS) { // Raking reduction in grid T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); partial = raking_segment[0]; partial = RakingReduction<IS_FULL_TILE>(reduction_op, raking_segment, partial, num_valid, Int2Type<1>()); partial = WarpReduce(temp_storage.warp_storage).template Reduce<IS_FULL_TILE && RAKING_UNGUARDED, SEGMENT_LENGTH>( partial, num_valid, reduction_op); } } return partial; } /// Computes a thread block-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. template <bool IS_FULL_TILE> __device__ __forceinline__ T Sum( T partial, ///< [in] Calling thread's input partial reductions int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) { cub::Sum reduction_op; return Reduce<IS_FULL_TILE>(partial, num_valid, reduction_op); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
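// A minimal usage sketch, assuming the standard cub::BlockReduce public wrapper is available:
// requesting BLOCK_REDUCE_RAKING selects the BlockReduceRaking specialization above, which
// also honors non-commutative operators. Kernel and pointer names
// (ExampleRakingReduceKernel, d_in, d_block_maxima) are illustrative assumptions.
#include <cub/block/block_reduce.cuh>

__global__ void ExampleRakingReduceKernel(const float *d_in, float *d_block_maxima)
{
    typedef cub::BlockReduce<float, 128, cub::BLOCK_REDUCE_RAKING> BlockReduceT;
    __shared__ typename BlockReduceT::TempStorage temp_storage;

    float item = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // Block-wide reduction with a user-supplied operator; the result is only valid in thread0
    float block_max = BlockReduceT(temp_storage).Reduce(item, cub::Max());

    if (threadIdx.x == 0)
        d_block_maxima[blockIdx.x] = block_max;
}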
0
rapidsai_public_repos/nvgraph/external/cub_semiring/block
rapidsai_public_repos/nvgraph/external/cub_semiring/block/specializations/block_reduce_raking_commutative_only.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators. */ #pragma once #include "block_reduce_raking.cuh" #include "../../warp/warp_reduce.cuh" #include "../../thread/thread_reduce.cuh" #include "../../util_ptx.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief BlockReduceRakingCommutativeOnly provides raking-based methods of parallel reduction across a CUDA thread block. Does not support non-commutative reduction operators. Does not support block sizes that are not a multiple of the warp size. 
*/ template < typename T, ///< Data type being reduced int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension int BLOCK_DIM_Y, ///< The thread block length in threads along the Y dimension int BLOCK_DIM_Z, ///< The thread block length in threads along the Z dimension int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct BlockReduceRakingCommutativeOnly { /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; // The fall-back implementation to use when BLOCK_THREADS is not a multiple of the warp size or not all threads have valid values typedef BlockReduceRaking<T, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, PTX_ARCH> FallBack; /// Constants enum { /// Number of warp threads WARP_THREADS = CUB_WARP_THREADS(PTX_ARCH), /// Whether or not to use fall-back USE_FALLBACK = ((BLOCK_THREADS % WARP_THREADS != 0) || (BLOCK_THREADS <= WARP_THREADS)), /// Number of raking threads RAKING_THREADS = WARP_THREADS, /// Number of threads actually sharing items with the raking threads SHARING_THREADS = CUB_MAX(1, BLOCK_THREADS - RAKING_THREADS), /// Number of raking elements per warp synchronous raking thread SEGMENT_LENGTH = SHARING_THREADS / WARP_THREADS, }; /// WarpReduce utility type typedef WarpReduce<T, RAKING_THREADS, PTX_ARCH> WarpReduce; /// Layout type for padded thread block raking grid typedef BlockRakingLayout<T, SHARING_THREADS, PTX_ARCH> BlockRakingLayout; /// Shared memory storage layout type union _TempStorage { struct { typename WarpReduce::TempStorage warp_storage; ///< Storage for warp-synchronous reduction typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid }; typename FallBack::TempStorage fallback_storage; ///< Fall-back storage for non-commutative block scan }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; // Thread fields _TempStorage &temp_storage; unsigned int linear_tid; /// Constructor __device__ __forceinline__ BlockReduceRakingCommutativeOnly( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /// Computes a thread block-wide reduction using addition (+) as the reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. template <bool FULL_TILE> __device__ __forceinline__ T Sum( T partial, ///< [in] Calling thread's input partial reductions int num_valid) ///< [in] Number of valid elements (may be less than BLOCK_THREADS) { if (USE_FALLBACK || !FULL_TILE) { return FallBack(temp_storage.fallback_storage).template Sum<FULL_TILE>(partial, num_valid); } else { // Place partial into shared memory grid if (linear_tid >= RAKING_THREADS) *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial; CTA_SYNC(); // Reduce parallelism to one warp if (linear_tid < RAKING_THREADS) { // Raking reduction in grid T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); partial = internal::ThreadReduce<SEGMENT_LENGTH>(raking_segment, cub::Sum(), partial); // Warpscan partial = WarpReduce(temp_storage.warp_storage).Sum(partial); } } return partial; } /// Computes a thread block-wide reduction using the specified reduction operator. The first num_valid threads each contribute one reduction partial. The return value is only valid for thread<sub>0</sub>. 
template < bool FULL_TILE, typename ReductionOp> __device__ __forceinline__ T Reduce( T partial, ///< [in] Calling thread's input partial reductions int num_valid, ///< [in] Number of valid elements (may be less than BLOCK_THREADS) ReductionOp reduction_op) ///< [in] Binary reduction operator { if (USE_FALLBACK || !FULL_TILE) { return FallBack(temp_storage.fallback_storage).template Reduce<FULL_TILE>(partial, num_valid, reduction_op); } else { // Place partial into shared memory grid if (linear_tid >= RAKING_THREADS) *BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid - RAKING_THREADS) = partial; CTA_SYNC(); // Reduce parallelism to one warp if (linear_tid < RAKING_THREADS) { // Raking reduction in grid T *raking_segment = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid); partial = internal::ThreadReduce<SEGMENT_LENGTH>(raking_segment, reduction_op, partial); // Warpscan partial = WarpReduce(temp_storage.warp_storage).Reduce(partial, reduction_op); } } return partial; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
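A minimal usage sketch for the specialization above. BlockReduceRakingCommutativeOnly is an internal policy that is normally reached through the public cub::BlockReduce facade by requesting the BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY algorithm; the kernel name, the 128-thread block size, and the d_in/d_out buffers below are illustrative assumptions, and the include path may differ in this vendored cub_semiring copy.

#include <cub/cub.cuh>  // assumed umbrella header; adjust to this tree's layout if needed

__global__ void BlockSumKernel(const int *d_in, int *d_out)
{
    // 128-thread blocks, commutative-only raking reduction
    typedef cub::BlockReduce<int, 128, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY> BlockReduce;
    __shared__ typename BlockReduce::TempStorage temp_storage;

    // One input item per thread
    int thread_data = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // The aggregate is only valid in thread 0 of each block
    int aggregate = BlockReduce(temp_storage).Sum(thread_data);
    if (threadIdx.x == 0)
        d_out[blockIdx.x] = aggregate;
}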
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_operators.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Simple binary operator functor types */ /****************************************************************************** * Simple functor operators ******************************************************************************/ #pragma once #include "../util_macro.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilModule * @{ */ /** * \brief Default equality functor */ struct Equality { /// Boolean equality operator, returns <tt>(a == b)</tt> template <typename T> __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) const { return a == b; } }; /** * \brief Default inequality functor */ struct Inequality { /// Boolean inequality operator, returns <tt>(a != b)</tt> template <typename T> __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) const { return a != b; } }; /** * \brief Inequality functor (wraps equality functor) */ template <typename EqualityOp> struct InequalityWrapper { /// Wrapped equality operator EqualityOp op; /// Constructor __host__ __device__ __forceinline__ InequalityWrapper(EqualityOp op) : op(op) {} /// Boolean inequality operator, returns <tt>(a != b)</tt> template <typename T> __host__ __device__ __forceinline__ bool operator()(const T &a, const T &b) { return !op(a, b); } }; /** * \brief Default sum functor */ struct Sum { /// Boolean sum operator, returns <tt>a + b</tt> template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return a + b; } }; /** * \brief Default max functor */ struct Max { /// Boolean max operator, returns <tt>(a > b) ? 
a : b</tt> template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return CUB_MAX(a, b); } }; /** * \brief Arg max functor (keeps the value and offset of the first occurrence of the larger item) */ struct ArgMax { /// Boolean max operator, preferring the item having the smaller offset in case of ties template <typename T, typename OffsetT> __host__ __device__ __forceinline__ KeyValuePair<OffsetT, T> operator()( const KeyValuePair<OffsetT, T> &a, const KeyValuePair<OffsetT, T> &b) const { // Mooch BUG (device reduce argmax gk110 3.2 million random fp32) // return ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) ? b : a; if ((b.value > a.value) || ((a.value == b.value) && (b.key < a.key))) return b; return a; } }; /** * \brief Default min functor */ struct Min { /// Boolean min operator, returns <tt>(a < b) ? a : b</tt> template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return CUB_MIN(a, b); } }; /** * \brief Arg min functor (keeps the value and offset of the first occurrence of the smallest item) */ struct ArgMin { /// Boolean min operator, preferring the item having the smaller offset in case of ties template <typename T, typename OffsetT> __host__ __device__ __forceinline__ KeyValuePair<OffsetT, T> operator()( const KeyValuePair<OffsetT, T> &a, const KeyValuePair<OffsetT, T> &b) const { // Mooch BUG (device reduce argmax gk110 3.2 million random fp32) // return ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) ? b : a; if ((b.value < a.value) || ((a.value == b.value) && (b.key < a.key))) return b; return a; } }; /** * \brief Default cast functor */ template <typename B> struct CastOp { /// Cast operator, returns <tt>(B) a</tt> template <typename A> __host__ __device__ __forceinline__ B operator()(const A &a) const { return (B) a; } }; /** * \brief Binary operator wrapper for switching non-commutative scan arguments */ template <typename ScanOp> class SwizzleScanOp { private: /// Wrapped scan operator ScanOp scan_op; public: /// Constructor __host__ __device__ __forceinline__ SwizzleScanOp(ScanOp scan_op) : scan_op(scan_op) {} /// Switch the scan arguments template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) { T _a(a); T _b(b); return scan_op(_b, _a); } }; /** * \brief Reduce-by-segment functor. * * Given two cub::KeyValuePair inputs \p a and \p b and a * binary associative combining operator \p <tt>f(const T &x, const T &y)</tt>, * an instance of this functor returns a cub::KeyValuePair whose \p key * field is <tt>a.key</tt> + <tt>b.key</tt>, and whose \p value field * is either b.value if b.key is non-zero, or f(a.value, b.value) otherwise. * * ReduceBySegmentOp is an associative, non-commutative binary combining operator * for input sequences of cub::KeyValuePair pairings. Such * sequences are typically used to represent a segmented set of values to be reduced * and a corresponding set of {0,1}-valued integer "head flags" demarcating the * first value of each segment. 
* */ template <typename ReductionOpT> ///< Binary reduction operator to apply to values struct ReduceBySegmentOp { /// Wrapped reduction operator ReductionOpT op; /// Constructor __host__ __device__ __forceinline__ ReduceBySegmentOp() {} /// Constructor __host__ __device__ __forceinline__ ReduceBySegmentOp(ReductionOpT op) : op(op) {} /// Scan operator template <typename KeyValuePairT> ///< KeyValuePair pairing of T (value) and OffsetT (head flag) __host__ __device__ __forceinline__ KeyValuePairT operator()( const KeyValuePairT &first, ///< First partial reduction const KeyValuePairT &second) ///< Second partial reduction { KeyValuePairT retval; retval.key = first.key + second.key; retval.value = (second.key) ? second.value : // The second partial reduction spans a segment reset, so its value aggregate becomes the running aggregate op(first.value, second.value); // The second partial reduction does not span a reset, so accumulate both into the running aggregate return retval; } }; template <typename ReductionOpT> ///< Binary reduction operator to apply to values struct ReduceByKeyOp { /// Wrapped reduction operator ReductionOpT op; /// Constructor __host__ __device__ __forceinline__ ReduceByKeyOp() {} /// Constructor __host__ __device__ __forceinline__ ReduceByKeyOp(ReductionOpT op) : op(op) {} /// Scan operator template <typename KeyValuePairT> __host__ __device__ __forceinline__ KeyValuePairT operator()( const KeyValuePairT &first, ///< First partial reduction const KeyValuePairT &second) ///< Second partial reduction { KeyValuePairT retval = second; if (first.key == second.key) retval.value = op(first.value, retval.value); return retval; } }; /** @} */ // end group UtilModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
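A small sketch of the functors defined above. Because their call operators are __host__ __device__, they can be exercised directly from host code in a .cu translation unit compiled with nvcc; the main() driver and the sample values are illustrative only.

#include <cstdio>
#include <cub/cub.cuh>  // assumed umbrella header pulling in thread_operators.cuh and util_type.cuh

int main()
{
    cub::Sum sum_op;
    cub::Max max_op;
    printf("%d %d\n", sum_op(2, 3), max_op(2, 3));          // prints: 5 3

    // ArgMax keeps the value and offset of the first occurrence of the larger item;
    // on a tie, the pair with the smaller key (offset) wins.
    cub::KeyValuePair<int, float> a(0, 1.5f);
    cub::KeyValuePair<int, float> b(1, 1.5f);
    cub::ArgMax arg_max;
    cub::KeyValuePair<int, float> winner = arg_max(a, b);
    printf("key=%d value=%f\n", winner.key, winner.value);  // prints: key=0 value=1.500000
    return 0;
}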
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_search.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Thread utilities for sequential search */ #pragma once #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * Computes the begin offsets into A and B for the specific diagonal */ template < typename AIteratorT, typename BIteratorT, typename OffsetT, typename CoordinateT> __host__ __device__ __forceinline__ void MergePathSearch( OffsetT diagonal, AIteratorT a, BIteratorT b, OffsetT a_len, OffsetT b_len, CoordinateT& path_coordinate) { /// The value type of the input iterator typedef typename std::iterator_traits<AIteratorT>::value_type T; OffsetT split_min = CUB_MAX(diagonal - b_len, 0); OffsetT split_max = CUB_MIN(diagonal, a_len); while (split_min < split_max) { OffsetT split_pivot = (split_min + split_max) >> 1; if (a[split_pivot] <= b[diagonal - split_pivot - 1]) { // Move candidate split range up A, down B split_min = split_pivot + 1; } else { // Move candidate split range up B, down A split_max = split_pivot; } } path_coordinate.x = CUB_MIN(split_min, a_len); path_coordinate.y = diagonal - split_min; } /** * \brief Returns the offset of the first value within \p input which does not compare less than \p val */ template < typename InputIteratorT, typename OffsetT, typename T> __device__ __forceinline__ OffsetT LowerBound( InputIteratorT input, ///< [in] Input sequence OffsetT num_items, ///< [in] Input sequence length T val) ///< [in] Search key { OffsetT retval = 0; while (num_items > 0) { OffsetT half = num_items >> 1; if (input[retval + half] < val) { retval = retval + (half + 1); num_items = num_items - (half + 1); } else { num_items = half; } } return retval; } /** * \brief Returns the offset of the first value within \p input which compares greater than \p val */ template < typename InputIteratorT, 
typename OffsetT, typename T> __device__ __forceinline__ OffsetT UpperBound( InputIteratorT input, ///< [in] Input sequence OffsetT num_items, ///< [in] Input sequence length T val) ///< [in] Search key { OffsetT retval = 0; while (num_items > 0) { OffsetT half = num_items >> 1; if (val < input[retval + half]) { num_items = half; } else { retval = retval + (half + 1); num_items = num_items - (half + 1); } } return retval; } } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
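A minimal device-side sketch of LowerBound and UpperBound from the file above, locating the half-open run [lo, hi) of a key inside a sorted array. The kernel name and the d_sorted/d_range buffers are illustrative assumptions.

#include <cub/cub.cuh>  // assumed umbrella header; thread_search.cuh is what is exercised

__global__ void FindRunKernel(const int *d_sorted, int num_items, int key, int2 *d_range)
{
    if (blockIdx.x == 0 && threadIdx.x == 0)
    {
        int lo = cub::LowerBound(d_sorted, num_items, key);  // offset of first element >= key
        int hi = cub::UpperBound(d_sorted, num_items, key);  // offset of first element >  key
        *d_range = make_int2(lo, hi);                        // empty range (lo == hi) if key is absent
    }
}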
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_scan.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Thread utilities for sequential prefix scan over statically-sized array types */ #pragma once #include "../thread/thread_operators.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /// Internal namespace (to prevent ADL mishaps between static functions when mixing different CUB installations) namespace internal { /** * \addtogroup UtilModule * @{ */ /** * \name Sequential prefix scan over statically-sized array types * @{ */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanExclusive( T inclusive, T exclusive, T *input, ///< [in] Input array T *output, ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator Int2Type<LENGTH> /*length*/) { #pragma unroll for (int i = 0; i < LENGTH; ++i) { inclusive = scan_op(exclusive, input[i]); output[i] = exclusive; exclusive = inclusive; } return inclusive; } /** * \brief Perform a sequential exclusive prefix scan over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanExclusive( T *input, ///< [in] Input array T *output, ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T prefix, ///< [in] Prefix to seed scan with bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. If not, the first output element is undefined. 
(Handy for preventing thread-0 from applying a prefix.) { T inclusive = input[0]; if (apply_prefix) { inclusive = scan_op(prefix, inclusive); } output[0] = prefix; T exclusive = inclusive; return ThreadScanExclusive(inclusive, exclusive, input + 1, output + 1, scan_op, Int2Type<LENGTH - 1>()); } /** * \brief Perform a sequential exclusive prefix scan over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH <b>[inferred]</b> LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanExclusive( T (&input)[LENGTH], ///< [in] Input array T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T prefix, ///< [in] Prefix to seed scan with bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) { return ThreadScanExclusive<LENGTH>((T*) input, (T*) output, scan_op, prefix, apply_prefix); } template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanInclusive( T inclusive, T *input, ///< [in] Input array T *output, ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator Int2Type<LENGTH> /*length*/) { #pragma unroll for (int i = 0; i < LENGTH; ++i) { inclusive = scan_op(inclusive, input[i]); output[i] = inclusive; } return inclusive; } /** * \brief Perform a sequential inclusive prefix scan over \p LENGTH elements of the \p input array. The aggregate is returned. * * \tparam LENGTH LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanInclusive( T *input, ///< [in] Input array T *output, ///< [out] Output array (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { T inclusive = input[0]; output[0] = inclusive; // Continue scan return ThreadScanInclusive(inclusive, input + 1, output + 1, scan_op, Int2Type<LENGTH - 1>()); } /** * \brief Perform a sequential inclusive prefix scan over the statically-sized \p input array. The aggregate is returned. * * \tparam LENGTH <b>[inferred]</b> LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanInclusive( T (&input)[LENGTH], ///< [in] Input array T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) ScanOp scan_op) ///< [in] Binary scan operator { return ThreadScanInclusive<LENGTH>((T*) input, (T*) output, scan_op); } /** * \brief Perform a sequential inclusive prefix scan over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. 
* \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanInclusive( T *input, ///< [in] Input array T *output, ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T prefix, ///< [in] Prefix to seed scan with bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) { T inclusive = input[0]; if (apply_prefix) { inclusive = scan_op(prefix, inclusive); } output[0] = inclusive; // Continue scan return ThreadScanInclusive(inclusive, input + 1, output + 1, scan_op, Int2Type<LENGTH - 1>()); } /** * \brief Perform a sequential inclusive prefix scan over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH <b>[inferred]</b> LengthT of \p input and \p output arrays * \tparam T <b>[inferred]</b> The data type to be scanned. * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ScanOp> __device__ __forceinline__ T ThreadScanInclusive( T (&input)[LENGTH], ///< [in] Input array T (&output)[LENGTH], ///< [out] Output array (may be aliased to \p input) ScanOp scan_op, ///< [in] Binary scan operator T prefix, ///< [in] Prefix to seed scan with bool apply_prefix = true) ///< [in] Whether or not the calling thread should apply its prefix. (Handy for preventing thread-0 from applying a prefix.) { return ThreadScanInclusive<LENGTH>((T*) input, (T*) output, scan_op, prefix, apply_prefix); } //@} end member group /** @} */ // end group UtilModule } // internal namespace } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
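A minimal sketch of the per-thread scan helpers above, applied to a statically-sized local array. Note that they live in the cub::internal namespace in this tree; the kernel and buffer names are illustrative.

#include <cub/cub.cuh>  // assumed umbrella header; thread_scan.cuh is what is exercised

__global__ void LocalScanKernel(int *d_out)
{
    int items[4] = {1, 2, 3, 4};
    int incl[4];
    int excl[4];

    // Inclusive scan: incl = {1, 3, 6, 10}; the aggregate (10) is returned
    int incl_aggregate = cub::internal::ThreadScanInclusive(items, incl, cub::Sum());

    // Exclusive scan seeded with prefix 0: excl = {0, 1, 3, 6}; the aggregate is still 10
    int excl_aggregate = cub::internal::ThreadScanExclusive(items, excl, cub::Sum(), 0);

    if (blockIdx.x == 0 && threadIdx.x == 0)
        d_out[0] = incl_aggregate + excl_aggregate;  // keep the results observable
}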
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_load.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Thread utilities for reading memory using PTX cache modifiers. */ #pragma once #include <cuda.h> #include <iterator> #include "../util_ptx.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilIo * @{ */ //----------------------------------------------------------------------------- // Tags and constants //----------------------------------------------------------------------------- /** * \brief Enumeration of cache modifiers for memory load operations. */ enum CacheLoadModifier { LOAD_DEFAULT, ///< Default (no modifier) LOAD_CA, ///< Cache at all levels LOAD_CG, ///< Cache at global level LOAD_CS, ///< Cache streaming (likely to be accessed once) LOAD_CV, ///< Cache as volatile (including cached system lines) LOAD_LDG, ///< Cache as texture LOAD_VOLATILE, ///< Volatile (any memory space) }; /** * \name Thread I/O (cache modified) * @{ */ /** * \brief Thread utility for reading memory using cub::CacheLoadModifier cache modifiers. Can be used to load any data type. 
* * \par Example * \code * #include <cub/cub.cuh> // or equivalently <cub/thread/thread_load.cuh> * * // 32-bit load using cache-global modifier: * int *d_in; * int val = cub::ThreadLoad<cub::LOAD_CA>(d_in + threadIdx.x); * * // 16-bit load using default modifier * short *d_in; * short val = cub::ThreadLoad<cub::LOAD_DEFAULT>(d_in + threadIdx.x); * * // 256-bit load using cache-volatile modifier * double4 *d_in; * double4 val = cub::ThreadLoad<cub::LOAD_CV>(d_in + threadIdx.x); * * // 96-bit load using cache-streaming modifier * struct TestFoo { bool a; short b; }; * TestFoo *d_struct; * TestFoo val = cub::ThreadLoad<cub::LOAD_CS>(d_in + threadIdx.x); * \endcode * * \tparam MODIFIER <b>[inferred]</b> CacheLoadModifier enumeration * \tparam InputIteratorT <b>[inferred]</b> Input iterator type \iterator */ template < CacheLoadModifier MODIFIER, typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad(InputIteratorT itr); //@} end member group #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /// Helper structure for templated load iteration (inductive case) template <int COUNT, int MAX> struct IterateThreadLoad { template <CacheLoadModifier MODIFIER, typename T> static __device__ __forceinline__ void Load(T const *ptr, T *vals) { vals[COUNT] = ThreadLoad<MODIFIER>(ptr + COUNT); IterateThreadLoad<COUNT + 1, MAX>::template Load<MODIFIER>(ptr, vals); } template <typename InputIteratorT, typename T> static __device__ __forceinline__ void Dereference(InputIteratorT itr, T *vals) { vals[COUNT] = itr[COUNT]; IterateThreadLoad<COUNT + 1, MAX>::Dereference(itr, vals); } }; /// Helper structure for templated load iteration (termination case) template <int MAX> struct IterateThreadLoad<MAX, MAX> { template <CacheLoadModifier MODIFIER, typename T> static __device__ __forceinline__ void Load(T const * /*ptr*/, T * /*vals*/) {} template <typename InputIteratorT, typename T> static __device__ __forceinline__ void Dereference(InputIteratorT /*itr*/, T * /*vals*/) {} }; /** * Define a uint4 (16B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_16(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ uint4 ThreadLoad<cub_modifier, uint4 const *>(uint4 const *ptr) \ { \ uint4 retval; \ asm volatile ("ld."#ptx_modifier".v4.u32 {%0, %1, %2, %3}, [%4];" : \ "=r"(retval.x), \ "=r"(retval.y), \ "=r"(retval.z), \ "=r"(retval.w) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ __forceinline__ ulonglong2 ThreadLoad<cub_modifier, ulonglong2 const *>(ulonglong2 const *ptr) \ { \ ulonglong2 retval; \ asm volatile ("ld."#ptx_modifier".v2.u64 {%0, %1}, [%2];" : \ "=l"(retval.x), \ "=l"(retval.y) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a uint2 (8B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_8(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ ushort4 ThreadLoad<cub_modifier, ushort4 const *>(ushort4 const *ptr) \ { \ ushort4 retval; \ asm volatile ("ld."#ptx_modifier".v4.u16 {%0, %1, %2, %3}, [%4];" : \ "=h"(retval.x), \ "=h"(retval.y), \ "=h"(retval.z), \ "=h"(retval.w) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ __forceinline__ uint2 ThreadLoad<cub_modifier, uint2 const *>(uint2 const *ptr) \ { \ uint2 retval; \ asm volatile ("ld."#ptx_modifier".v2.u32 {%0, %1}, [%2];" : \ "=r"(retval.x), \ "=r"(retval.y) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } \ template<> \ __device__ 
__forceinline__ unsigned long long ThreadLoad<cub_modifier, unsigned long long const *>(unsigned long long const *ptr) \ { \ unsigned long long retval; \ asm volatile ("ld."#ptx_modifier".u64 %0, [%1];" : \ "=l"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a uint (4B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_4(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned int ThreadLoad<cub_modifier, unsigned int const *>(unsigned int const *ptr) \ { \ unsigned int retval; \ asm volatile ("ld."#ptx_modifier".u32 %0, [%1];" : \ "=r"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define a unsigned short (2B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_2(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned short ThreadLoad<cub_modifier, unsigned short const *>(unsigned short const *ptr) \ { \ unsigned short retval; \ asm volatile ("ld."#ptx_modifier".u16 %0, [%1];" : \ "=h"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return retval; \ } /** * Define an unsigned char (1B) ThreadLoad specialization for the given Cache load modifier */ #define _CUB_LOAD_1(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ unsigned char ThreadLoad<cub_modifier, unsigned char const *>(unsigned char const *ptr) \ { \ unsigned short retval; \ asm volatile ( \ "{" \ " .reg .u8 datum;" \ " ld."#ptx_modifier".u8 datum, [%1];" \ " cvt.u16.u8 %0, datum;" \ "}" : \ "=h"(retval) : \ _CUB_ASM_PTR_(ptr)); \ return (unsigned char) retval; \ } /** * Define powers-of-two ThreadLoad specializations for the given Cache load modifier */ #define _CUB_LOAD_ALL(cub_modifier, ptx_modifier) \ _CUB_LOAD_16(cub_modifier, ptx_modifier) \ _CUB_LOAD_8(cub_modifier, ptx_modifier) \ _CUB_LOAD_4(cub_modifier, ptx_modifier) \ _CUB_LOAD_2(cub_modifier, ptx_modifier) \ _CUB_LOAD_1(cub_modifier, ptx_modifier) \ /** * Define powers-of-two ThreadLoad specializations for the various Cache load modifiers */ #if CUB_PTX_ARCH >= 200 _CUB_LOAD_ALL(LOAD_CA, ca) _CUB_LOAD_ALL(LOAD_CG, cg) _CUB_LOAD_ALL(LOAD_CS, cs) _CUB_LOAD_ALL(LOAD_CV, cv) #else _CUB_LOAD_ALL(LOAD_CA, global) // Use volatile to ensure coherent reads when this PTX is JIT'd to run on newer architectures with L1 _CUB_LOAD_ALL(LOAD_CG, volatile.global) _CUB_LOAD_ALL(LOAD_CS, global) _CUB_LOAD_ALL(LOAD_CV, volatile.global) #endif #if CUB_PTX_ARCH >= 350 _CUB_LOAD_ALL(LOAD_LDG, global.nc) #else _CUB_LOAD_ALL(LOAD_LDG, global) #endif // Macro cleanup #undef _CUB_LOAD_ALL #undef _CUB_LOAD_1 #undef _CUB_LOAD_2 #undef _CUB_LOAD_4 #undef _CUB_LOAD_8 #undef _CUB_LOAD_16 /** * ThreadLoad definition for LOAD_DEFAULT modifier on iterator types */ template <typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad( InputIteratorT itr, Int2Type<LOAD_DEFAULT> /*modifier*/, Int2Type<false> /*is_pointer*/) { return *itr; } /** * ThreadLoad definition for LOAD_DEFAULT modifier on pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoad( T *ptr, Int2Type<LOAD_DEFAULT> /*modifier*/, Int2Type<true> /*is_pointer*/) { return *ptr; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on primitive pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoadVolatilePointer( T *ptr, Int2Type<true> /*is_primitive*/) { T retval = *reinterpret_cast<volatile T*>(ptr); return retval; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on non-primitive 
pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoadVolatilePointer( T *ptr, Int2Type<false> /*is_primitive*/) { typedef typename UnitWord<T>::VolatileWord VolatileWord; // Word type for memcopying const int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); /* VolatileWord words[VOLATILE_MULTIPLE]; IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( reinterpret_cast<volatile VolatileWord*>(ptr), words); return *reinterpret_cast<T*>(words); */ T retval; VolatileWord *words = reinterpret_cast<VolatileWord*>(&retval); IterateThreadLoad<0, VOLATILE_MULTIPLE>::Dereference( reinterpret_cast<volatile VolatileWord*>(ptr), words); return retval; } /** * ThreadLoad definition for LOAD_VOLATILE modifier on pointer types */ template <typename T> __device__ __forceinline__ T ThreadLoad( T *ptr, Int2Type<LOAD_VOLATILE> /*modifier*/, Int2Type<true> /*is_pointer*/) { // Apply tags for partial-specialization return ThreadLoadVolatilePointer(ptr, Int2Type<Traits<T>::PRIMITIVE>()); } /** * ThreadLoad definition for generic modifiers on pointer types */ template <typename T, int MODIFIER> __device__ __forceinline__ T ThreadLoad( T const *ptr, Int2Type<MODIFIER> /*modifier*/, Int2Type<true> /*is_pointer*/) { typedef typename UnitWord<T>::DeviceWord DeviceWord; const int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); DeviceWord words[DEVICE_MULTIPLE]; IterateThreadLoad<0, DEVICE_MULTIPLE>::template Load<CacheLoadModifier(MODIFIER)>( reinterpret_cast<DeviceWord*>(const_cast<T*>(ptr)), words); return *reinterpret_cast<T*>(words); } /** * ThreadLoad definition for generic modifiers */ template < CacheLoadModifier MODIFIER, typename InputIteratorT> __device__ __forceinline__ typename std::iterator_traits<InputIteratorT>::value_type ThreadLoad(InputIteratorT itr) { // Apply tags for partial-specialization return ThreadLoad( itr, Int2Type<MODIFIER>(), Int2Type<IsPointer<InputIteratorT>::VALUE>()); } #endif // DOXYGEN_SHOULD_SKIP_THIS /** @} */ // end group UtilIo } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
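A short kernel sketch complementing the ThreadLoad documentation above: reads go through the LOAD_LDG modifier, which the specializations above map to ld.global.nc on sm_35+ and to a plain global load on older targets. The kernel and buffer names are illustrative.

#include <cub/cub.cuh>  // assumed umbrella header; thread_load.cuh is what is exercised

__global__ void CopyLdgKernel(const int *d_in, int *d_out, int num_items)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_items)
    {
        // Read-only load through the non-coherent/texture path where available
        int val = cub::ThreadLoad<cub::LOAD_LDG>(d_in + idx);
        d_out[idx] = val;
    }
}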
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_store.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Thread utilities for writing memory using PTX cache modifiers. */ #pragma once #include <cuda.h> #include "../util_ptx.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup UtilIo * @{ */ //----------------------------------------------------------------------------- // Tags and constants //----------------------------------------------------------------------------- /** * \brief Enumeration of cache modifiers for memory store operations. */ enum CacheStoreModifier { STORE_DEFAULT, ///< Default (no modifier) STORE_WB, ///< Cache write-back all coherent levels STORE_CG, ///< Cache at global level STORE_CS, ///< Cache streaming (likely to be accessed once) STORE_WT, ///< Cache write-through (to system memory) STORE_VOLATILE, ///< Volatile shared (any memory space) }; /** * \name Thread I/O (cache modified) * @{ */ /** * \brief Thread utility for writing memory using cub::CacheStoreModifier cache modifiers. Can be used to store any data type. 
* * \par Example * \code * #include <cub/cub.cuh> // or equivalently <cub/thread/thread_store.cuh> * * // 32-bit store using cache-global modifier: * int *d_out; * int val; * cub::ThreadStore<cub::STORE_CG>(d_out + threadIdx.x, val); * * // 16-bit store using default modifier * short *d_out; * short val; * cub::ThreadStore<cub::STORE_DEFAULT>(d_out + threadIdx.x, val); * * // 256-bit store using write-through modifier * double4 *d_out; * double4 val; * cub::ThreadStore<cub::STORE_WT>(d_out + threadIdx.x, val); * * // 96-bit store using cache-streaming cache modifier * struct TestFoo { bool a; short b; }; * TestFoo *d_struct; * TestFoo val; * cub::ThreadStore<cub::STORE_CS>(d_out + threadIdx.x, val); * \endcode * * \tparam MODIFIER <b>[inferred]</b> CacheStoreModifier enumeration * \tparam InputIteratorT <b>[inferred]</b> Output iterator type \iterator * \tparam T <b>[inferred]</b> Data type of output value */ template < CacheStoreModifier MODIFIER, typename OutputIteratorT, typename T> __device__ __forceinline__ void ThreadStore(OutputIteratorT itr, T val); //@} end member group #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /// Helper structure for templated store iteration (inductive case) template <int COUNT, int MAX> struct IterateThreadStore { template <CacheStoreModifier MODIFIER, typename T> static __device__ __forceinline__ void Store(T *ptr, T *vals) { ThreadStore<MODIFIER>(ptr + COUNT, vals[COUNT]); IterateThreadStore<COUNT + 1, MAX>::template Store<MODIFIER>(ptr, vals); } template <typename OutputIteratorT, typename T> static __device__ __forceinline__ void Dereference(OutputIteratorT ptr, T *vals) { ptr[COUNT] = vals[COUNT]; IterateThreadStore<COUNT + 1, MAX>::Dereference(ptr, vals); } }; /// Helper structure for templated store iteration (termination case) template <int MAX> struct IterateThreadStore<MAX, MAX> { template <CacheStoreModifier MODIFIER, typename T> static __device__ __forceinline__ void Store(T * /*ptr*/, T * /*vals*/) {} template <typename OutputIteratorT, typename T> static __device__ __forceinline__ void Dereference(OutputIteratorT /*ptr*/, T * /*vals*/) {} }; /** * Define a uint4 (16B) ThreadStore specialization for the given Cache load modifier */ #define _CUB_STORE_16(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, uint4*, uint4>(uint4* ptr, uint4 val) \ { \ asm volatile ("st."#ptx_modifier".v4.u32 [%0], {%1, %2, %3, %4};" : : \ _CUB_ASM_PTR_(ptr), \ "r"(val.x), \ "r"(val.y), \ "r"(val.z), \ "r"(val.w)); \ } \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, ulonglong2*, ulonglong2>(ulonglong2* ptr, ulonglong2 val) \ { \ asm volatile ("st."#ptx_modifier".v2.u64 [%0], {%1, %2};" : : \ _CUB_ASM_PTR_(ptr), \ "l"(val.x), \ "l"(val.y)); \ } /** * Define a uint2 (8B) ThreadStore specialization for the given Cache load modifier */ #define _CUB_STORE_8(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, ushort4*, ushort4>(ushort4* ptr, ushort4 val) \ { \ asm volatile ("st."#ptx_modifier".v4.u16 [%0], {%1, %2, %3, %4};" : : \ _CUB_ASM_PTR_(ptr), \ "h"(val.x), \ "h"(val.y), \ "h"(val.z), \ "h"(val.w)); \ } \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, uint2*, uint2>(uint2* ptr, uint2 val) \ { \ asm volatile ("st."#ptx_modifier".v2.u32 [%0], {%1, %2};" : : \ _CUB_ASM_PTR_(ptr), \ "r"(val.x), \ "r"(val.y)); \ } \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, unsigned long long*, unsigned 
long long>(unsigned long long* ptr, unsigned long long val) \ { \ asm volatile ("st."#ptx_modifier".u64 [%0], %1;" : : \ _CUB_ASM_PTR_(ptr), \ "l"(val)); \ } /** * Define a unsigned int (4B) ThreadStore specialization for the given Cache load modifier */ #define _CUB_STORE_4(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, unsigned int*, unsigned int>(unsigned int* ptr, unsigned int val) \ { \ asm volatile ("st."#ptx_modifier".u32 [%0], %1;" : : \ _CUB_ASM_PTR_(ptr), \ "r"(val)); \ } /** * Define a unsigned short (2B) ThreadStore specialization for the given Cache load modifier */ #define _CUB_STORE_2(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, unsigned short*, unsigned short>(unsigned short* ptr, unsigned short val) \ { \ asm volatile ("st."#ptx_modifier".u16 [%0], %1;" : : \ _CUB_ASM_PTR_(ptr), \ "h"(val)); \ } /** * Define a unsigned char (1B) ThreadStore specialization for the given Cache load modifier */ #define _CUB_STORE_1(cub_modifier, ptx_modifier) \ template<> \ __device__ __forceinline__ void ThreadStore<cub_modifier, unsigned char*, unsigned char>(unsigned char* ptr, unsigned char val) \ { \ asm volatile ( \ "{" \ " .reg .u8 datum;" \ " cvt.u8.u16 datum, %1;" \ " st."#ptx_modifier".u8 [%0], datum;" \ "}" : : \ _CUB_ASM_PTR_(ptr), \ "h"((unsigned short) val)); \ } /** * Define powers-of-two ThreadStore specializations for the given Cache load modifier */ #define _CUB_STORE_ALL(cub_modifier, ptx_modifier) \ _CUB_STORE_16(cub_modifier, ptx_modifier) \ _CUB_STORE_8(cub_modifier, ptx_modifier) \ _CUB_STORE_4(cub_modifier, ptx_modifier) \ _CUB_STORE_2(cub_modifier, ptx_modifier) \ _CUB_STORE_1(cub_modifier, ptx_modifier) \ /** * Define ThreadStore specializations for the various Cache load modifiers */ #if CUB_PTX_ARCH >= 200 _CUB_STORE_ALL(STORE_WB, wb) _CUB_STORE_ALL(STORE_CG, cg) _CUB_STORE_ALL(STORE_CS, cs) _CUB_STORE_ALL(STORE_WT, wt) #else _CUB_STORE_ALL(STORE_WB, global) _CUB_STORE_ALL(STORE_CG, global) _CUB_STORE_ALL(STORE_CS, global) _CUB_STORE_ALL(STORE_WT, volatile.global) #endif // Macro cleanup #undef _CUB_STORE_ALL #undef _CUB_STORE_1 #undef _CUB_STORE_2 #undef _CUB_STORE_4 #undef _CUB_STORE_8 #undef _CUB_STORE_16 /** * ThreadStore definition for STORE_DEFAULT modifier on iterator types */ template <typename OutputIteratorT, typename T> __device__ __forceinline__ void ThreadStore( OutputIteratorT itr, T val, Int2Type<STORE_DEFAULT> /*modifier*/, Int2Type<false> /*is_pointer*/) { *itr = val; } /** * ThreadStore definition for STORE_DEFAULT modifier on pointer types */ template <typename T> __device__ __forceinline__ void ThreadStore( T *ptr, T val, Int2Type<STORE_DEFAULT> /*modifier*/, Int2Type<true> /*is_pointer*/) { *ptr = val; } /** * ThreadStore definition for STORE_VOLATILE modifier on primitive pointer types */ template <typename T> __device__ __forceinline__ void ThreadStoreVolatilePtr( T *ptr, T val, Int2Type<true> /*is_primitive*/) { *reinterpret_cast<volatile T*>(ptr) = val; } /** * ThreadStore definition for STORE_VOLATILE modifier on non-primitive pointer types */ template <typename T> __device__ __forceinline__ void ThreadStoreVolatilePtr( T *ptr, T val, Int2Type<false> /*is_primitive*/) { // Create a temporary using shuffle-words, then store using volatile-words typedef typename UnitWord<T>::VolatileWord VolatileWord; typedef typename UnitWord<T>::ShuffleWord ShuffleWord; const int VOLATILE_MULTIPLE = sizeof(T) / sizeof(VolatileWord); const int 
SHUFFLE_MULTIPLE = sizeof(T) / sizeof(ShuffleWord); VolatileWord words[VOLATILE_MULTIPLE]; #pragma unroll for (int i = 0; i < SHUFFLE_MULTIPLE; ++i) reinterpret_cast<ShuffleWord*>(words)[i] = reinterpret_cast<ShuffleWord*>(&val)[i]; IterateThreadStore<0, VOLATILE_MULTIPLE>::template Dereference( reinterpret_cast<volatile VolatileWord*>(ptr), words); } /** * ThreadStore definition for STORE_VOLATILE modifier on pointer types */ template <typename T> __device__ __forceinline__ void ThreadStore( T *ptr, T val, Int2Type<STORE_VOLATILE> /*modifier*/, Int2Type<true> /*is_pointer*/) { ThreadStoreVolatilePtr(ptr, val, Int2Type<Traits<T>::PRIMITIVE>()); } /** * ThreadStore definition for generic modifiers on pointer types */ template <typename T, int MODIFIER> __device__ __forceinline__ void ThreadStore( T *ptr, T val, Int2Type<MODIFIER> /*modifier*/, Int2Type<true> /*is_pointer*/) { // Create a temporary using shuffle-words, then store using device-words typedef typename UnitWord<T>::DeviceWord DeviceWord; typedef typename UnitWord<T>::ShuffleWord ShuffleWord; const int DEVICE_MULTIPLE = sizeof(T) / sizeof(DeviceWord); const int SHUFFLE_MULTIPLE = sizeof(T) / sizeof(ShuffleWord); DeviceWord words[DEVICE_MULTIPLE]; #pragma unroll for (int i = 0; i < SHUFFLE_MULTIPLE; ++i) reinterpret_cast<ShuffleWord*>(words)[i] = reinterpret_cast<ShuffleWord*>(&val)[i]; IterateThreadStore<0, DEVICE_MULTIPLE>::template Store<CacheStoreModifier(MODIFIER)>( reinterpret_cast<DeviceWord*>(ptr), words); } /** * ThreadStore definition for generic modifiers */ template <CacheStoreModifier MODIFIER, typename OutputIteratorT, typename T> __device__ __forceinline__ void ThreadStore(OutputIteratorT itr, T val) { ThreadStore( itr, val, Int2Type<MODIFIER>(), Int2Type<IsPointer<OutputIteratorT>::VALUE>()); } #endif // DOXYGEN_SHOULD_SKIP_THIS /** @} */ // end group UtilIo } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
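A short kernel sketch complementing the ThreadStore documentation above, using the STORE_WT modifier (a write-through st.wt store on sm_20+, a volatile global store on older targets, per the specializations above). The kernel and buffer names are illustrative.

#include <cub/cub.cuh>  // assumed umbrella header; thread_store.cuh is what is exercised

__global__ void ScaleStoreKernel(const float *d_in, float *d_out, float alpha, int num_items)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_items)
    {
        float val = alpha * d_in[idx];
        // Write-through store that bypasses intermediate caching
        cub::ThreadStore<cub::STORE_WT>(d_out + idx, val);
    }
}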
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/thread/thread_reduce.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * Thread utilities for sequential reduction over statically-sized array types */ #pragma once #include "../thread/thread_operators.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /// Internal namespace (to prevent ADL mishaps between static functions when mixing different CUB installations) namespace internal { /** * Sequential reduction over statically-sized array types */ template < int LENGTH, typename T, typename ReductionOp> __device__ __forceinline__ T ThreadReduce( T* input, ///< [in] Input array ReductionOp reduction_op, ///< [in] Binary reduction operator T prefix, ///< [in] Prefix to seed reduction with Int2Type<LENGTH> /*length*/) { T retval = prefix; #pragma unroll for (int i = 0; i < LENGTH; ++i) retval = reduction_op(retval, input[i]); return retval; } /** * \brief Perform a sequential reduction over \p LENGTH elements of the \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH LengthT of input array * \tparam T <b>[inferred]</b> The data type to be reduced. * \tparam ScanOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ReductionOp> __device__ __forceinline__ T ThreadReduce( T* input, ///< [in] Input array ReductionOp reduction_op, ///< [in] Binary reduction operator T prefix) ///< [in] Prefix to seed reduction with { return ThreadReduce(input, reduction_op, prefix, Int2Type<LENGTH>()); } /** * \brief Perform a sequential reduction over \p LENGTH elements of the \p input array. The aggregate is returned. * * \tparam LENGTH LengthT of input array * \tparam T <b>[inferred]</b> The data type to be reduced. 
* \tparam ScanOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ReductionOp> __device__ __forceinline__ T ThreadReduce( T* input, ///< [in] Input array ReductionOp reduction_op) ///< [in] Binary reduction operator { T prefix = input[0]; return ThreadReduce<LENGTH - 1>(input + 1, reduction_op, prefix); } /** * \brief Perform a sequential reduction over the statically-sized \p input array, seeded with the specified \p prefix. The aggregate is returned. * * \tparam LENGTH <b>[inferred]</b> LengthT of \p input array * \tparam T <b>[inferred]</b> The data type to be reduced. * \tparam ScanOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ReductionOp> __device__ __forceinline__ T ThreadReduce( T (&input)[LENGTH], ///< [in] Input array ReductionOp reduction_op, ///< [in] Binary reduction operator T prefix) ///< [in] Prefix to seed reduction with { return ThreadReduce(input, reduction_op, prefix, Int2Type<LENGTH>()); } /** * \brief Serial reduction with the specified operator * * \tparam LENGTH <b>[inferred]</b> LengthT of \p input array * \tparam T <b>[inferred]</b> The data type to be reduced. * \tparam ScanOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < int LENGTH, typename T, typename ReductionOp> __device__ __forceinline__ T ThreadReduce( T (&input)[LENGTH], ///< [in] Input array ReductionOp reduction_op) ///< [in] Binary reduction operator { return ThreadReduce<LENGTH>((T*) input, reduction_op); } } // internal namespace } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
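A minimal sketch of the sequential reduction helpers above, combining cub::internal::ThreadReduce with the cub::Max functor from thread_operators.cuh. The kernel, the four-items-per-thread layout, and the buffer names are illustrative assumptions.

#include <cub/cub.cuh>  // assumed umbrella header; thread_reduce.cuh is what is exercised

__global__ void LocalMaxKernel(const int *d_in, int *d_out)
{
    // Each thread owns four consecutive input items (illustrative layout)
    int items[4];
    #pragma unroll
    for (int i = 0; i < 4; ++i)
        items[i] = d_in[threadIdx.x * 4 + i];

    // Sequential reduction over the thread's local array: max(items[0..3])
    int thread_max = cub::internal::ThreadReduce(items, cub::Max());
    d_out[threadIdx.x] = thread_max;
}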
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/warp_reduce.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::WarpReduce class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread warp. */ #pragma once #include "specializations/warp_reduce_shfl.cuh" #include "specializations/warp_reduce_smem.cuh" #include "../thread/thread_operators.cuh" #include "../util_arch.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup WarpModule * @{ */ /** * \brief The WarpReduce class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel reduction of items partitioned across a CUDA thread warp. ![](warp_reduce_logo.png) * * \tparam T The reduction input/output element type * \tparam LOGICAL_WARP_THREADS <b>[optional]</b> The number of threads per "logical" warp (may be less than the number of hardware warp threads). Default is the warp size of the targeted CUDA compute-capability (e.g., 32 threads for SM20). * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a list of input elements. 
* - Supports "logical" warps smaller than the physical warp size (e.g., logical warps of 8 threads) * - The number of entrant threads must be an multiple of \p LOGICAL_WARP_THREADS * * \par Performance Considerations * - Uses special instructions when applicable (e.g., warp \p SHFL instructions) * - Uses synchronization-free communication between warp lanes when applicable * - Incurs zero bank conflicts for most types * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: * - Summation (<b><em>vs.</em></b> generic reduction) * - The architecture's warp size is a whole multiple of \p LOGICAL_WARP_THREADS * * \par Simple Examples * \warpcollective{WarpReduce} * \par * The code snippet below illustrates four concurrent warp sum reductions within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for 4 warps * __shared__ typename WarpReduce::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Return the warp-wide sums to each lane0 (threads 0, 32, 64, and 96) * int warp_id = threadIdx.x / 32; * int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, 1, 2, 3, ..., 127}</tt>. * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will \p 496, \p 1520, * \p 2544, and \p 3568, respectively (and is undefined in other threads). * * \par * The code snippet below illustrates a single warp sum reduction within a block of * 128 threads. * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * ... * * // Only the first warp performs a reduction * if (threadIdx.x < 32) * { * // Obtain one input item per thread * int thread_data = ... * * // Return the warp-wide sum to lane0 * int aggregate = WarpReduce(temp_storage).Sum(thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the warp of threads is <tt>{0, 1, 2, 3, ..., 31}</tt>. * The corresponding output \p aggregate in thread0 will be \p 496 (and is undefined in other threads). * */ template < typename T, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int PTX_ARCH = CUB_PTX_ARCH> class WarpReduce { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// Whether the logical warp size is a power-of-two IS_POW_OF_TWO = PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, }; public: #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /// Internal specialization. 
Use SHFL-based reduction if (architecture is >= SM30) and (LOGICAL_WARP_THREADS is a power-of-two) typedef typename If<(PTX_ARCH >= 300) && (IS_POW_OF_TWO), WarpReduceShfl<T, LOGICAL_WARP_THREADS, PTX_ARCH>, WarpReduceSmem<T, LOGICAL_WARP_THREADS, PTX_ARCH> >::Type InternalWarpReduce; #endif // DOXYGEN_SHOULD_SKIP_THIS private: /// Shared memory storage layout type for WarpReduce typedef typename InternalWarpReduce::TempStorage _TempStorage; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /****************************************************************************** * Utility methods ******************************************************************************/ public: /// \smemstorage{WarpReduce} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using the specified memory allocation as temporary storage. Logical warp and lane identifiers are constructed from <tt>threadIdx.x</tt>. */ __device__ __forceinline__ WarpReduce( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()) {} //@} end member group /******************************************************************//** * \name Summation reductions *********************************************************************/ //@{ /** * \brief Computes a warp-wide sum in the calling warp. The output is valid in warp <em>lane</em><sub>0</sub>. * * \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp sum reductions within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for 4 warps * __shared__ typename WarpReduce::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Return the warp-wide sums to each lane0 * int warp_id = threadIdx.x / 32; * int aggregate = WarpReduce(temp_storage[warp_id]).Sum(thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, 1, 2, 3, ..., 127}</tt>. * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will \p 496, \p 1520, * \p 2544, and \p 3568, respectively (and is undefined in other threads). * */ __device__ __forceinline__ T Sum( T input) ///< [in] Calling thread's input { return InternalWarpReduce(temp_storage).template Reduce<true, 1>(input, LOGICAL_WARP_THREADS, cub::Sum()); } /** * \brief Computes a partially-full warp-wide sum in the calling warp. The output is valid in warp <em>lane</em><sub>0</sub>. * * All threads across the calling warp must agree on the same value for \p valid_items. Otherwise the result is undefined. * * \smemreuse * * \par Snippet * The code snippet below illustrates a sum reduction within a single, partially-full * block of 32 threads (one warp). 
* \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item per thread if in range * int thread_data; * if (threadIdx.x < valid_items) * thread_data = d_data[threadIdx.x]; * * // Return the warp-wide sums to each lane0 * int aggregate = WarpReduce(temp_storage).Sum( * thread_data, valid_items); * * \endcode * \par * Suppose the input \p d_data is <tt>{0, 1, 2, 3, 4, ...</tt> and \p valid_items * is \p 4. The corresponding output \p aggregate in thread0 is \p 6 (and is * undefined in other threads). * */ __device__ __forceinline__ T Sum( T input, ///< [in] Calling thread's input int valid_items) ///< [in] Total number of valid items in the calling thread's logical warp (may be less than \p LOGICAL_WARP_THREADS) { // Determine if we don't need bounds checking return InternalWarpReduce(temp_storage).template Reduce<false, 1>(input, valid_items, cub::Sum()); } /** * \brief Computes a segmented sum in the calling warp where segments are defined by head-flags. The sum of each segment is returned to the first lane in that segment (which always includes <em>lane</em><sub>0</sub>). * * \smemreuse * * \par Snippet * The code snippet below illustrates a head-segmented warp sum * reduction within a block of 32 threads (one warp). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item and flag per thread * int thread_data = ... * int head_flag = ... * * // Return the warp-wide sums to each lane0 * int aggregate = WarpReduce(temp_storage).HeadSegmentedSum( * thread_data, head_flag); * * \endcode * \par * Suppose the set of input \p thread_data and \p head_flag across the block of threads * is <tt>{0, 1, 2, 3, ..., 31</tt> and is <tt>{1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0</tt>, * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be * \p 6, \p 22, \p 38, etc. (and is undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> * */ template < typename FlagT> __device__ __forceinline__ T HeadSegmentedSum( T input, ///< [in] Calling thread's input FlagT head_flag) ///< [in] Head flag denoting whether or not \p input is the start of a new segment { return HeadSegmentedReduce(input, head_flag, cub::Sum()); } /** * \brief Computes a segmented sum in the calling warp where segments are defined by tail-flags. The sum of each segment is returned to the first lane in that segment (which always includes <em>lane</em><sub>0</sub>). * * \smemreuse * * \par Snippet * The code snippet below illustrates a tail-segmented warp sum * reduction within a block of 32 threads (one warp). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item and flag per thread * int thread_data = ... * int tail_flag = ... 
* * // Return the warp-wide sums to each lane0 * int aggregate = WarpReduce(temp_storage).TailSegmentedSum( * thread_data, tail_flag); * * \endcode * \par * Suppose the set of input \p thread_data and \p tail_flag across the block of threads * is <tt>{0, 1, 2, 3, ..., 31</tt> and is <tt>{0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1</tt>, * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be * \p 6, \p 22, \p 38, etc. (and is undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename FlagT> __device__ __forceinline__ T TailSegmentedSum( T input, ///< [in] Calling thread's input FlagT tail_flag) ///< [in] Head flag denoting whether or not \p input is the start of a new segment { return TailSegmentedReduce(input, tail_flag, cub::Sum()); } //@} end member group /******************************************************************//** * \name Generic reductions *********************************************************************/ //@{ /** * \brief Computes a warp-wide reduction in the calling warp using the specified binary reduction functor. The output is valid in warp <em>lane</em><sub>0</sub>. * * Supports non-commutative reduction operators * * \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp max reductions within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for 4 warps * __shared__ typename WarpReduce::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Return the warp-wide reductions to each lane0 * int warp_id = threadIdx.x / 32; * int aggregate = WarpReduce(temp_storage[warp_id]).Reduce( * thread_data, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, 1, 2, 3, ..., 127}</tt>. * The corresponding output \p aggregate in threads 0, 32, 64, and 96 will \p 31, \p 63, * \p 95, and \p 127, respectively (and is undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ReductionOp> __device__ __forceinline__ T Reduce( T input, ///< [in] Calling thread's input ReductionOp reduction_op) ///< [in] Binary reduction operator { return InternalWarpReduce(temp_storage).template Reduce<true, 1>(input, LOGICAL_WARP_THREADS, reduction_op); } /** * \brief Computes a partially-full warp-wide reduction in the calling warp using the specified binary reduction functor. The output is valid in warp <em>lane</em><sub>0</sub>. * * All threads across the calling warp must agree on the same value for \p valid_items. Otherwise the result is undefined. * * Supports non-commutative reduction operators * * \smemreuse * * \par Snippet * The code snippet below illustrates a max reduction within a single, partially-full * block of 32 threads (one warp). 
* \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(int *d_data, int valid_items) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item per thread if in range * int thread_data; * if (threadIdx.x < valid_items) * thread_data = d_data[threadIdx.x]; * * // Return the warp-wide reductions to each lane0 * int aggregate = WarpReduce(temp_storage).Reduce( * thread_data, cub::Max(), valid_items); * * \endcode * \par * Suppose the input \p d_data is <tt>{0, 1, 2, 3, 4, ...</tt> and \p valid_items * is \p 4. The corresponding output \p aggregate in thread0 is \p 3 (and is * undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ReductionOp> __device__ __forceinline__ T Reduce( T input, ///< [in] Calling thread's input ReductionOp reduction_op, ///< [in] Binary reduction operator int valid_items) ///< [in] Total number of valid items in the calling thread's logical warp (may be less than \p LOGICAL_WARP_THREADS) { return InternalWarpReduce(temp_storage).template Reduce<false, 1>(input, valid_items, reduction_op); } /** * \brief Computes a segmented reduction in the calling warp where segments are defined by head-flags. The reduction of each segment is returned to the first lane in that segment (which always includes <em>lane</em><sub>0</sub>). * * Supports non-commutative reduction operators * * \smemreuse * * \par Snippet * The code snippet below illustrates a head-segmented warp max * reduction within a block of 32 threads (one warp). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item and flag per thread * int thread_data = ... * int head_flag = ... * * // Return the warp-wide reductions to each lane0 * int aggregate = WarpReduce(temp_storage).HeadSegmentedReduce( * thread_data, head_flag, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data and \p head_flag across the block of threads * is <tt>{0, 1, 2, 3, ..., 31</tt> and is <tt>{1, 0, 0, 0, 1, 0, 0, 0, ..., 1, 0, 0, 0</tt>, * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be * \p 3, \p 7, \p 11, etc. (and is undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename ReductionOp, typename FlagT> __device__ __forceinline__ T HeadSegmentedReduce( T input, ///< [in] Calling thread's input FlagT head_flag, ///< [in] Head flag denoting whether or not \p input is the start of a new segment ReductionOp reduction_op) ///< [in] Reduction operator { return InternalWarpReduce(temp_storage).template SegmentedReduce<true>(input, head_flag, reduction_op); } /** * \brief Computes a segmented reduction in the calling warp where segments are defined by tail-flags. The reduction of each segment is returned to the first lane in that segment (which always includes <em>lane</em><sub>0</sub>). 
* * Supports non-commutative reduction operators * * \smemreuse * * \par Snippet * The code snippet below illustrates a tail-segmented warp max * reduction within a block of 32 threads (one warp). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpReduce for type int * typedef cub::WarpReduce<int> WarpReduce; * * // Allocate WarpReduce shared memory for one warp * __shared__ typename WarpReduce::TempStorage temp_storage; * * // Obtain one input item and flag per thread * int thread_data = ... * int tail_flag = ... * * // Return the warp-wide reductions to each lane0 * int aggregate = WarpReduce(temp_storage).TailSegmentedReduce( * thread_data, tail_flag, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data and \p tail_flag across the block of threads * is <tt>{0, 1, 2, 3, ..., 31</tt> and is <tt>{0, 0, 0, 1, 0, 0, 0, 1, ..., 0, 0, 0, 1</tt>, * respectively. The corresponding output \p aggregate in threads 0, 4, 8, etc. will be * \p 3, \p 7, \p 11, etc. (and is undefined in other threads). * * \tparam ReductionOp <b>[inferred]</b> Binary reduction operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename ReductionOp, typename FlagT> __device__ __forceinline__ T TailSegmentedReduce( T input, ///< [in] Calling thread's input FlagT tail_flag, ///< [in] Tail flag denoting whether or not \p input is the end of the current segment ReductionOp reduction_op) ///< [in] Reduction operator { return InternalWarpReduce(temp_storage).template SegmentedReduce<false>(input, tail_flag, reduction_op); } //@} end member group }; /** @} */ // end group WarpModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
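// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the upstream header; it would
// normally live in its own .cu file).  It shows one way the WarpReduce
// generic Reduce() interface documented above might be driven with a
// user-supplied functor.  The kernel name `ExampleWarpMinKernel`, the functor
// `MinOp`, and the 128-thread launch shape are assumptions chosen to mirror
// the snippets in the documentation; the include path mirrors those snippets
// and may differ in this vendored cub_semiring copy.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>

// Hypothetical commutative reduction functor.  Any type exposing
// `T operator()(const T &a, const T &b)` can be passed to WarpReduce::Reduce.
struct MinOp
{
    __device__ __forceinline__ int operator()(const int &a, const int &b) const
    {
        return (b < a) ? b : a;
    }
};

// Assumes a launch of 128-thread blocks (four 32-thread warps per block).
__global__ void ExampleWarpMinKernel(const int *d_in, int *d_warp_mins)
{
    // Specialize WarpReduce for int and provision one TempStorage per warp
    typedef cub::WarpReduce<int> WarpReduce;
    __shared__ typename WarpReduce::TempStorage temp_storage[4];

    int thread_data = d_in[blockIdx.x * blockDim.x + threadIdx.x];

    // Each warp reduces its 32 items with the custom operator
    int warp_id   = threadIdx.x / 32;
    int aggregate = WarpReduce(temp_storage[warp_id]).Reduce(thread_data, MinOp());

    // Only lane 0 of each warp holds a defined aggregate
    if ((threadIdx.x % 32) == 0)
        d_warp_mins[blockIdx.x * 4 + warp_id] = aggregate;
}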
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/warp_scan.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * The cub::WarpScan class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel prefix scan of items partitioned across a CUDA thread warp. */ #pragma once #include "specializations/warp_scan_shfl.cuh" #include "specializations/warp_scan_smem.cuh" #include "../thread/thread_operators.cuh" #include "../util_arch.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup WarpModule * @{ */ /** * \brief The WarpScan class provides [<em>collective</em>](index.html#sec0) methods for computing a parallel prefix scan of items partitioned across a CUDA thread warp. ![](warp_scan_logo.png) * * \tparam T The scan input/output element type * \tparam LOGICAL_WARP_THREADS <b>[optional]</b> The number of threads per "logical" warp (may be less than the number of hardware warp threads). Default is the warp size associated with the CUDA Compute Capability targeted by the compiler (e.g., 32 threads for SM20). * \tparam PTX_ARCH <b>[optional]</b> \ptxversion * * \par Overview * - Given a list of input elements and a binary reduction operator, a [<em>prefix scan</em>](http://en.wikipedia.org/wiki/Prefix_sum) * produces an output list where each element is computed to be the reduction * of the elements occurring earlier in the input list. <em>Prefix sum</em> * connotes a prefix scan with the addition operator. The term \em inclusive indicates * that the <em>i</em><sup>th</sup> output reduction incorporates the <em>i</em><sup>th</sup> input. * The term \em exclusive indicates the <em>i</em><sup>th</sup> input is not incorporated into * the <em>i</em><sup>th</sup> output reduction. 
* - Supports non-commutative scan operators * - Supports "logical" warps smaller than the physical warp size (e.g., a logical warp of 8 threads) * - The number of entrant threads must be an multiple of \p LOGICAL_WARP_THREADS * * \par Performance Considerations * - Uses special instructions when applicable (e.g., warp \p SHFL) * - Uses synchronization-free communication between warp lanes when applicable * - Incurs zero bank conflicts for most types * - Computation is slightly more efficient (i.e., having lower instruction overhead) for: * - Summation (<b><em>vs.</em></b> generic scan) * - The architecture's warp size is a whole multiple of \p LOGICAL_WARP_THREADS * * \par Simple Examples * \warpcollective{WarpScan} * \par * The code snippet below illustrates four concurrent warp prefix sums within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute warp-wide prefix sums * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data in each of the four warps of threads will be * <tt>0, 1, 2, 3, ..., 31}</tt>. * * \par * The code snippet below illustrates a single warp prefix sum within a block of * 128 threads. * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for one warp * __shared__ typename WarpScan::TempStorage temp_storage; * ... * * // Only the first warp performs a prefix sum * if (threadIdx.x < 32) * { * // Obtain one input item per thread * int thread_data = ... * * // Compute warp-wide prefix sums * WarpScan(temp_storage).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the warp of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data will be <tt>{0, 1, 2, 3, ..., 31}</tt>. * */ template < typename T, int LOGICAL_WARP_THREADS = CUB_PTX_WARP_THREADS, int PTX_ARCH = CUB_PTX_ARCH> class WarpScan { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// Whether the logical warp size is a power-of-two IS_POW_OF_TWO = ((LOGICAL_WARP_THREADS & (LOGICAL_WARP_THREADS - 1)) == 0), /// Whether the data type is an integer (which has fully-associative addition) IS_INTEGER = ((Traits<T>::CATEGORY == SIGNED_INTEGER) || (Traits<T>::CATEGORY == UNSIGNED_INTEGER)) }; /// Internal specialization. 
Use SHFL-based scan if (architecture is >= SM30) and (LOGICAL_WARP_THREADS is a power-of-two) typedef typename If<(PTX_ARCH >= 300) && (IS_POW_OF_TWO), WarpScanShfl<T, LOGICAL_WARP_THREADS, PTX_ARCH>, WarpScanSmem<T, LOGICAL_WARP_THREADS, PTX_ARCH> >::Type InternalWarpScan; /// Shared memory storage layout type for WarpScan typedef typename InternalWarpScan::TempStorage _TempStorage; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; unsigned int lane_id; /****************************************************************************** * Public types ******************************************************************************/ public: /// \smemstorage{WarpScan} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using the specified memory allocation as temporary storage. Logical warp and lane identifiers are constructed from <tt>threadIdx.x</tt>. */ __device__ __forceinline__ WarpScan( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), lane_id(IS_ARCH_WARP ? LaneId() : LaneId() % LOGICAL_WARP_THREADS) {} //@} end member group /******************************************************************//** * \name Inclusive prefix sums *********************************************************************/ //@{ /** * \brief Computes an inclusive prefix sum across the calling warp. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute inclusive warp-wide prefix sums * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data in each of the four warps of threads will be * <tt>1, 2, 3, ..., 32}</tt>. */ __device__ __forceinline__ void InclusiveSum( T input, ///< [in] Calling thread's input item. T &inclusive_output) ///< [out] Calling thread's output item. May be aliased with \p input. { InclusiveScan(input, inclusive_output, cub::Sum()); } /** * \brief Computes an inclusive prefix sum across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide inclusive prefix sums within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute inclusive warp-wide prefix sums * int warp_aggregate; * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).InclusiveSum(thread_data, thread_data, warp_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data in each of the four warps of threads will be * <tt>1, 2, 3, ..., 32}</tt>. Furthermore, \p warp_aggregate for all threads in all warps will be \p 32. */ __device__ __forceinline__ void InclusiveSum( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InclusiveScan(input, inclusive_output, cub::Sum(), warp_aggregate); } //@} end member group /******************************************************************//** * \name Exclusive prefix sums *********************************************************************/ //@{ /** * \brief Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the initial value, and is assigned to \p exclusive_output in <em>thread</em><sub>0</sub>. * * \par * - \identityzero * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix sums * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data in each of the four warps of threads will be * <tt>0, 1, 2, ..., 31}</tt>. * */ __device__ __forceinline__ void ExclusiveSum( T input, ///< [in] Calling thread's input item. T &exclusive_output) ///< [out] Calling thread's output item. May be aliased with \p input. { T initial_value = 0; ExclusiveScan(input, exclusive_output, initial_value, cub::Sum()); } /** * \brief Computes an exclusive prefix sum across the calling warp. The value of 0 is applied as the initial value, and is assigned to \p exclusive_output in <em>thread</em><sub>0</sub>. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. * * \par * - \identityzero * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix sums within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... 
* * // Compute exclusive warp-wide prefix sums * int warp_aggregate; * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data, warp_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{1, 1, 1, 1, ...}</tt>. * The corresponding output \p thread_data in each of the four warps of threads will be * <tt>0, 1, 2, ..., 31}</tt>. Furthermore, \p warp_aggregate for all threads in all warps will be \p 32. */ __device__ __forceinline__ void ExclusiveSum( T input, ///< [in] Calling thread's input item. T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { T initial_value = 0; ExclusiveScan(input, exclusive_output, initial_value, cub::Sum(), warp_aggregate); } //@} end member group /******************************************************************//** * \name Inclusive prefix scans *********************************************************************/ //@{ /** * \brief Computes an inclusive prefix scan using the specified binary scan functor across the calling warp. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute inclusive warp-wide prefix max scans * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).InclusiveScan(thread_data, thread_data, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>0, 0, 2, 2, ..., 30, 30</tt>, the output for the second warp would be <tt>32, 32, 34, 34, ..., 62, 62</tt>, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op) ///< [in] Binary scan operator { InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op); } /** * \brief Computes an inclusive prefix scan using the specified binary scan functor across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide inclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... 
* * // Compute inclusive warp-wide prefix max scans * int warp_aggregate; * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).InclusiveScan( * thread_data, thread_data, cub::Max(), warp_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>0, 0, 2, 2, ..., 30, 30</tt>, the output for the second warp would be <tt>32, 32, 34, 34, ..., 62, 62</tt>, etc. * Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads * in the second warp, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InternalWarpScan(temp_storage).InclusiveScan(input, inclusive_output, scan_op, warp_aggregate); } //@} end member group /******************************************************************//** * \name Exclusive prefix scans *********************************************************************/ //@{ /** * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p output computed for <em>warp-lane</em><sub>0</sub> is undefined. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix max scans * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>?, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>?, 32, 32, 34, ..., 60, 62</tt>, etc. * (The output \p thread_data in warp lane<sub>0</sub> is undefined.) * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op) ///< [in] Binary scan operator { InternalWarpScan internal(temp_storage); T inclusive_output; internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, scan_op, Int2Type<IS_INTEGER>()); } /** * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. 
* * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix max scans * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>INT_MIN, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>30, 32, 32, 34, ..., 60, 62</tt>, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. T initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { InternalWarpScan internal(temp_storage); T inclusive_output; internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, scan_op, initial_value, Int2Type<IS_INTEGER>()); } /** * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p output computed for <em>warp-lane</em><sub>0</sub> is undefined. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix max scans * int warp_aggregate; * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, cub::Max(), warp_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>?, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>?, 32, 32, 34, ..., 60, 62</tt>, etc. * (The output \p thread_data in warp lane<sub>0</sub> is undefined.) Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads * in the second warp, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. 
T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InternalWarpScan internal(temp_storage); T inclusive_output; internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, warp_aggregate, scan_op, Int2Type<IS_INTEGER>()); } /** * \brief Computes an exclusive prefix scan using the specified binary scan functor across the calling warp. Also provides every thread with the warp-wide \p warp_aggregate of all inputs. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix max scans * int warp_aggregate; * int warp_id = threadIdx.x / 32; * WarpScan(temp_storage[warp_id]).ExclusiveScan(thread_data, thread_data, INT_MIN, cub::Max(), warp_aggregate); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p thread_data in the first warp would be * <tt>INT_MIN, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>30, 32, 32, 34, ..., 60, 62</tt>, etc. * Furthermore, \p warp_aggregate would be assigned \p 30 for threads in the first warp, \p 62 for threads * in the second warp, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void ExclusiveScan( T input, ///< [in] Calling thread's input item. T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. T initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InternalWarpScan internal(temp_storage); T inclusive_output; internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, warp_aggregate, scan_op, initial_value, Int2Type<IS_INTEGER>()); } //@} end member group /******************************************************************//** * \name Combination (inclusive & exclusive) prefix scans *********************************************************************/ //@{ /** * \brief Computes both inclusive and exclusive prefix scans using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p exclusive_output computed for <em>warp-lane</em><sub>0</sub> is undefined. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide exclusive prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) 
* { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute exclusive warp-wide prefix max scans * int inclusive_partial, exclusive_partial; * WarpScan(temp_storage[warp_id]).Scan(thread_data, inclusive_partial, exclusive_partial, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p inclusive_partial in the first warp would be * <tt>0, 0, 2, 2, ..., 30, 30</tt>, the output for the second warp would be <tt>32, 32, 34, 34, ..., 62, 62</tt>, etc. * The corresponding output \p exclusive_partial in the first warp would be * <tt>?, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>?, 32, 32, 34, ..., 60, 62</tt>, etc. * (The output \p thread_data in warp lane<sub>0</sub> is undefined.) * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void Scan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. ScanOp scan_op) ///< [in] Binary scan operator { InternalWarpScan internal(temp_storage); internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, scan_op, Int2Type<IS_INTEGER>()); } /** * \brief Computes both inclusive and exclusive prefix scans using the specified binary scan functor across the calling warp. * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates four concurrent warp-wide prefix max scans within a block of * 128 threads (one per each of the 32-thread warps). * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Compute inclusive warp-wide prefix max scans * int warp_id = threadIdx.x / 32; * int inclusive_partial, exclusive_partial; * WarpScan(temp_storage[warp_id]).Scan(thread_data, inclusive_partial, exclusive_partial, INT_MIN, cub::Max()); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, -1, 2, -3, ..., 126, -127}</tt>. * The corresponding output \p inclusive_partial in the first warp would be * <tt>0, 0, 2, 2, ..., 30, 30</tt>, the output for the second warp would be <tt>32, 32, 34, 34, ..., 62, 62</tt>, etc. * The corresponding output \p exclusive_partial in the first warp would be * <tt>INT_MIN, 0, 0, 2, ..., 28, 30</tt>, the output for the second warp would be <tt>30, 32, 32, 34, ..., 60, 62</tt>, etc. * * \tparam ScanOp <b>[inferred]</b> Binary scan operator type having member <tt>T operator()(const T &a, const T &b)</tt> */ template <typename ScanOp> __device__ __forceinline__ void Scan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item. T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item. 
T initial_value, ///< [in] Initial value to seed the exclusive scan ScanOp scan_op) ///< [in] Binary scan operator { InternalWarpScan internal(temp_storage); internal.InclusiveScan(input, inclusive_output, scan_op); internal.Update( input, inclusive_output, exclusive_output, scan_op, initial_value, Int2Type<IS_INTEGER>()); } //@} end member group /******************************************************************//** * \name Data exchange *********************************************************************/ //@{ /** * \brief Broadcast the value \p input from <em>warp-lane</em><sub><tt>src_lane</tt></sub> to all lanes in the warp * * \par * - \smemreuse * * \par Snippet * The code snippet below illustrates the warp-wide broadcasts of values from * lanes<sub>0</sub> in each of four warps to all other threads in those warps. * \par * \code * #include <cub/cub.cuh> * * __global__ void ExampleKernel(...) * { * // Specialize WarpScan for type int * typedef cub::WarpScan<int> WarpScan; * * // Allocate WarpScan shared memory for 4 warps * __shared__ typename WarpScan::TempStorage temp_storage[4]; * * // Obtain one input item per thread * int thread_data = ... * * // Broadcast from lane0 in each warp to all other threads in the warp * int warp_id = threadIdx.x / 32; * thread_data = WarpScan(temp_storage[warp_id]).Broadcast(thread_data, 0); * * \endcode * \par * Suppose the set of input \p thread_data across the block of threads is <tt>{0, 1, 2, 3, ..., 127}</tt>. * The corresponding output \p thread_data will be * <tt>{0, 0, ..., 0}</tt> in warp<sub>0</sub>, * <tt>{32, 32, ..., 32}</tt> in warp<sub>1</sub>, * <tt>{64, 64, ..., 64}</tt> in warp<sub>2</sub>, etc. */ __device__ __forceinline__ T Broadcast( T input, ///< [in] The value to broadcast unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting { return InternalWarpScan(temp_storage).Broadcast(input, src_lane); } //@} end member group }; /** @} */ // end group WarpModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
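// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of the upstream header; it would
// normally live in its own .cu file).  It exercises the ExclusiveSum overload
// documented above that also returns the warp-wide aggregate.  The kernel
// name `ExampleWarpPrefixSumKernel` and the 128-thread launch shape are
// assumptions chosen to mirror the snippets in the documentation; the include
// path mirrors those snippets and may differ in this vendored copy.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>

// Assumes a launch of 128-thread blocks (four 32-thread warps per block).
__global__ void ExampleWarpPrefixSumKernel(const int *d_in, int *d_out)
{
    // Specialize WarpScan for int and provision one TempStorage per warp
    typedef cub::WarpScan<int> WarpScan;
    __shared__ typename WarpScan::TempStorage temp_storage[4];

    int idx         = blockIdx.x * blockDim.x + threadIdx.x;
    int thread_data = d_in[idx];

    // Each 32-thread warp computes an independent exclusive prefix sum;
    // every lane also receives the warp-wide total in warp_aggregate
    int warp_id = threadIdx.x / 32;
    int warp_aggregate;
    WarpScan(temp_storage[warp_id]).ExclusiveSum(thread_data, thread_data, warp_aggregate);

    // thread_data now holds the exclusive prefix sum within the warp
    // (0 in lane 0); warp_aggregate holds the sum of all 32 warp inputs
    d_out[idx] = thread_data;
}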
0
rapidsai_public_repos/nvgraph/external/cub_semiring/warp
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/specializations/warp_reduce_shfl.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned across a CUDA thread warp. */ #pragma once #include "../../thread/thread_operators.cuh" #include "../../util_ptx.cuh" #include "../../util_type.cuh" #include "../../util_macro.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief WarpReduceShfl provides SHFL-based variants of parallel reduction of items partitioned across a CUDA thread warp. 
* * LOGICAL_WARP_THREADS must be a power-of-two */ template < typename T, ///< Data type being reduced int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct WarpReduceShfl { //--------------------------------------------------------------------- // Constants and type definitions //--------------------------------------------------------------------- enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// The number of warp reduction steps STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE, /// Number of logical warps in a PTX warp LOGICAL_WARPS = CUB_WARP_THREADS(PTX_ARCH) / LOGICAL_WARP_THREADS, }; template <typename S> struct IsInteger { enum { ///Whether the data type is a small (32b or less) integer for which we can use a single SFHL instruction per exchange IS_SMALL_UNSIGNED = (Traits<S>::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) }; }; // Creates a mask where the last thread in each logical warp is set template <int WARP, int WARPS> struct LastLaneMask { enum { BASE_MASK = 1 << (LOGICAL_WARP_THREADS - 1), MASK = (LastLaneMask<WARP + 1, WARPS>::MASK << LOGICAL_WARP_THREADS) | BASE_MASK, }; }; // Creates a mask where the last thread in each logical warp is set template <int WARP> struct LastLaneMask<WARP, WARP> { enum { MASK = 1 << (LOGICAL_WARP_THREADS - 1), }; }; /// Shared memory storage layout type typedef NullType TempStorage; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- unsigned int lane_id; unsigned int member_mask; //--------------------------------------------------------------------- // Construction //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ WarpReduceShfl( TempStorage &/*temp_storage*/) : lane_id(LaneId()), member_mask((0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << ((IS_ARCH_WARP) ? 0 : // arch-width subwarps need not be tiled within the arch-warp ((lane_id / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS))) {} //--------------------------------------------------------------------- // Reduction steps //--------------------------------------------------------------------- /// Reduction (specialized for summation across uint32 types) __device__ __forceinline__ unsigned int ReduceStep( unsigned int input, ///< [in] Calling thread's input item. cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { unsigned int output; // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 r0;" " .reg .pred p;" " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" " @p add.u32 r0, r0, %4;" " mov.u32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(last_lane), "r"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 r0;" " .reg .pred p;" " shfl.down.b32 r0|p, %1, %2, %3;" " @p add.u32 r0, r0, %4;" " mov.u32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(last_lane), "r"(input)); #endif return output; } /// Reduction (specialized for summation across fp32 types) __device__ __forceinline__ float ReduceStep( float input, ///< [in] Calling thread's input item. 
cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { float output; // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.down.b32 r0|p, %1, %2, %3, %5;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(last_lane), "f"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.down.b32 r0|p, %1, %2, %3;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(last_lane), "f"(input)); #endif return output; } /// Reduction (specialized for summation across unsigned long long types) __device__ __forceinline__ unsigned long long ReduceStep( unsigned long long input, ///< [in] Calling thread's input item. cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { unsigned long long output; #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" " mov.b64 %0, {lo, hi};" " @p add.u64 %0, %0, %1;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.down.b32 lo|p, lo, %2, %3;" " shfl.down.b32 hi|p, hi, %2, %3;" " mov.b64 %0, {lo, hi};" " @p add.u64 %0, %0, %1;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane)); #endif return output; } /// Reduction (specialized for summation across long long types) __device__ __forceinline__ long long ReduceStep( long long input, ///< [in] Calling thread's input item. cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { long long output; // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" " mov.b64 %0, {lo, hi};" " @p add.s64 %0, %0, %1;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.down.b32 lo|p, lo, %2, %3;" " shfl.down.b32 hi|p, hi, %2, %3;" " mov.b64 %0, {lo, hi};" " @p add.s64 %0, %0, %1;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(last_lane)); #endif return output; } /// Reduction (specialized for summation across double types) __device__ __forceinline__ double ReduceStep( double input, ///< [in] Calling thread's input item. 
cub::Sum /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { double output; // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " .reg .f64 r0;" " mov.b64 %0, %1;" " mov.b64 {lo, hi}, %1;" " shfl.sync.down.b32 lo|p, lo, %2, %3, %4;" " shfl.sync.down.b32 hi|p, hi, %2, %3, %4;" " mov.b64 r0, {lo, hi};" " @p add.f64 %0, %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(last_lane), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " .reg .f64 r0;" " mov.b64 %0, %1;" " mov.b64 {lo, hi}, %1;" " shfl.down.b32 lo|p, lo, %2, %3;" " shfl.down.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " @p add.f64 %0, %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(last_lane)); #endif return output; } /// Reduction (specialized for swizzled ReduceByKeyOp<cub::Sum> across KeyValuePair<KeyT, ValueT> types) template <typename ValueT, typename KeyT> __device__ __forceinline__ KeyValuePair<KeyT, ValueT> ReduceStep( KeyValuePair<KeyT, ValueT> input, ///< [in] Calling thread's input item. SwizzleScanOp<ReduceByKeyOp<cub::Sum> > /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { KeyValuePair<KeyT, ValueT> output; KeyT other_key = ShuffleDown(input.key, offset, last_lane, member_mask); output.key = input.key; output.value = ReduceStep( input.value, cub::Sum(), last_lane, offset, Int2Type<IsInteger<ValueT>::IS_SMALL_UNSIGNED>()); if (input.key != other_key) output.value = input.value; return output; } /// Reduction (specialized for swizzled ReduceBySegmentOp<cub::Sum> across KeyValuePair<OffsetT, ValueT> types) template <typename ValueT, typename OffsetT> __device__ __forceinline__ KeyValuePair<OffsetT, ValueT> ReduceStep( KeyValuePair<OffsetT, ValueT> input, ///< [in] Calling thread's input item. SwizzleScanOp<ReduceBySegmentOp<cub::Sum> > /*reduction_op*/, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { KeyValuePair<OffsetT, ValueT> output; output.value = ReduceStep(input.value, cub::Sum(), last_lane, offset, Int2Type<IsInteger<ValueT>::IS_SMALL_UNSIGNED>()); output.key = ReduceStep(input.key, cub::Sum(), last_lane, offset, Int2Type<IsInteger<OffsetT>::IS_SMALL_UNSIGNED>()); if (input.key > 0) output.value = input.value; return output; } /// Reduction step (generic) template <typename _T, typename ReductionOp> __device__ __forceinline__ _T ReduceStep( _T input, ///< [in] Calling thread's input item. ReductionOp reduction_op, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset) ///< [in] Up-offset to pull from { _T output = input; _T temp = ShuffleDown(output, offset, last_lane, member_mask); // Perform reduction op if valid if (offset + lane_id <= last_lane) output = reduction_op(input, temp); return output; } /// Reduction step (specialized for small unsigned integers size 32b or less) template <typename _T, typename ReductionOp> __device__ __forceinline__ _T ReduceStep( _T input, ///< [in] Calling thread's input item. 
ReductionOp reduction_op, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset, ///< [in] Up-offset to pull from Int2Type<true> /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small unsigned integer { return ReduceStep(input, reduction_op, last_lane, offset); } /// Reduction step (specialized for types other than small unsigned integers size 32b or less) template <typename _T, typename ReductionOp> __device__ __forceinline__ _T ReduceStep( _T input, ///< [in] Calling thread's input item. ReductionOp reduction_op, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment int offset, ///< [in] Up-offset to pull from Int2Type<false> /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small unsigned integer { return ReduceStep(input, reduction_op, last_lane, offset); } //--------------------------------------------------------------------- // Templated inclusive scan iteration //--------------------------------------------------------------------- template <typename ReductionOp, int STEP> __device__ __forceinline__ void ReduceStep( T& input, ///< [in] Calling thread's input item. ReductionOp reduction_op, ///< [in] Binary reduction operator int last_lane, ///< [in] Index of last lane in segment Int2Type<STEP> /*step*/) { input = ReduceStep(input, reduction_op, last_lane, 1 << STEP, Int2Type<IsInteger<T>::IS_SMALL_UNSIGNED>()); ReduceStep(input, reduction_op, last_lane, Int2Type<STEP + 1>()); } template <typename ReductionOp> __device__ __forceinline__ void ReduceStep( T& /*input*/, ///< [in] Calling thread's input item. ReductionOp /*reduction_op*/, ///< [in] Binary reduction operator int /*last_lane*/, ///< [in] Index of last lane in segment Int2Type<STEPS> /*step*/) {} //--------------------------------------------------------------------- // Reduction operations //--------------------------------------------------------------------- /// Reduction template < bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane typename ReductionOp> __device__ __forceinline__ T Reduce( T input, ///< [in] Calling thread's input int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp ReductionOp reduction_op) ///< [in] Binary reduction operator { // Get the lane of the first and last thread in the logical warp int first_thread = 0; int last_thread = LOGICAL_WARP_THREADS - 1; if (!IS_ARCH_WARP) { first_thread = lane_id & (~(LOGICAL_WARP_THREADS - 1)); last_thread |= lane_id; } // Common case is FOLDED_ITEMS_PER_LANE = 1 (or a multiple of 32) int lanes_with_valid_data = (folded_items_per_warp - 1) / FOLDED_ITEMS_PER_LANE; // Get the last valid lane int last_lane = (ALL_LANES_VALID) ? 
last_thread : CUB_MIN(last_thread, first_thread + lanes_with_valid_data); T output = input; // // Iterate reduction steps // #pragma unroll // for (int STEP = 0; STEP < STEPS; STEP++) // { // output = ReduceStep(output, reduction_op, last_lane, 1 << STEP, Int2Type<IsInteger<T>::IS_SMALL_UNSIGNED>()); // } // Template-iterate reduction steps ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); return output; } /// Segmented reduction template < bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail typename FlagT, typename ReductionOp> __device__ __forceinline__ T SegmentedReduce( T input, ///< [in] Calling thread's input FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail ReductionOp reduction_op) ///< [in] Binary reduction operator { // Get the start flags for each thread in the warp. int warp_flags = WARP_BALLOT(flag, member_mask); // Convert to tail-segmented if (HEAD_SEGMENTED) warp_flags >>= 1; // Mask in the last lanes of each logical warp warp_flags |= LastLaneMask<1, LOGICAL_WARPS>::MASK; // Mask out the bits below the current thread warp_flags &= LaneMaskGe(); // Find the next set flag int last_lane = __clz(__brev(warp_flags)); T output = input; // // Iterate reduction steps // #pragma unroll // for (int STEP = 0; STEP < STEPS; STEP++) // { // output = ReduceStep(output, reduction_op, last_lane, 1 << STEP, Int2Type<IsInteger<T>::IS_SMALL_UNSIGNED>()); // } // Template-iterate reduction steps ReduceStep(output, reduction_op, last_lane, Int2Type<0>()); return output; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
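/**
 * Usage sketch (editorial addition; not part of the original CUB source).
 *
 * WarpReduceShfl is an internal specialization that the public cub::WarpReduce
 * front-end is expected to select when warp shuffles are available and
 * LOGICAL_WARP_THREADS is a power-of-two.  The kernel below is a minimal,
 * hedged illustration of how a full-warp sum typically reaches this SHFL-based
 * path; the include path, kernel name, and pointer names (ExampleWarpSumKernel,
 * d_in, d_out) are illustrative assumptions, not part of this header.
 *
 * \code
 * #include <cub/cub.cuh>
 *
 * // Launched with one 32-thread warp per block
 * __global__ void ExampleWarpSumKernel(const int *d_in, int *d_out)
 * {
 *     // Default LOGICAL_WARP_THREADS is the architecture's warp width (32)
 *     typedef cub::WarpReduce<int> WarpReduce;
 *
 *     // The SHFL path needs no shared memory (its TempStorage is a NullType
 *     // alias above), but the public interface still takes an instance
 *     __shared__ typename WarpReduce::TempStorage temp_storage;
 *
 *     int thread_data = d_in[blockIdx.x * 32 + threadIdx.x];
 *
 *     // Warp-wide sum; the result is only guaranteed to be valid in lane 0
 *     int warp_sum = WarpReduce(temp_storage).Sum(thread_data);
 *
 *     if (threadIdx.x == 0)
 *         d_out[blockIdx.x] = warp_sum;
 * }
 * \endcode
 */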
0
rapidsai_public_repos/nvgraph/external/cub_semiring/warp
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/specializations/warp_reduce_smem.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned across a CUDA thread warp. */ #pragma once #include "../../thread/thread_operators.cuh" #include "../../thread/thread_load.cuh" #include "../../thread/thread_store.cuh" #include "../../util_type.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief WarpReduceSmem provides smem-based variants of parallel reduction of items partitioned across a CUDA thread warp. 
*/ template < typename T, ///< Data type being reduced int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct WarpReduceSmem { /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// Whether the logical warp size is a power-of-two IS_POW_OF_TWO = PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, /// The number of warp scan steps STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE, /// The number of threads in half a warp HALF_WARP_THREADS = 1 << (STEPS - 1), /// The number of shared memory elements per warp WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, /// FlagT status (when not using ballot) UNSET = 0x0, // Is initially unset SET = 0x1, // Is initially set SEEN = 0x2, // Has seen another head flag from a successor peer }; /// Shared memory flag type typedef unsigned char SmemFlag; /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) struct _TempStorage { T reduce[WARP_SMEM_ELEMENTS]; SmemFlag flags[WARP_SMEM_ELEMENTS]; }; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /****************************************************************************** * Thread fields ******************************************************************************/ _TempStorage &temp_storage; unsigned int lane_id; unsigned int member_mask; /****************************************************************************** * Construction ******************************************************************************/ /// Constructor __device__ __forceinline__ WarpReduceSmem( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), lane_id(IS_ARCH_WARP ? LaneId() : LaneId() % LOGICAL_WARP_THREADS), member_mask((0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << ((IS_ARCH_WARP || !IS_POW_OF_TWO ) ? 
0 : // arch-width and non-power-of-two subwarps cannot be tiled with the arch-warp ((LaneId() / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS))) {} /****************************************************************************** * Utility methods ******************************************************************************/ //--------------------------------------------------------------------- // Regular reduction //--------------------------------------------------------------------- /** * Reduction step */ template < bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane typename ReductionOp, int STEP> __device__ __forceinline__ T ReduceStep( T input, ///< [in] Calling thread's input int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp ReductionOp reduction_op, ///< [in] Reduction operator Int2Type<STEP> /*step*/) { const int OFFSET = 1 << STEP; // Share input through buffer ThreadStore<STORE_VOLATILE>(&temp_storage.reduce[lane_id], input); WARP_SYNC(member_mask); // Update input if peer_addend is in range if ((ALL_LANES_VALID && IS_POW_OF_TWO) || ((lane_id + OFFSET) * FOLDED_ITEMS_PER_LANE < folded_items_per_warp)) { T peer_addend = ThreadLoad<LOAD_VOLATILE>(&temp_storage.reduce[lane_id + OFFSET]); input = reduction_op(input, peer_addend); } WARP_SYNC(member_mask); return ReduceStep<ALL_LANES_VALID, FOLDED_ITEMS_PER_LANE>(input, folded_items_per_warp, reduction_op, Int2Type<STEP + 1>()); } /** * Reduction step (terminate) */ template < bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane typename ReductionOp> __device__ __forceinline__ T ReduceStep( T input, ///< [in] Calling thread's input int /*folded_items_per_warp*/, ///< [in] Total number of valid items folded into each logical warp ReductionOp /*reduction_op*/, ///< [in] Reduction operator Int2Type<STEPS> /*step*/) { return input; } //--------------------------------------------------------------------- // Segmented reduction //--------------------------------------------------------------------- /** * Ballot-based segmented reduce */ template < bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail typename FlagT, typename ReductionOp> __device__ __forceinline__ T SegmentedReduce( T input, ///< [in] Calling thread's input FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail ReductionOp reduction_op, ///< [in] Reduction operator Int2Type<true> /*has_ballot*/) ///< [in] Marker type for whether the target arch has ballot functionality { // Get the start flags for each thread in the warp. int warp_flags = WARP_BALLOT(flag, member_mask); if (!HEAD_SEGMENTED) warp_flags <<= 1; // Keep bits above the current thread. 
warp_flags &= LaneMaskGt(); // Accommodate packing of multiple logical warps in a single physical warp if (!IS_ARCH_WARP) { warp_flags >>= (LaneId() / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS; } // Find next flag int next_flag = __clz(__brev(warp_flags)); // Clip the next segment at the warp boundary if necessary if (LOGICAL_WARP_THREADS != 32) next_flag = CUB_MIN(next_flag, LOGICAL_WARP_THREADS); #pragma unroll for (int STEP = 0; STEP < STEPS; STEP++) { const int OFFSET = 1 << STEP; // Share input into buffer ThreadStore<STORE_VOLATILE>(&temp_storage.reduce[lane_id], input); WARP_SYNC(member_mask); // Update input if peer_addend is in range if (OFFSET + lane_id < next_flag) { T peer_addend = ThreadLoad<LOAD_VOLATILE>(&temp_storage.reduce[lane_id + OFFSET]); input = reduction_op(input, peer_addend); } WARP_SYNC(member_mask); } return input; } /** * Smem-based segmented reduce */ template < bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail typename FlagT, typename ReductionOp> __device__ __forceinline__ T SegmentedReduce( T input, ///< [in] Calling thread's input FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail ReductionOp reduction_op, ///< [in] Reduction operator Int2Type<false> /*has_ballot*/) ///< [in] Marker type for whether the target arch has ballot functionality { enum { UNSET = 0x0, // Is initially unset SET = 0x1, // Is initially set SEEN = 0x2, // Has seen another head flag from a successor peer }; // Alias flags onto shared data storage volatile SmemFlag *flag_storage = temp_storage.flags; SmemFlag flag_status = (flag) ? SET : UNSET; for (int STEP = 0; STEP < STEPS; STEP++) { const int OFFSET = 1 << STEP; // Share input through buffer ThreadStore<STORE_VOLATILE>(&temp_storage.reduce[lane_id], input); WARP_SYNC(member_mask); // Get peer from buffer T peer_addend = ThreadLoad<LOAD_VOLATILE>(&temp_storage.reduce[lane_id + OFFSET]); WARP_SYNC(member_mask); // Share flag through buffer flag_storage[lane_id] = flag_status; // Get peer flag from buffer SmemFlag peer_flag_status = flag_storage[lane_id + OFFSET]; // Update input if peer was in range if (lane_id < LOGICAL_WARP_THREADS - OFFSET) { if (HEAD_SEGMENTED) { // Head-segmented if ((flag_status & SEEN) == 0) { // Has not seen a more distant head flag if (peer_flag_status & SET) { // Has now seen a head flag flag_status |= SEEN; } else { // Peer is not a head flag: grab its count input = reduction_op(input, peer_addend); } // Update seen status to include that of peer flag_status |= (peer_flag_status & SEEN); } } else { // Tail-segmented. 
Simply propagate flag status if (!flag_status) { input = reduction_op(input, peer_addend); flag_status |= peer_flag_status; } } } } return input; } /****************************************************************************** * Interface ******************************************************************************/ /** * Reduction */ template < bool ALL_LANES_VALID, ///< Whether all lanes in each warp are contributing a valid fold of items int FOLDED_ITEMS_PER_LANE, ///< Number of items folded into each lane typename ReductionOp> __device__ __forceinline__ T Reduce( T input, ///< [in] Calling thread's input int folded_items_per_warp, ///< [in] Total number of valid items folded into each logical warp ReductionOp reduction_op) ///< [in] Reduction operator { return ReduceStep<ALL_LANES_VALID, FOLDED_ITEMS_PER_LANE>(input, folded_items_per_warp, reduction_op, Int2Type<0>()); } /** * Segmented reduction */ template < bool HEAD_SEGMENTED, ///< Whether flags indicate a segment-head or a segment-tail typename FlagT, typename ReductionOp> __device__ __forceinline__ T SegmentedReduce( T input, ///< [in] Calling thread's input FlagT flag, ///< [in] Whether or not the current lane is a segment head/tail ReductionOp reduction_op) ///< [in] Reduction operator { return SegmentedReduce<HEAD_SEGMENTED>(input, flag, reduction_op, Int2Type<(PTX_ARCH >= 200)>()); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
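/**
 * Usage sketch (editorial addition; not part of the original CUB source).
 *
 * WarpReduceSmem is the shared-memory fallback that the cub::WarpReduce
 * front-end is expected to select when warp shuffles are unavailable or the
 * logical warp size is not a power-of-two.  The hypothetical kernel below
 * (ExampleHeadSegmentedSumKernel, d_in, d_head_flags, d_out are illustrative
 * names) shows a flag-delimited segmented warp sum, the operation served by
 * the SegmentedReduce machinery in this file; which internal specialization
 * actually runs depends on the target architecture.
 *
 * \code
 * #include <cub/cub.cuh>
 *
 * // Launched with a single 32-thread warp per block
 * __global__ void ExampleHeadSegmentedSumKernel(
 *     const int *d_in, const int *d_head_flags, int *d_out)
 * {
 *     typedef cub::WarpReduce<int> WarpReduce;
 *     __shared__ typename WarpReduce::TempStorage temp_storage;
 *
 *     int thread_data = d_in[threadIdx.x];
 *     int head_flag   = d_head_flags[threadIdx.x];   // 1 marks the first item of a segment
 *
 *     // Sum of each flag-delimited segment; valid in each segment's first lane
 *     int segment_sum = WarpReduce(temp_storage).HeadSegmentedSum(thread_data, head_flag);
 *
 *     if (head_flag)
 *         d_out[threadIdx.x] = segment_sum;
 * }
 * \endcode
 */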
0
rapidsai_public_repos/nvgraph/external/cub_semiring/warp
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/specializations/warp_scan_shfl.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. */ #pragma once #include "../../thread/thread_operators.cuh" #include "../../util_type.cuh" #include "../../util_ptx.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief WarpScanShfl provides SHFL-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. 
* * LOGICAL_WARP_THREADS must be a power-of-two */ template < typename T, ///< Data type being scanned int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct WarpScanShfl { //--------------------------------------------------------------------- // Constants and type definitions //--------------------------------------------------------------------- enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// The number of warp scan steps STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE, /// The 5-bit SHFL mask for logically splitting warps into sub-segments starts 8-bits up SHFL_C = ((0xFFFFFFFFU << STEPS) & 31) << 8, }; template <typename S> struct IntegerTraits { enum { ///Whether the data type is a small (32b or less) integer for which we can use a single SFHL instruction per exchange IS_SMALL_UNSIGNED = (Traits<S>::CATEGORY == UNSIGNED_INTEGER) && (sizeof(S) <= sizeof(unsigned int)) }; }; /// Shared memory storage layout type struct TempStorage {}; //--------------------------------------------------------------------- // Thread fields //--------------------------------------------------------------------- unsigned int lane_id; unsigned int member_mask; //--------------------------------------------------------------------- // Construction //--------------------------------------------------------------------- /// Constructor __device__ __forceinline__ WarpScanShfl( TempStorage &/*temp_storage*/) : lane_id(LaneId()), member_mask((0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << ((IS_ARCH_WARP) ? 0 : // arch-width subwarps need not be tiled within the arch-warp ((lane_id / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS))) {} //--------------------------------------------------------------------- // Inclusive scan steps //--------------------------------------------------------------------- /// Inclusive prefix scan step (specialized for summation across int32 types) __device__ __forceinline__ int InclusiveScanStep( int input, ///< [in] Calling thread's input item. cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { int output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .s32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p add.s32 r0, r0, %4;" " mov.s32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .s32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p add.s32 r0, r0, %4;" " mov.s32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input)); #endif return output; } /// Inclusive prefix scan step (specialized for summation across uint32 types) __device__ __forceinline__ unsigned int InclusiveScanStep( unsigned int input, ///< [in] Calling thread's input item. 
cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { unsigned int output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p add.u32 r0, r0, %4;" " mov.u32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p add.u32 r0, r0, %4;" " mov.u32 %0, r0;" "}" : "=r"(output) : "r"(input), "r"(offset), "r"(shfl_c), "r"(input)); #endif return output; } /// Inclusive prefix scan step (specialized for summation across fp32 types) __device__ __forceinline__ float InclusiveScanStep( float input, ///< [in] Calling thread's input item. cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { float output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.sync.up.b32 r0|p, %1, %2, %3, %5;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(shfl_c), "f"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .f32 r0;" " .reg .pred p;" " shfl.up.b32 r0|p, %1, %2, %3;" " @p add.f32 r0, r0, %4;" " mov.f32 %0, r0;" "}" : "=f"(output) : "f"(input), "r"(offset), "r"(shfl_c), "f"(input)); #endif return output; } /// Inclusive prefix scan step (specialized for summation across unsigned long long types) __device__ __forceinline__ unsigned long long InclusiveScanStep( unsigned long long input, ///< [in] Calling thread's input item. cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { unsigned long long output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u64 r0;" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " @p add.u64 r0, r0, %4;" " mov.u64 %0, r0;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .u64 r0;" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " @p add.u64 r0, r0, %4;" " mov.u64 %0, r0;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input)); #endif return output; } /// Inclusive prefix scan step (specialized for summation across long long types) __device__ __forceinline__ long long InclusiveScanStep( long long input, ///< [in] Calling thread's input item. 
cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { long long output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .s64 r0;" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %5;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %5;" " mov.b64 r0, {lo, hi};" " @p add.s64 r0, r0, %4;" " mov.s64 %0, r0;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input), "r"(member_mask)); #else asm volatile( "{" " .reg .s64 r0;" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " @p add.s64 r0, r0, %4;" " mov.s64 %0, r0;" "}" : "=l"(output) : "l"(input), "r"(offset), "r"(shfl_c), "l"(input)); #endif return output; } /// Inclusive prefix scan step (specialized for summation across fp64 types) __device__ __forceinline__ double InclusiveScanStep( double input, ///< [in] Calling thread's input item. cub::Sum /*scan_op*/, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { double output; int shfl_c = first_lane | SHFL_C; // Shuffle control (mask and first-lane) // Use predicate set from SHFL to guard against invalid peers #ifdef CUB_USE_COOPERATIVE_GROUPS asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " .reg .f64 r0;" " mov.b64 %0, %1;" " mov.b64 {lo, hi}, %1;" " shfl.sync.up.b32 lo|p, lo, %2, %3, %4;" " shfl.sync.up.b32 hi|p, hi, %2, %3, %4;" " mov.b64 r0, {lo, hi};" " @p add.f64 %0, %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(shfl_c), "r"(member_mask)); #else asm volatile( "{" " .reg .u32 lo;" " .reg .u32 hi;" " .reg .pred p;" " .reg .f64 r0;" " mov.b64 %0, %1;" " mov.b64 {lo, hi}, %1;" " shfl.up.b32 lo|p, lo, %2, %3;" " shfl.up.b32 hi|p, hi, %2, %3;" " mov.b64 r0, {lo, hi};" " @p add.f64 %0, %0, r0;" "}" : "=d"(output) : "d"(input), "r"(offset), "r"(shfl_c)); #endif return output; } /* /// Inclusive prefix scan (specialized for ReduceBySegmentOp<cub::Sum> across KeyValuePair<OffsetT, Value> types) template <typename Value, typename OffsetT> __device__ __forceinline__ KeyValuePair<OffsetT, Value>InclusiveScanStep( KeyValuePair<OffsetT, Value> input, ///< [in] Calling thread's input item. ReduceBySegmentOp<cub::Sum> scan_op, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { KeyValuePair<OffsetT, Value> output; output.value = InclusiveScanStep(input.value, cub::Sum(), first_lane, offset, Int2Type<IntegerTraits<Value>::IS_SMALL_UNSIGNED>()); output.key = InclusiveScanStep(input.key, cub::Sum(), first_lane, offset, Int2Type<IntegerTraits<OffsetT>::IS_SMALL_UNSIGNED>()); if (input.key > 0) output.value = input.value; return output; } */ /// Inclusive prefix scan step (generic) template <typename _T, typename ScanOpT> __device__ __forceinline__ _T InclusiveScanStep( _T input, ///< [in] Calling thread's input item. 
ScanOpT scan_op, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset) ///< [in] Up-offset to pull from { _T temp = ShuffleUp(input, offset, first_lane, member_mask); // Perform scan op if from a valid peer _T output = scan_op(temp, input); if (static_cast<int>(lane_id) < first_lane + offset) output = input; return output; } /// Inclusive prefix scan step (specialized for small integers size 32b or less) template <typename _T, typename ScanOpT> __device__ __forceinline__ _T InclusiveScanStep( _T input, ///< [in] Calling thread's input item. ScanOpT scan_op, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset, ///< [in] Up-offset to pull from Int2Type<true> /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small integer { return InclusiveScanStep(input, scan_op, first_lane, offset); } /// Inclusive prefix scan step (specialized for types other than small integers size 32b or less) template <typename _T, typename ScanOpT> __device__ __forceinline__ _T InclusiveScanStep( _T input, ///< [in] Calling thread's input item. ScanOpT scan_op, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment int offset, ///< [in] Up-offset to pull from Int2Type<false> /*is_small_unsigned*/) ///< [in] Marker type indicating whether T is a small integer { return InclusiveScanStep(input, scan_op, first_lane, offset); } //--------------------------------------------------------------------- // Templated inclusive scan iteration //--------------------------------------------------------------------- template <typename _T, typename ScanOp, int STEP> __device__ __forceinline__ void InclusiveScanStep( _T& input, ///< [in] Calling thread's input item. ScanOp scan_op, ///< [in] Binary scan operator int first_lane, ///< [in] Index of first lane in segment Int2Type<STEP> /*step*/) ///< [in] Marker type indicating scan step { input = InclusiveScanStep(input, scan_op, first_lane, 1 << STEP, Int2Type<IntegerTraits<T>::IS_SMALL_UNSIGNED>()); InclusiveScanStep(input, scan_op, first_lane, Int2Type<STEP + 1>()); } template <typename _T, typename ScanOp> __device__ __forceinline__ void InclusiveScanStep( _T& /*input*/, ///< [in] Calling thread's input item. ScanOp /*scan_op*/, ///< [in] Binary scan operator int /*first_lane*/, ///< [in] Index of first lane in segment Int2Type<STEPS> /*step*/) ///< [in] Marker type indicating scan step {} /****************************************************************************** * Interface ******************************************************************************/ //--------------------------------------------------------------------- // Broadcast //--------------------------------------------------------------------- /// Broadcast __device__ __forceinline__ T Broadcast( T input, ///< [in] The value to broadcast int src_lane) ///< [in] Which warp lane is to do the broadcasting { return ShuffleIndex(input, src_lane, LOGICAL_WARP_THREADS, member_mask); } //--------------------------------------------------------------------- // Inclusive operations //--------------------------------------------------------------------- /// Inclusive scan template <typename _T, typename ScanOpT> __device__ __forceinline__ void InclusiveScan( _T input, ///< [in] Calling thread's input item. _T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
ScanOpT scan_op) ///< [in] Binary scan operator { inclusive_output = input; // Iterate scan steps int segment_first_lane = 0; // Iterate scan steps // InclusiveScanStep(inclusive_output, scan_op, segment_first_lane, Int2Type<0>()); // Iterate scan steps #pragma unroll for (int STEP = 0; STEP < STEPS; STEP++) { inclusive_output = InclusiveScanStep( inclusive_output, scan_op, segment_first_lane, (1 << STEP), Int2Type<IntegerTraits<T>::IS_SMALL_UNSIGNED>()); } } /// Inclusive scan, specialized for reduce-value-by-key template <typename KeyT, typename ValueT, typename ReductionOpT> __device__ __forceinline__ void InclusiveScan( KeyValuePair<KeyT, ValueT> input, ///< [in] Calling thread's input item. KeyValuePair<KeyT, ValueT> &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ReduceByKeyOp<ReductionOpT > scan_op) ///< [in] Binary scan operator { inclusive_output = input; KeyT pred_key = ShuffleUp(inclusive_output.key, 1, 0, member_mask); unsigned int ballot = WARP_BALLOT((pred_key != inclusive_output.key), member_mask); // Mask away all lanes greater than ours ballot = ballot & LaneMaskLe(); // Find index of first set bit int segment_first_lane = CUB_MAX(0, 31 - __clz(ballot)); // Iterate scan steps // InclusiveScanStep(inclusive_output.value, scan_op.op, segment_first_lane, Int2Type<0>()); // Iterate scan steps #pragma unroll for (int STEP = 0; STEP < STEPS; STEP++) { inclusive_output.value = InclusiveScanStep( inclusive_output.value, scan_op.op, segment_first_lane, (1 << STEP), Int2Type<IntegerTraits<T>::IS_SMALL_UNSIGNED>()); } } /// Inclusive scan with aggregate template <typename ScanOpT> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOpT scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InclusiveScan(input, inclusive_output, scan_op); // Grab aggregate from last warp lane warp_aggregate = ShuffleIndex(inclusive_output, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); } //--------------------------------------------------------------------- // Get exclusive from inclusive //--------------------------------------------------------------------- /// Update inclusive and exclusive using input and inclusive template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update( T /*input*/, ///< [in] T &inclusive, ///< [in, out] T &exclusive, ///< [out] ScanOpT /*scan_op*/, ///< [in] IsIntegerT /*is_integer*/) ///< [in] { // initial value unknown exclusive = ShuffleUp(inclusive, 1, 0, member_mask); } /// Update inclusive and exclusive using input and inclusive (specialized for summation of integer types) __device__ __forceinline__ void Update( T input, T &inclusive, T &exclusive, cub::Sum /*scan_op*/, Int2Type<true> /*is_integer*/) { // initial value presumed 0 exclusive = inclusive - input; } /// Update inclusive and exclusive using initial value using input, inclusive, and initial value template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T /*input*/, T &inclusive, T &exclusive, ScanOpT scan_op, T initial_value, IsIntegerT /*is_integer*/) { inclusive = scan_op(initial_value, inclusive); exclusive = ShuffleUp(inclusive, 1, 0, member_mask); unsigned int segment_id = (IS_ARCH_WARP) ? 
lane_id : lane_id % LOGICAL_WARP_THREADS; if (segment_id == 0) exclusive = initial_value; } /// Update inclusive and exclusive using initial value using input and inclusive (specialized for summation of integer types) __device__ __forceinline__ void Update ( T input, T &inclusive, T &exclusive, cub::Sum scan_op, T initial_value, Int2Type<true> /*is_integer*/) { inclusive = scan_op(initial_value, inclusive); exclusive = inclusive - input; } /// Update inclusive, exclusive, and warp aggregate using input and inclusive template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T input, T &inclusive, T &exclusive, T &warp_aggregate, ScanOpT scan_op, IsIntegerT is_integer) { warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); Update(input, inclusive, exclusive, scan_op, is_integer); } /// Update inclusive, exclusive, and warp aggregate using input, inclusive, and initial value template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T input, T &inclusive, T &exclusive, T &warp_aggregate, ScanOpT scan_op, T initial_value, IsIntegerT is_integer) { warp_aggregate = ShuffleIndex(inclusive, LOGICAL_WARP_THREADS - 1, LOGICAL_WARP_THREADS, member_mask); Update(input, inclusive, exclusive, scan_op, initial_value, is_integer); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
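/**
 * Usage sketch (editorial addition; not part of the original CUB source).
 *
 * WarpScanShfl is the SHFL-based specialization behind the public cub::WarpScan
 * front-end on architectures with warp shuffles and a power-of-two logical warp
 * size.  The kernel below is a minimal, hedged illustration of inclusive and
 * exclusive prefix sums across one warp; kernel and pointer names
 * (ExampleWarpScanKernel, d_in, d_incl, d_excl) are illustrative assumptions.
 *
 * \code
 * #include <cub/cub.cuh>
 *
 * // Launched with one 32-thread warp per block
 * __global__ void ExampleWarpScanKernel(const int *d_in, int *d_incl, int *d_excl)
 * {
 *     typedef cub::WarpScan<int> WarpScan;
 *     __shared__ typename WarpScan::TempStorage temp_storage;
 *
 *     int thread_data = d_in[threadIdx.x];
 *
 *     // Inclusive prefix sum across the warp
 *     int inclusive;
 *     WarpScan(temp_storage).InclusiveSum(thread_data, inclusive);
 *
 *     // Exclusive prefix sum; for integer summation the specialization can
 *     // derive it as (inclusive - input), as in the Update() overloads above
 *     int exclusive;
 *     WarpScan(temp_storage).ExclusiveSum(thread_data, exclusive);
 *
 *     d_incl[threadIdx.x] = inclusive;
 *     d_excl[threadIdx.x] = exclusive;
 * }
 * \endcode
 */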
0
rapidsai_public_repos/nvgraph/external/cub_semiring/warp
rapidsai_public_repos/nvgraph/external/cub_semiring/warp/specializations/warp_scan_smem.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. */ #pragma once #include "../../thread/thread_operators.cuh" #include "../../thread/thread_load.cuh" #include "../../thread/thread_store.cuh" #include "../../util_type.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief WarpScanSmem provides smem-based variants of parallel prefix scan of items partitioned across a CUDA thread warp. 
*/ template < typename T, ///< Data type being scanned int LOGICAL_WARP_THREADS, ///< Number of threads per logical warp int PTX_ARCH> ///< The PTX compute capability for which to to specialize this collective struct WarpScanSmem { /****************************************************************************** * Constants and type definitions ******************************************************************************/ enum { /// Whether the logical warp size and the PTX warp size coincide IS_ARCH_WARP = (LOGICAL_WARP_THREADS == CUB_WARP_THREADS(PTX_ARCH)), /// Whether the logical warp size is a power-of-two IS_POW_OF_TWO = PowerOfTwo<LOGICAL_WARP_THREADS>::VALUE, /// The number of warp scan steps STEPS = Log2<LOGICAL_WARP_THREADS>::VALUE, /// The number of threads in half a warp HALF_WARP_THREADS = 1 << (STEPS - 1), /// The number of shared memory elements per warp WARP_SMEM_ELEMENTS = LOGICAL_WARP_THREADS + HALF_WARP_THREADS, }; /// Storage cell type (workaround for SM1x compiler bugs with custom-ops like Max() on signed chars) typedef typename If<((Equals<T, char>::VALUE || Equals<T, signed char>::VALUE) && (PTX_ARCH < 200)), int, T>::Type CellT; /// Shared memory storage layout type (1.5 warps-worth of elements for each warp) typedef CellT _TempStorage[WARP_SMEM_ELEMENTS]; // Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; /****************************************************************************** * Thread fields ******************************************************************************/ _TempStorage &temp_storage; unsigned int lane_id; unsigned int member_mask; /****************************************************************************** * Construction ******************************************************************************/ /// Constructor __device__ __forceinline__ WarpScanSmem( TempStorage &temp_storage) : temp_storage(temp_storage.Alias()), lane_id(IS_ARCH_WARP ? LaneId() : LaneId() % LOGICAL_WARP_THREADS), member_mask((0xffffffff >> (32 - LOGICAL_WARP_THREADS)) << ((IS_ARCH_WARP || !IS_POW_OF_TWO ) ? 
0 : // arch-width and non-power-of-two subwarps cannot be tiled with the arch-warp ((LaneId() / LOGICAL_WARP_THREADS) * LOGICAL_WARP_THREADS))) {} /****************************************************************************** * Utility methods ******************************************************************************/ /// Basic inclusive scan iteration (template unrolled, inductive-case specialization) template < bool HAS_IDENTITY, int STEP, typename ScanOp> __device__ __forceinline__ void ScanStep( T &partial, ScanOp scan_op, Int2Type<STEP> /*step*/) { const int OFFSET = 1 << STEP; // Share partial into buffer ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) partial); WARP_SYNC(member_mask); // Update partial if addend is in range if (HAS_IDENTITY || (lane_id >= OFFSET)) { T addend = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - OFFSET]); partial = scan_op(addend, partial); } WARP_SYNC(member_mask); ScanStep<HAS_IDENTITY>(partial, scan_op, Int2Type<STEP + 1>()); } /// Basic inclusive scan iteration(template unrolled, base-case specialization) template < bool HAS_IDENTITY, typename ScanOp> __device__ __forceinline__ void ScanStep( T &/*partial*/, ScanOp /*scan_op*/, Int2Type<STEPS> /*step*/) {} /// Inclusive prefix scan (specialized for summation across primitive types) __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. Sum scan_op, ///< [in] Binary scan operator Int2Type<true> /*is_primitive*/) ///< [in] Marker type indicating whether T is primitive type { T identity = 0; ThreadStore<STORE_VOLATILE>(&temp_storage[lane_id], (CellT) identity); WARP_SYNC(member_mask); // Iterate scan steps output = input; ScanStep<true>(output, scan_op, Int2Type<0>()); } /// Inclusive prefix scan template <typename ScanOp, int IS_PRIMITIVE> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator Int2Type<IS_PRIMITIVE> /*is_primitive*/) ///< [in] Marker type indicating whether T is primitive type { // Iterate scan steps output = input; ScanStep<false>(output, scan_op, Int2Type<0>()); } /****************************************************************************** * Interface ******************************************************************************/ //--------------------------------------------------------------------- // Broadcast //--------------------------------------------------------------------- /// Broadcast __device__ __forceinline__ T Broadcast( T input, ///< [in] The value to broadcast unsigned int src_lane) ///< [in] Which warp lane is to do the broadcasting { if (lane_id == src_lane) { ThreadStore<STORE_VOLATILE>(temp_storage, (CellT) input); } WARP_SYNC(member_mask); return (T)ThreadLoad<LOAD_VOLATILE>(temp_storage); } //--------------------------------------------------------------------- // Inclusive operations //--------------------------------------------------------------------- /// Inclusive scan template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. 
ScanOp scan_op) ///< [in] Binary scan operator { InclusiveScan(input, inclusive_output, scan_op, Int2Type<Traits<T>::PRIMITIVE>()); } /// Inclusive scan with aggregate template <typename ScanOp> __device__ __forceinline__ void InclusiveScan( T input, ///< [in] Calling thread's input item. T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input. ScanOp scan_op, ///< [in] Binary scan operator T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items. { InclusiveScan(input, inclusive_output, scan_op); // Retrieve aggregate ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive_output); WARP_SYNC(member_mask); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); WARP_SYNC(member_mask); } //--------------------------------------------------------------------- // Get exclusive from inclusive //--------------------------------------------------------------------- /// Update inclusive and exclusive using input and inclusive template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update( T /*input*/, ///< [in] T &inclusive, ///< [in, out] T &exclusive, ///< [out] ScanOpT /*scan_op*/, ///< [in] IsIntegerT /*is_integer*/) ///< [in] { // initial value unknown ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); WARP_SYNC(member_mask); exclusive = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); } /// Update inclusive and exclusive using input and inclusive (specialized for summation of integer types) __device__ __forceinline__ void Update( T input, T &inclusive, T &exclusive, cub::Sum /*scan_op*/, Int2Type<true> /*is_integer*/) { // initial value presumed 0 exclusive = inclusive - input; } /// Update inclusive and exclusive using initial value using input, inclusive, and initial value template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T /*input*/, T &inclusive, T &exclusive, ScanOpT scan_op, T initial_value, IsIntegerT /*is_integer*/) { inclusive = scan_op(initial_value, inclusive); ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); WARP_SYNC(member_mask); exclusive = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); if (lane_id == 0) exclusive = initial_value; } /// Update inclusive and exclusive using initial value using input and inclusive (specialized for summation of integer types) __device__ __forceinline__ void Update ( T input, T &inclusive, T &exclusive, cub::Sum scan_op, T initial_value, Int2Type<true> /*is_integer*/) { inclusive = scan_op(initial_value, inclusive); exclusive = inclusive - input; } /// Update inclusive, exclusive, and warp aggregate using input and inclusive template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T /*input*/, T &inclusive, T &exclusive, T &warp_aggregate, ScanOpT /*scan_op*/, IsIntegerT /*is_integer*/) { // Initial value presumed to be unknown or identity (either way our padding is correct) ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); WARP_SYNC(member_mask); exclusive = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1]); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); } /// Update inclusive, exclusive, and warp aggregate using input and inclusive (specialized for summation of integer types) 
__device__ __forceinline__ void Update ( T input, T &inclusive, T &exclusive, T &warp_aggregate, cub::Sum /*scan_o*/, Int2Type<true> /*is_integer*/) { // Initial value presumed to be unknown or identity (either way our padding is correct) ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); WARP_SYNC(member_mask); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); exclusive = inclusive - input; } /// Update inclusive, exclusive, and warp aggregate using input, inclusive, and initial value template <typename ScanOpT, typename IsIntegerT> __device__ __forceinline__ void Update ( T /*input*/, T &inclusive, T &exclusive, T &warp_aggregate, ScanOpT scan_op, T initial_value, IsIntegerT /*is_integer*/) { // Broadcast warp aggregate ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id], (CellT) inclusive); WARP_SYNC(member_mask); warp_aggregate = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[WARP_SMEM_ELEMENTS - 1]); WARP_SYNC(member_mask); // Update inclusive with initial value inclusive = scan_op(initial_value, inclusive); // Get exclusive from exclusive ThreadStore<STORE_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 1], (CellT) inclusive); WARP_SYNC(member_mask); exclusive = (T) ThreadLoad<LOAD_VOLATILE>(&temp_storage[HALF_WARP_THREADS + lane_id - 2]); if (lane_id == 0) exclusive = initial_value; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
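/**
 * Usage sketch (editorial addition; not part of the original CUB source).
 *
 * WarpScanSmem is the shared-memory specialization behind cub::WarpScan when
 * warp shuffles are unavailable or the logical warp size is not a power-of-two.
 * The hypothetical kernel below (ExampleWarpMaxScanKernel, d_in, d_out are
 * illustrative names) sketches an exclusive max-scan seeded with an initial
 * value, the kind of call that exercises the Update() overloads above; which
 * internal specialization actually runs depends on the target architecture.
 *
 * \code
 * #include <climits>      // INT_MIN
 * #include <cub/cub.cuh>
 *
 * // Launched with one 32-thread warp per block
 * __global__ void ExampleWarpMaxScanKernel(const int *d_in, int *d_out)
 * {
 *     typedef cub::WarpScan<int> WarpScan;
 *     __shared__ typename WarpScan::TempStorage temp_storage;
 *
 *     int thread_data = d_in[threadIdx.x];
 *
 *     // Exclusive running maximum, seeded with INT_MIN as the initial value
 *     int exclusive;
 *     WarpScan(temp_storage).ExclusiveScan(thread_data, exclusive, INT_MIN, cub::Max());
 *
 *     d_out[threadIdx.x] = exclusive;
 * }
 * \endcode
 */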
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_segmented_reduce.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSegmentedReduce provides device-wide, parallel operations for computing a batched reduction across multiple sequences of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "../iterator/arg_index_input_iterator.cuh" #include "dispatch/dispatch_reduce.cuh" #include "dispatch/dispatch_reduce_by_key.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceSegmentedReduce provides device-wide, parallel operations for computing a reduction across multiple sequences of data items residing within device-accessible memory. ![](reduce_logo.png) * \ingroup SegmentedModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a sequence of input elements. * * \par Usage Considerations * \cdp_class{DeviceSegmentedReduce} * */ struct DeviceSegmentedReduce { /** * \brief Computes a device-wide segmented reduction using the specified binary \p reduction_op functor. * * \par * - Does not support binary reduction operators that are non-commutative. * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - \devicestorage * * \par Snippet * The code snippet below illustrates a custom min-reduction of a device vector of \p int data elements. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * CUB_RUNTIME_FUNCTION __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-, -, -] * CustomMin min_op; * int initial_value; // e.g., INT_MAX * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1, min_op, initial_value); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduction * cub::DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1, min_op, initial_value); * * // d_out <-- [6, INT_MAX, 0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator * \tparam ReductionOp <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam T <b>[inferred]</b> Data element type that is convertible to the \p value type of \p InputIteratorT */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOp, typename T> CUB_RUNTIME_FUNCTION static cudaError_t Reduce( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. ReductionOp reduction_op, ///< [in] Binary reduction functor T initial_value, ///< [in] Initial value of the reduction for each segment cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; return DispatchSegmentedReduce<InputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, ReductionOp>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, reduction_op, initial_value, stream, debug_synchronous); } /** * \brief Computes a device-wide segmented sum using the addition ('+') operator. * * \par * - Uses \p 0 as the initial value of the reduction for each segment. * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - Does not support \p + operators that are non-commutative.. * - \devicestorage * * \par Snippet * The code snippet below illustrates the sum reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-, -, -] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sum-reduction * cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // d_out <-- [21, 0, 17] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Sum( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. 
Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type return DispatchSegmentedReduce<InputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, cub::Sum>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, cub::Sum(), OutputT(), // zero-initialize stream, debug_synchronous); } /** * \brief Computes a device-wide segmented minimum using the less-than ('<') operator. * * \par * - Uses <tt>std::numeric_limits<T>::max()</tt> as the initial value of the reduction for each segment. * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - Does not support \p < operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the min-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-, -, -] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run min-reduction * cub::DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // d_out <-- [6, INT_MAX, 0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Min( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; return DispatchSegmentedReduce<InputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, cub::Min>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, cub::Min(), Traits<InputT>::Max(), // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide minimum in each segment using the less-than ('<') operator, also returning the in-segment index of that item. * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The minimum of the <em>i</em><sup>th</sup> segment is written to <tt>d_out[i].value</tt> and its offset in that segment is written to <tt>d_out[i].key</tt>. * - The <tt>{1, std::numeric_limits<T>::max()}</tt> tuple is produced for zero-length inputs * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - Does not support \p < operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmin-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}, {-,-}, {-,-}] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmin-reduction * cub::DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // d_out <-- [{1,6}, {1,INT_MAX}, {2,0}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>KeyValuePair<int, T></tt>) \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMin( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input type typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT; // The output tuple type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... 
else the output iterator's value type // The output value type typedef typename OutputTupleT::Value OutputValueT; // Wrapped input iterator to produce index-value <OffsetT, InputT> tuples typedef ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT> ArgIndexInputIteratorT; ArgIndexInputIteratorT d_indexed_in(d_in); // Initial value OutputTupleT initial_value(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent return DispatchSegmentedReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, cub::ArgMin>::Dispatch( d_temp_storage, temp_storage_bytes, d_indexed_in, d_out, num_segments, d_begin_offsets, d_end_offsets, cub::ArgMin(), initial_value, stream, debug_synchronous); } /** * \brief Computes a device-wide segmented maximum using the greater-than ('>') operator. * * \par * - Uses <tt>std::numeric_limits<T>::lowest()</tt> as the initial value of the reduction. * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - Does not support \p > operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the max-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-, -, -] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run max-reduction * cub::DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // d_out <-- [8, INT_MIN, 9] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Max( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; return DispatchSegmentedReduce<InputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, cub::Max>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, cub::Max(), Traits<InputT>::Lowest(), // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide maximum in each segment using the greater-than ('>') operator, also returning the in-segment index of that item * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The maximum of the <em>i</em><sup>th</sup> segment is written to <tt>d_out[i].value</tt> and its offset in that segment is written to <tt>d_out[i].key</tt>. * - The <tt>{1, std::numeric_limits<T>::lowest()}</tt> tuple is produced for zero-length inputs * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - Does not support \p > operators that are non-commutative. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmax-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}, {-,-}, {-,-}] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmax-reduction * cub::DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, * num_segments, d_offsets, d_offsets + 1); * * // d_out <-- [{0,8}, {1,INT_MIN}, {3,9}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>KeyValuePair<int, T></tt>) \iterator * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMax( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input type typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT; // The output tuple type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... 
else the output iterator's value type // The output value type typedef typename OutputTupleT::Value OutputValueT; // Wrapped input iterator to produce index-value <OffsetT, InputT> tuples typedef ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT> ArgIndexInputIteratorT; ArgIndexInputIteratorT d_indexed_in(d_in); // Initial value OutputTupleT initial_value(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent return DispatchSegmentedReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, cub::ArgMax>::Dispatch( d_temp_storage, temp_storage_bytes, d_indexed_in, d_out, num_segments, d_begin_offsets, d_end_offsets, cub::ArgMax(), initial_value, stream, debug_synchronous); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
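The entry points above all share the same two-phase calling convention: a first call with a NULL d_temp_storage that only writes the required temp_storage_bytes, followed by a second, identical call that performs the work once the scratch space is allocated. The following is a minimal, self-contained host-side sketch of that pattern using DeviceSegmentedReduce::Sum; it is an illustration under assumptions, not part of the header above. The variable names (h_in, d_in, d_offsets, d_out) are illustrative, error checking is omitted, and it assumes the standard CUB and CUDA runtime headers are on the include path.

#include <cuda_runtime.h>
#include <cub/cub.cuh>

int main()
{
    // Host data: 3 segments over 7 items, delimited by offsets [0,3), [3,3), [3,7)
    int h_in[]       = {8, 6, 7, 5, 3, 0, 9};
    int h_offsets[]  = {0, 3, 3, 7};
    int num_segments = 3;

    // Device allocations and copies (error checking omitted for brevity)
    int *d_in, *d_out, *d_offsets;
    cudaMalloc((void**)&d_in,      sizeof(h_in));
    cudaMalloc((void**)&d_offsets, sizeof(h_offsets));
    cudaMalloc((void**)&d_out,     num_segments * sizeof(int));
    cudaMemcpy(d_in,      h_in,      sizeof(h_in),      cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);

    // Phase 1: with d_temp_storage == NULL, only temp_storage_bytes is computed
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_segments, d_offsets, d_offsets + 1);

    // Phase 2: allocate the scratch space and run the segmented reduction
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_segments, d_offsets, d_offsets + 1);

    // d_out now holds [21, 0, 17]: sums of [8,6,7], the empty segment, and [5,3,0,9]
    cudaFree(d_in); cudaFree(d_offsets); cudaFree(d_out); cudaFree(d_temp_storage);
    return 0;
}

Note how a single d_offsets array of length num_segments+1 serves as both d_begin_offsets (d_offsets) and d_end_offsets (d_offsets + 1), the aliasing the documentation above describes for contiguous segments.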
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_segmented_radix_sort.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSegmentedRadixSort provides device-wide, parallel operations for computing a batched radix sort across multiple, non-overlapping sequences of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_radix_sort.cuh" #include "../util_arch.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceSegmentedRadixSort provides device-wide, parallel operations for computing a batched radix sort across multiple, non-overlapping sequences of data items residing within device-accessible memory. ![](segmented_sorting_logo.png) * \ingroup SegmentedModule * * \par Overview * The [<em>radix sorting method</em>](http://en.wikipedia.org/wiki/Radix_sort) arranges * items into ascending (or descending) order. The algorithm relies upon a positional representation for * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, * characters, etc.) specified from least-significant to most-significant. For a * given input sequence of keys and a set of rules specifying a total ordering * of the symbolic alphabet, the radix sorting method produces a lexicographic * ordering of those keys. * * \par * DeviceSegmentedRadixSort can sort all of the built-in C++ numeric primitive types, e.g.: * <tt>unsigned char</tt>, \p int, \p double, etc. Although the direct radix sorting * method can only be applied to unsigned integral types, DeviceSegmentedRadixSort * is able to sort signed and floating-point types via simple bit-wise transformations * that ensure lexicographic key ordering. 
* * \par Usage Considerations * \cdp_class{DeviceSegmentedRadixSort} * */ struct DeviceSegmentedRadixSort { /******************************************************************//** * \name Key-value pairs *********************************************************************/ //@{ /** * \brief Sorts segments of key-value pairs into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [-, -, -, -, -, -, -] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] * // d_values_out <-- [1, 2, 0, 5, 4, 3, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam ValueT <b>[inferred]</b> Value type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename ValueT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] %Device-accessible pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] %Device-accessible pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchSegmentedRadixSort<false, KeyT, ValueT, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts segments of key-value pairs into ascending order. (~<em>N </em>auxiliary storage required) * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. * - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). 
* - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -] * ... * * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] * // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam ValueT <b>[inferred]</b> Value type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename ValueT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. 
If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; return DispatchSegmentedRadixSort<false, KeyT, ValueT, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, true, stream, debug_synchronous); } /** * \brief Sorts segments of key-value pairs into descending order. (~<em>2N</em> auxiliary storage required). * * \par * - The contents of the input data are not altered by the sorting operation * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [-, -, -, -, -, -, -] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0] * // d_values_out <-- [0, 2, 1, 6, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam ValueT <b>[inferred]</b> Value type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename ValueT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] %Device-accessible pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] %Device-accessible pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchSegmentedRadixSort<true, KeyT, ValueT, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts segments of key-value pairs into descending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. * - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [-, -, -, -, -, -, -] * ... 
* * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] * // d_values.Current() <-- [0, 2, 1, 6, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam ValueT <b>[inferred]</b> Value type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename ValueT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; return DispatchSegmentedRadixSort<true, KeyT, ValueT, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, true, stream, debug_synchronous); } //@} end member group /******************************************************************//** * \name Keys-only *********************************************************************/ //@{ /** * \brief Sorts segments of keys into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [-, -, -, -, -, -, -] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys_out <-- [6, 7, 8, 0, 3, 5, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // Null value type DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<NullType> d_values; return DispatchSegmentedRadixSort<false, KeyT, NullType, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts segments of keys into ascending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys.Current() <-- [6, 7, 8, 0, 3, 5, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
    {
        // Signed integer type for global offsets
        typedef int OffsetT;

        // Null value type
        DoubleBuffer<NullType> d_values;

        return DispatchSegmentedRadixSort<false, KeyT, NullType, OffsetIteratorT, OffsetT>::Dispatch(
            d_temp_storage, temp_storage_bytes, d_keys, d_values,
            num_items, num_segments, d_begin_offsets, d_end_offsets,
            begin_bit, end_bit, true, stream, debug_synchronous);
    }


    /**
     * \brief Sorts segments of keys into descending order. (~<em>2N</em> auxiliary storage required).
     *
     * \par
     * - The contents of the input data are not altered by the sorting operation
     * - When input a contiguous sequence of segments, a single sequence
     *   \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased
     *   for both the \p d_begin_offsets and \p d_end_offsets parameters (where
     *   the latter is specified as <tt>segment_offsets+1</tt>).
     * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement.
     * - \devicestorageNP  For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below.
     * - \devicestorage
     *
     * \par Snippet
     * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys.
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/device/device_segmented_radix_sort.cuh>
     *
     * // Declare, allocate, and initialize device-accessible pointers for sorting data
     * int  num_items;          // e.g., 7
     * int  num_segments;       // e.g., 3
     * int  *d_offsets;         // e.g., [0, 3, 3, 7]
     * int  *d_keys_in;         // e.g., [8, 6, 7, 5, 3, 0, 9]
     * int  *d_keys_out;        // e.g., [-, -, -, -, -, -, -]
     * ...
     *
     * // Determine temporary device storage requirements
     * void     *d_temp_storage = NULL;
     * size_t   temp_storage_bytes = 0;
     * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
     *     num_items, num_segments, d_offsets, d_offsets + 1);
     *
     * // Allocate temporary storage
     * cudaMalloc(&d_temp_storage, temp_storage_bytes);
     *
     * // Run sorting operation
     * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out,
     *     num_items, num_segments, d_offsets, d_offsets + 1);
     *
     * // d_keys_out <-- [8, 7, 6, 9, 5, 3, 0]
     *
     * \endcode
     *
     * \tparam KeyT             <b>[inferred]</b> Key type
     * \tparam OffsetIteratorT  <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator
     */
    template <
        typename            KeyT,
        typename            OffsetIteratorT>
    CUB_RUNTIME_FUNCTION
    static cudaError_t SortKeysDescending(
        void                *d_temp_storage,                        ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] %Device-accessible pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] %Device-accessible pointer to the sorted output sequence of key data int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<NullType> d_values; return DispatchSegmentedRadixSort<true, KeyT, NullType, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts segments of keys into descending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - When input a contiguous sequence of segments, a single sequence * \p segment_offsets (of length <tt>num_segments+1</tt>) can be aliased * for both the \p d_begin_offsets and \p d_end_offsets parameters (where * the latter is specified as <tt>segment_offsets+1</tt>). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Snippet * The code snippet below illustrates the batched sorting of three segments (with one zero-length segment) of \p int keys. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_segmentd_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int num_segments; // e.g., 3 * int *d_offsets; // e.g., [0, 3, 3, 7] * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [-, -, -, -, -, -, -] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, * num_items, num_segments, d_offsets, d_offsets + 1); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceSegmentedRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, * num_items, num_segments, d_offsets, d_offsets + 1); * * // d_keys.Current() <-- [8, 7, 6, 9, 5, 3, 0] * * \endcode * * \tparam KeyT <b>[inferred]</b> Key type * \tparam OffsetIteratorT <b>[inferred]</b> Random-access input iterator type for reading segment offsets \iterator */ template < typename KeyT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] The total number of items to sort (across all segments) int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; // Null value type DoubleBuffer<NullType> d_values; return DispatchSegmentedRadixSort<true, KeyT, NullType, OffsetIteratorT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, num_segments, d_begin_offsets, d_end_offsets, begin_bit, end_bit, true, stream, debug_synchronous); } //@} end member group }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
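// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the header above): the notes on
// SortKeys/SortKeysDescending mention two conveniences that the embedded
// snippets do not exercise together -- aliasing a single segment_offsets array
// of length num_segments+1 for both offset parameters, and restricting the
// comparison to a bit subrange [begin_bit, end_bit).  The host-side driver
// below combines both with the DoubleBuffer overload of SortKeysDescending.
// Buffer names and the 16-bit subrange are illustrative assumptions; the keys
// here fit in 16 bits, so the restricted sort matches the full-key sort.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>
#include <cstdio>

int main()
{
    const int num_items    = 7;
    const int num_segments = 3;
    int h_keys[num_items]           = {8, 6, 7, 5, 3, 0, 9};
    int h_offsets[num_segments + 1] = {0, 3, 3, 7};     // one zero-length segment

    int *d_key_buf, *d_key_alt_buf, *d_offsets;
    cudaMalloc(&d_key_buf,     num_items * sizeof(int));
    cudaMalloc(&d_key_alt_buf, num_items * sizeof(int));
    cudaMalloc(&d_offsets,     (num_segments + 1) * sizeof(int));
    cudaMemcpy(d_key_buf, h_keys,    num_items * sizeof(int),          cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsets, h_offsets, (num_segments + 1) * sizeof(int), cudaMemcpyHostToDevice);

    // DoubleBuffer overload: ~N auxiliary storage; either buffer may receive the result.
    cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);

    // The single offsets array is aliased for both begin and end offsets.
    void  *d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceSegmentedRadixSort::SortKeysDescending(
        d_temp_storage, temp_storage_bytes, d_keys,
        num_items, num_segments, d_offsets, d_offsets + 1,
        0, 16);                                         // compare only bits [0, 16)
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceSegmentedRadixSort::SortKeysDescending(
        d_temp_storage, temp_storage_bytes, d_keys,
        num_items, num_segments, d_offsets, d_offsets + 1,
        0, 16);

    // d_keys.Current() points at whichever buffer received the sorted keys.
    cudaMemcpy(h_keys, d_keys.Current(), num_items * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < num_items; ++i)
        printf("%d ", h_keys[i]);                       // 8 7 6 9 5 3 0
    printf("\n");
    return 0;
}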
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_run_length_encode.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceRunLengthEncode provides device-wide, parallel operations for computing a run-length encoding across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_rle.cuh" #include "dispatch/dispatch_reduce_by_key.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceRunLengthEncode provides device-wide, parallel operations for demarcating "runs" of same-valued items within a sequence residing within device-accessible memory. ![](run_length_encode_logo.png) * \ingroup SingleModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Run-length_encoding"><em>run-length encoding</em></a> * computes a simple compressed representation of a sequence of input elements such that each * maximal "run" of consecutive same-valued data items is encoded as a single data value along with a * count of the elements in that run. * * \par Usage Considerations * \cdp_class{DeviceRunLengthEncode} * * \par Performance * \linear_performance{run-length encode} * * \par * The following chart illustrates DeviceRunLengthEncode::RunLengthEncode performance across * different CUDA architectures for \p int32 items. * Segments have lengths uniformly sampled from [1,1000]. * * \image html rle_int32_len_500.png * * \par * \plots_below * */ struct DeviceRunLengthEncode { /** * \brief Computes a run-length encoding of the sequence \p d_in. * * \par * - For the <em>i</em><sup>th</sup> run encountered, the first key of the run and its length are written to * <tt>d_unique_out[<em>i</em>]</tt> and <tt>d_counts_out[<em>i</em>]</tt>, * respectively. * - The total number of runs encountered is written to \p d_num_runs_out. 
* - The <tt>==</tt> equality operator is used to determine whether values are equivalent * - \devicestorage * * \par Performance * The following charts illustrate saturated encode performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. Segments have * lengths uniformly sampled from [1,1000]. * * \image html rle_int32_len_500.png * \image html rle_int64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html rle_int32_len_5.png * \image html rle_int64_len_5.png * * \par Snippet * The code snippet below illustrates the run-length encoding of a sequence of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_run_length_encode.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_unique_out; // e.g., [ , , , , , , , ] * int *d_counts_out; // e.g., [ , , , , , , , ] * int *d_num_runs_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run encoding * cub::DeviceRunLengthEncode::Encode(d_temp_storage, temp_storage_bytes, d_in, d_unique_out, d_counts_out, d_num_runs_out, num_items); * * // d_unique_out <-- [0, 2, 9, 5, 8] * // d_counts_out <-- [1, 2, 1, 3, 1] * // d_num_runs_out <-- [5] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam UniqueOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing unique output items \iterator * \tparam LengthsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing output counts \iterator * \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator */ template < typename InputIteratorT, typename UniqueOutputIteratorT, typename LengthsOutputIteratorT, typename NumRunsOutputIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Encode( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) LengthsOutputIteratorT d_counts_out, ///< [out] Pointer to the output sequence of run-lengths (one count per run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ typedef int OffsetT; // Signed integer type for global offsets typedef NullType* FlagIterator; // FlagT iterator type (not used) typedef NullType SelectOp; // Selection op (not used) typedef Equality EqualityOp; // Default == operator typedef cub::Sum ReductionOp; // Value reduction operator // The lengths output value type typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ? OffsetT, // ... then the OffsetT type, typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT; // ... else the output iterator's value type // Generator type for providing 1s values for run-length reduction typedef ConstantInputIterator<LengthT, OffsetT> LengthsInputIteratorT; return DispatchReduceByKey<InputIteratorT, UniqueOutputIteratorT, LengthsInputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, ReductionOp, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_unique_out, LengthsInputIteratorT((LengthT) 1), d_counts_out, d_num_runs_out, EqualityOp(), ReductionOp(), num_items, stream, debug_synchronous); } /** * \brief Enumerates the starting offsets and lengths of all non-trivial runs (of length > 1) of same-valued keys in the sequence \p d_in. * * \par * - For the <em>i</em><sup>th</sup> non-trivial run, the run's starting offset * and its length are written to <tt>d_offsets_out[<em>i</em>]</tt> and * <tt>d_lengths_out[<em>i</em>]</tt>, respectively. * - The total number of runs encountered is written to \p d_num_runs_out. * - The <tt>==</tt> equality operator is used to determine whether values are equivalent * - \devicestorage * * \par Performance * * \par Snippet * The code snippet below illustrates the identification of non-trivial runs within a sequence of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_run_length_encode.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_offsets_out; // e.g., [ , , , , , , , ] * int *d_lengths_out; // e.g., [ , , , , , , , ] * int *d_num_runs_out; // e.g., [ ] * ... 
* * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run encoding * cub::DeviceRunLengthEncode::NonTrivialRuns(d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, num_items); * * // d_offsets_out <-- [1, 4] * // d_lengths_out <-- [2, 3] * // d_num_runs_out <-- [2] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OffsetsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing run-offset values \iterator * \tparam LengthsOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing run-length values \iterator * \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator */ template < typename InputIteratorT, typename OffsetsOutputIteratorT, typename LengthsOutputIteratorT, typename NumRunsOutputIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t NonTrivialRuns( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to input sequence of data items OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run-offsets (one offset per non-trivial run) LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run-lengths (one count per non-trivial run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out) int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef Equality EqualityOp; // Default == operator return DeviceRleDispatch<InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, EqualityOp(), num_items, stream, debug_synchronous); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
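// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the header above): Encode() is implemented
// by dispatching a reduce-by-key over a stream of constant 1s.  The same idea
// can be expressed through the public DeviceReduce::ReduceByKey interface, as
// below.  The wrapper function name is an assumption, and the caller remains
// responsible for the usual two-phase protocol (NULL temp storage to query the
// size, allocate, then call again).
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>

cudaError_t EncodeViaReduceByKey(
    void   *d_temp_storage,     // NULL on the sizing pass
    size_t &temp_storage_bytes,
    int    *d_in,               // e.g., [0, 2, 2, 9, 5, 5, 5, 8]
    int    *d_unique_out,       // one key per run
    int    *d_counts_out,       // one length per run
    int    *d_num_runs_out,     // total number of runs
    int     num_items)
{
    // Every position contributes a value of 1; summing them per run of equal
    // keys yields the run lengths, mirroring Encode()'s dispatch above.
    cub::ConstantInputIterator<int> ones(1);

    return cub::DeviceReduce::ReduceByKey(
        d_temp_storage, temp_storage_bytes,
        d_in, d_unique_out,         // runs of equal keys -> unique keys
        ones, d_counts_out,         // constant 1s        -> run lengths
        d_num_runs_out,
        cub::Sum(), num_items);
}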
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_reduce.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "../iterator/arg_index_input_iterator.cuh" #include "dispatch/dispatch_reduce.cuh" #include "dispatch/dispatch_reduce_by_key.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. ![](reduce_logo.png) * \ingroup SingleModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Reduce_(higher-order_function)"><em>reduction</em></a> (or <em>fold</em>) * uses a binary combining operator to compute a single aggregate from a sequence of input elements. * * \par Usage Considerations * \cdp_class{DeviceReduce} * * \par Performance * \linear_performance{reduction, reduce-by-key, and run-length encode} * * \par * The following chart illustrates DeviceReduce::Sum * performance across different CUDA architectures for \p int32 keys. * * \image html reduce_int32.png * * \par * The following chart illustrates DeviceReduce::ReduceByKey (summation) * performance across different CUDA architectures for \p fp32 * values. Segments are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * * \par * \plots_below * */ struct DeviceReduce { /** * \brief Computes a device-wide reduction using the specified binary \p reduction_op functor and initial value \p init. * * \par * - Does not support binary reduction operators that are non-commutative. 
* - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates a user-defined min-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * __device__ __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * CustomMin min_op; * int init; // e.g., INT_MAX * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduction * cub::DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, min_op, init); * * // d_out <-- [0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator * \tparam ReductionOpT <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> * \tparam T <b>[inferred]</b> Data element type that is convertible to the \p value type of \p InputIteratorT */ template < typename InputIteratorT, typename OutputIteratorT, typename ReductionOpT, typename T> CUB_RUNTIME_FUNCTION static cudaError_t Reduce( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) ReductionOpT reduction_op, ///< [in] Binary reduction functor T init, ///< [in] Initial value of the reduction cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, init, stream, debug_synchronous); } /** * \brief Computes a device-wide sum using the addition (\p +) operator. * * \par * - Uses \p 0 as the initial value of the reduction. * - Does not support \p + operators that are non-commutative.. 
* - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Performance * The following charts illustrate saturated sum-reduction performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. * * \image html reduce_int32.png * \image html reduce_int64.png * * \par Snippet * The code snippet below illustrates the sum-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sum-reduction * cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out <-- [38] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Sum( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Sum>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Sum(), OutputT(), // zero-initialize stream, debug_synchronous); } /** * \brief Computes a device-wide minimum using the less-than ('<') operator. * * \par * - Uses <tt>std::numeric_limits<T>::max()</tt> as the initial value of the reduction. 
* - Does not support \p < operators that are non-commutative. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the min-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run min-reduction * cub::DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out <-- [0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Min( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Min>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Min(), Traits<InputT>::Max(), // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide minimum using the less-than ('<') operator, also returning the index of that item. * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The minimum is written to <tt>d_out.value</tt> and its offset in the input array is written to <tt>d_out.key</tt>. * - The <tt>{1, std::numeric_limits<T>::max()}</tt> tuple is produced for zero-length inputs * - Does not support \p < operators that are non-commutative. 
* - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmin-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmin-reduction * cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_argmin, num_items); * * // d_out <-- [{5, 0}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>cub::KeyValuePair<int, T></tt>) \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMin( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The input type typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT; // The output tuple type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... 
else the output iterator's value type // The output value type typedef typename OutputTupleT::Value OutputValueT; // Wrapped input iterator to produce index-value <OffsetT, InputT> tuples typedef ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT> ArgIndexInputIteratorT; ArgIndexInputIteratorT d_indexed_in(d_in); // Initial value OutputTupleT initial_value(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<T>::max() when C++11 support is more prevalent return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMin>::Dispatch( d_temp_storage, temp_storage_bytes, d_indexed_in, d_out, num_items, cub::ArgMin(), initial_value, stream, debug_synchronous); } /** * \brief Computes a device-wide maximum using the greater-than ('>') operator. * * \par * - Uses <tt>std::numeric_limits<T>::lowest()</tt> as the initial value of the reduction. * - Does not support \p > operators that are non-commutative. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the max-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [-] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run max-reduction * cub::DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_max, num_items); * * // d_out <-- [9] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t Max( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; return DispatchReduce<InputIteratorT, OutputIteratorT, OffsetT, cub::Max>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, cub::Max(), Traits<InputT>::Lowest(), // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent stream, debug_synchronous); } /** * \brief Finds the first device-wide maximum using the greater-than ('>') operator, also returning the index of that item * * \par * - The output value type of \p d_out is cub::KeyValuePair <tt><int, T></tt> (assuming the value type of \p d_in is \p T) * - The maximum is written to <tt>d_out.value</tt> and its offset in the input array is written to <tt>d_out.key</tt>. * - The <tt>{1, std::numeric_limits<T>::lowest()}</tt> tuple is produced for zero-length inputs * - Does not support \p > operators that are non-commutative. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the argmax-reduction of a device vector of \p int data elements. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * KeyValuePair<int, int> *d_out; // e.g., [{-,-}] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run argmax-reduction * cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_argmax, num_items); * * // d_out <-- [{6, 9}] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items (of some type \p T) \iterator * \tparam OutputIteratorT <b>[inferred]</b> Output iterator type for recording the reduced aggregate (having value type <tt>cub::KeyValuePair<int, T></tt>) \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ArgMax( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; // The input type typedef typename std::iterator_traits<InputIteratorT>::value_type InputValueT; // The output tuple type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? KeyValuePair<OffsetT, InputValueT>, // ... then the key value pair OffsetT + InputValueT typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputTupleT; // ... else the output iterator's value type // The output value type typedef typename OutputTupleT::Value OutputValueT; // Wrapped input iterator to produce index-value <OffsetT, InputT> tuples typedef ArgIndexInputIterator<InputIteratorT, OffsetT, OutputValueT> ArgIndexInputIteratorT; ArgIndexInputIteratorT d_indexed_in(d_in); // Initial value OutputTupleT initial_value(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<T>::lowest() when C++11 support is more prevalent return DispatchReduce<ArgIndexInputIteratorT, OutputIteratorT, OffsetT, cub::ArgMax>::Dispatch( d_temp_storage, temp_storage_bytes, d_indexed_in, d_out, num_items, cub::ArgMax(), initial_value, stream, debug_synchronous); } /** * \brief Reduces segments of values, where segments are demarcated by corresponding runs of identical keys. * * \par * This operation computes segmented reductions within \p d_values_in using * the specified binary \p reduction_op functor. The segments are identified by * "runs" of corresponding keys in \p d_keys_in, where runs are maximal ranges of * consecutive, identical keys. For the <em>i</em><sup>th</sup> run encountered, * the first key of the run and the corresponding value aggregate of that run are * written to <tt>d_unique_out[<em>i</em>]</tt> and <tt>d_aggregates_out[<em>i</em>]</tt>, * respectively. The total number of runs encountered is written to \p d_num_runs_out. * * \par * - The <tt>==</tt> equality operator is used to determine whether keys are equivalent * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Performance * The following chart illustrates reduction-by-key (sum) performance across * different CUDA architectures for \p fp32 and \p fp64 values, respectively. Segments * are identified by \p int32 keys, and have lengths uniformly sampled from [1,1000]. * * \image html reduce_by_key_fp32_len_500.png * \image html reduce_by_key_fp64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html reduce_by_key_fp32_len_5.png * \image html reduce_by_key_fp64_len_5.png * * \par Snippet * The code snippet below illustrates the segmented reduction of \p int values grouped * by runs of associated \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_reduce.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * CUB_RUNTIME_FUNCTION __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? 
b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_keys_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_values_in; // e.g., [0, 7, 1, 6, 2, 5, 3, 4] * int *d_unique_out; // e.g., [-, -, -, -, -, -, -, -] * int *d_aggregates_out; // e.g., [-, -, -, -, -, -, -, -] * int *d_num_runs_out; // e.g., [-] * CustomMin reduction_op; * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run reduce-by-key * cub::DeviceReduce::ReduceByKey(d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, reduction_op, num_items); * * // d_unique_out <-- [0, 2, 9, 5, 8] * // d_aggregates_out <-- [0, 1, 6, 2, 4] * // d_num_runs_out <-- [5] * * \endcode * * \tparam KeysInputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input keys \iterator * \tparam UniqueOutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing unique output keys \iterator * \tparam ValuesInputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input values \iterator * \tparam AggregatesOutputIterator <b>[inferred]</b> Random-access output iterator type for writing output value aggregates \iterator * \tparam NumRunsOutputIteratorT <b>[inferred]</b> Output iterator type for recording the number of runs encountered \iterator * \tparam ReductionOpT <b>[inferred]</b> Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename KeysInputIteratorT, typename UniqueOutputIteratorT, typename ValuesInputIteratorT, typename AggregatesOutputIteratorT, typename NumRunsOutputIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t ReduceByKey( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) ReductionOpT reduction_op, ///< [in] Binary reduction functor int num_items, ///< [in] Total number of associated key+value pairs (i.e., the length of \p d_in_keys and \p d_in_values) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; // FlagT iterator type (not used) // Selection op (not used) // Default == operator typedef Equality EqualityOp; return DispatchReduceByKey<KeysInputIteratorT, UniqueOutputIteratorT, ValuesInputIteratorT, AggregatesOutputIteratorT, NumRunsOutputIteratorT, EqualityOp, ReductionOpT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, EqualityOp(), reduction_op, num_items, stream, debug_synchronous); } }; /** * \example example_device_reduce.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
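// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the header above): ArgMin/ArgMax write
// their result as a single cub::KeyValuePair through d_out, which the snippets
// above do not show being read back.  A complete host-side round trip follows;
// the allocation code and the printf are illustrative additions.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>
#include <cstdio>

int main()
{
    const int num_items = 7;
    int h_in[num_items] = {8, 6, 7, 5, 3, 0, 9};

    int *d_in;
    cub::KeyValuePair<int, int> *d_out;
    cudaMalloc(&d_in,  num_items * sizeof(int));
    cudaMalloc(&d_out, sizeof(cub::KeyValuePair<int, int>));
    cudaMemcpy(d_in, h_in, num_items * sizeof(int), cudaMemcpyHostToDevice);

    // Two-phase protocol: size query, allocate, then run.
    void  *d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;
    cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);
    cub::DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    // .key holds the offset of the first minimum, .value holds the minimum itself.
    cub::KeyValuePair<int, int> h_out;
    cudaMemcpy(&h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("min = %d at offset %d\n", h_out.value, h_out.key);      // min = 0 at offset 5
    return 0;
}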
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_spmv.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). */ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "dispatch/dispatch_spmv_orig.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * dense-vector multiplication (SpMV). * \ingroup SingleModule * * \par Overview * The [<em>SpMV computation</em>](http://en.wikipedia.org/wiki/Sparse_matrix-vector_multiplication) * performs the matrix-vector operation * <em>y</em> = <em>alpha</em>*<b>A</b>*<em>x</em> + <em>beta</em>*<em>y</em>, * where: * - <b>A</b> is an <em>m</em>x<em>n</em> sparse matrix whose non-zero structure is specified in * [<em>compressed-storage-row (CSR) format</em>](http://en.wikipedia.org/wiki/Sparse_matrix#Compressed_row_Storage_.28CRS_or_CSR.29) * (i.e., three arrays: <em>values</em>, <em>row_offsets</em>, and <em>column_indices</em>) * - <em>x</em> and <em>y</em> are dense vectors * - <em>alpha</em> and <em>beta</em> are scalar multiplicands * * \par Usage Considerations * \cdp_class{DeviceSpmv} * */ struct DeviceSpmv { /******************************************************************//** * \name CSR matrix operations *********************************************************************/ //@{ /** * \brief This function performs the matrix-vector operation <em>y</em> = <b>A</b>*<em>x</em>. * * \par Snippet * The code snippet below illustrates SpMV upon a 9x9 CSR matrix <b>A</b> * representing a 3x3 lattice (24 non-zeros). 
     *
     * \par
     * \code
     * #include <cub/cub.cuh>   // or equivalently <cub/device/device_spmv.cuh>
     *
     * // Declare, allocate, and initialize device-accessible pointers for input matrix A, input vector x,
     * // and output vector y
     * int    num_rows     = 9;
     * int    num_cols     = 9;
     * int    num_nonzeros = 24;
     * float  alpha        = 1.0;
     * float  beta         = 0.0;
     *
     * float* d_values;          // e.g., [1, 1, 1, 1, 1, 1, 1, 1,
     *                           //        1, 1, 1, 1, 1, 1, 1, 1,
     *                           //        1, 1, 1, 1, 1, 1, 1, 1]
     *
     * int*   d_column_indices;  // e.g., [1, 3, 0, 2, 4, 1, 5, 0,
     *                           //        4, 6, 1, 3, 5, 7, 2, 4,
     *                           //        8, 3, 7, 4, 6, 8, 5, 7]
     *
     * int*   d_row_offsets;     // e.g., [0, 2, 5, 7, 10, 14, 17, 19, 22, 24]
     *
     * float* d_vector_x;        // e.g., [1, 1, 1, 1, 1, 1, 1, 1, 1]
     * float* d_vector_y;        // e.g., [ ,  ,  ,  ,  ,  ,  ,  ,  ]
     * ...
     *
     * // Determine temporary device storage requirements
     * void*    d_temp_storage = NULL;
     * size_t   temp_storage_bytes = 0;
     * cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes, d_values,
     *     d_row_offsets, d_column_indices, d_vector_x, d_vector_y,
     *     alpha, beta, num_rows, num_cols, num_nonzeros);
     *
     * // Allocate temporary storage
     * cudaMalloc(&d_temp_storage, temp_storage_bytes);
     *
     * // Run SpMV
     * cub::DeviceSpmv::CsrMV(d_temp_storage, temp_storage_bytes, d_values,
     *     d_row_offsets, d_column_indices, d_vector_x, d_vector_y,
     *     alpha, beta, num_rows, num_cols, num_nonzeros);
     *
     * // d_vector_y <-- [2, 3, 2, 3, 4, 3, 2, 3, 2]
     *
     * \endcode
     *
     * \tparam ValueT       <b>[inferred]</b> Matrix and vector value type (e.g., \p float, \p double, etc.)
     * \tparam SemiringT    Semiring type supplying the additive and multiplicative operators used by the SpMV computation (cannot be inferred from the arguments)
     */
    template <
        typename            ValueT,
        typename            SemiringT>
    CUB_RUNTIME_FUNCTION
    static cudaError_t CsrMV(
        void*               d_temp_storage,              ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t&             temp_storage_bytes,          ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
        const ValueT*       d_values,                    ///< [in] Pointer to the array of \p num_nonzeros values of the corresponding nonzero elements of matrix <b>A</b>.
        const int*          d_row_offsets,               ///< [in] Pointer to the array of \p num_rows + 1 offsets demarcating the start of every row in \p d_column_indices and \p d_values (with the final entry being equal to \p num_nonzeros)
        const int*          d_column_indices,            ///< [in] Pointer to the array of \p num_nonzeros column-indices of the corresponding nonzero elements of matrix <b>A</b>.  (Indices are zero-based.)
        const ValueT*       d_vector_x,                  ///< [in] Pointer to the array of \p num_cols values corresponding to the dense input vector <em>x</em>
        ValueT*             d_vector_y,                  ///< [out] Pointer to the array of \p num_rows values corresponding to the dense output vector <em>y</em>
        ValueT              alpha,                       ///< [in] Scalar multiplicand applied to <b>A</b>*<em>x</em>
        ValueT              beta,                        ///< [in] Scalar multiplicand applied to the input value of <em>y</em>
        int                 num_rows,                    ///< [in] number of rows of matrix <b>A</b>.
        int                 num_cols,                    ///< [in] number of columns of matrix <b>A</b>.
        int                 num_nonzeros,                ///< [in] number of nonzero elements of matrix <b>A</b>.
        cudaStream_t        stream                  = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
        bool                debug_synchronous       = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
{ SpmvParams<ValueT, int> spmv_params; spmv_params.d_values = d_values; spmv_params.d_row_end_offsets = d_row_offsets + 1; spmv_params.d_column_indices = d_column_indices; spmv_params.d_vector_x = d_vector_x; spmv_params.d_vector_y = d_vector_y; spmv_params.num_rows = num_rows; spmv_params.num_cols = num_cols; spmv_params.num_nonzeros = num_nonzeros; spmv_params.alpha = alpha; spmv_params.beta = beta; return DispatchSpmv<ValueT, int, SemiringT>::Dispatch( d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous); } //@} end member group }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
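// ---------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the original
// header).  It shows the two-phase temp-storage idiom for the semiring CsrMV
// entry point above.  Unlike stock CUB, this variant takes alpha/beta scalars
// and a SemiringT template argument; SemiringT cannot be deduced from the
// arguments, so it must be supplied explicitly.  "SemiringT" below is a
// placeholder for one of the semiring types defined elsewhere in this fork;
// the concrete type name is an assumption, not something declared in this
// header.
// ---------------------------------------------------------------------------
//
//   float* d_values;          // CSR values, num_nonzeros entries
//   int*   d_row_offsets;     // num_rows + 1 entries
//   int*   d_column_indices;  // num_nonzeros entries (zero-based)
//   float* d_vector_x;        // num_cols entries
//   float* d_vector_y;        // num_rows entries
//   float  alpha = 1.0f, beta = 0.0f;
//   int    num_rows, num_cols, num_nonzeros;
//
//   void*  d_temp_storage     = NULL;
//   size_t temp_storage_bytes = 0;
//
//   // Size the temporary storage (d_temp_storage == NULL)
//   cub::DeviceSpmv::CsrMV<float, SemiringT>(   // SemiringT: placeholder semiring type
//       d_temp_storage, temp_storage_bytes,
//       d_values, d_row_offsets, d_column_indices,
//       d_vector_x, d_vector_y,
//       alpha, beta,
//       num_rows, num_cols, num_nonzeros);
//
//   cudaMalloc(&d_temp_storage, temp_storage_bytes);
//
//   // Run y = alpha * A * x + beta * y, evaluated in the chosen semiring
//   cub::DeviceSpmv::CsrMV<float, SemiringT>(
//       d_temp_storage, temp_storage_bytes,
//       d_values, d_row_offsets, d_column_indices,
//       d_vector_x, d_vector_y,
//       alpha, beta,
//       num_rows, num_cols, num_nonzeros);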
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_partition.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DevicePartition provides device-wide, parallel operations for partitioning sequences of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_select_if.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DevicePartition provides device-wide, parallel operations for partitioning sequences of data items residing within device-accessible memory. ![](partition_logo.png) * \ingroup SingleModule * * \par Overview * These operations apply a selection criterion to construct a partitioned output sequence from items selected/unselected from * a specified input sequence. * * \par Usage Considerations * \cdp_class{DevicePartition} * * \par Performance * \linear_performance{partition} * * \par * The following chart illustrates DevicePartition::If * performance across different CUDA architectures for \p int32 items, * where 50% of the items are randomly selected for the first partition. * \plots_below * * \image html partition_if_int32_50_percent.png * */ struct DevicePartition { /** * \brief Uses the \p d_flags sequence to split the corresponding items from \p d_in into a partitioned sequence \p d_out. The total number of items copied into the first partition is written to \p d_num_selected_out. ![](partition_flags_logo.png) * * \par * - The value type of \p d_flags must be castable to \p bool (e.g., \p bool, \p char, \p int, etc.). * - Copies of the selected items are compacted into \p d_out and maintain their original * relative ordering, however copies of the unselected items are compacted into the * rear of \p d_out in reverse order. 
* - \devicestorage * * \par Snippet * The code snippet below illustrates the compaction of items selected from an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_partition.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input, flags, and output * int num_items; // e.g., 8 * int *d_in; // e.g., [1, 2, 3, 4, 5, 6, 7, 8] * char *d_flags; // e.g., [1, 0, 0, 1, 0, 1, 1, 0] * int *d_out; // e.g., [ , , , , , , , ] * int *d_num_selected_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run selection * cub::DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); * * // d_out <-- [1, 4, 6, 7, 8, 5, 3, 2] * // d_num_selected_out <-- [4] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam FlagIterator <b>[inferred]</b> Random-access input iterator type for reading selection flags \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing output items \iterator * \tparam NumSelectedIteratorT <b>[inferred]</b> Output iterator type for recording the number of items selected \iterator */ template < typename InputIteratorT, typename FlagIterator, typename OutputIteratorT, typename NumSelectedIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Flagged( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items FlagIterator d_flags, ///< [in] Pointer to the input sequence of selection flags OutputIteratorT d_out, ///< [out] Pointer to the output sequence of partitioned data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., the offset of the unselected partition) int num_items, ///< [in] Total number of items to select from cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef NullType SelectOp; // Selection op (not used) typedef NullType EqualityOp; // Equality operator (not used) return DispatchSelectIf<InputIteratorT, FlagIterator, OutputIteratorT, NumSelectedIteratorT, SelectOp, EqualityOp, OffsetT, true>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, SelectOp(), EqualityOp(), num_items, stream, debug_synchronous); } /** * \brief Uses the \p select_op functor to split the corresponding items from \p d_in into a partitioned sequence \p d_out. The total number of items copied into the first partition is written to \p d_num_selected_out. 
![](partition_logo.png) * * \par * - Copies of the selected items are compacted into \p d_out and maintain their original * relative ordering, however copies of the unselected items are compacted into the * rear of \p d_out in reverse order. * - \devicestorage * * \par Performance * The following charts illustrate saturated partition-if performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. Items are * selected for the first partition with 50% probability. * * \image html partition_if_int32_50_percent.png * \image html partition_if_int64_50_percent.png * * \par * The following charts are similar, but 5% selection probability for the first partition: * * \image html partition_if_int32_5_percent.png * \image html partition_if_int64_5_percent.png * * \par Snippet * The code snippet below illustrates the compaction of items selected from an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_partition.cuh> * * // Functor type for selecting values less than some criteria * struct LessThan * { * int compare; * * CUB_RUNTIME_FUNCTION __forceinline__ * LessThan(int compare) : compare(compare) {} * * CUB_RUNTIME_FUNCTION __forceinline__ * bool operator()(const int &a) const { * return (a < compare); * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8] * int *d_out; // e.g., [ , , , , , , , ] * int *d_num_selected_out; // e.g., [ ] * LessThan select_op(7); * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run selection * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); * * // d_out <-- [0, 2, 3, 5, 2, 8, 81, 9] * // d_num_selected_out <-- [5] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing output items \iterator * \tparam NumSelectedIteratorT <b>[inferred]</b> Output iterator type for recording the number of items selected \iterator * \tparam SelectOp <b>[inferred]</b> Selection functor type having member <tt>bool operator()(const T &a)</tt> */ template < typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT, typename SelectOp> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t If( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of partitioned data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., the offset of the unselected partition) int num_items, ///< [in] Total number of items to select from SelectOp select_op, ///< [in] Unary selection operator cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef NullType* FlagIterator; // FlagT iterator type (not used) typedef NullType EqualityOp; // Equality operator (not used) return DispatchSelectIf<InputIteratorT, FlagIterator, OutputIteratorT, NumSelectedIteratorT, SelectOp, EqualityOp, OffsetT, true>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, NULL, d_out, d_num_selected_out, select_op, EqualityOp(), num_items, stream, debug_synchronous); } }; /** * \example example_device_partition_flagged.cu * \example example_device_partition_if.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
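// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original header):
// how a caller can consume the partitioned output of DevicePartition::Flagged
// or DevicePartition::If.  As documented above, the selected items occupy the
// front of d_out in their original relative order, and the unselected items
// occupy the rear of d_out in reverse order.  Names such as h_num_selected
// are hypothetical host-side variables.
// ---------------------------------------------------------------------------
//
//   int h_num_selected = 0;
//   cudaMemcpy(&h_num_selected, d_num_selected_out, sizeof(int),
//              cudaMemcpyDeviceToHost);
//
//   // First partition (selected items), original relative order:
//   //     d_out[0] .. d_out[h_num_selected - 1]
//   //
//   // Second partition (unselected items), reversed relative order:
//   //     d_out[h_num_selected] .. d_out[num_items - 1]
//   //
//   // e.g., for the Flagged() snippet above (num_items == 8, 4 selected):
//   //     selected   = [1, 4, 6, 7]
//   //     unselected = [8, 5, 3, 2]   (i.e., 2, 3, 5, 8 in reverse)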
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_scan.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_scan.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. ![](device_scan.png) * \ingroup SingleModule * * \par Overview * Given a sequence of input elements and a binary reduction operator, a [<em>prefix scan</em>](http://en.wikipedia.org/wiki/Prefix_sum) * produces an output sequence where each element is computed to be the reduction * of the elements occurring earlier in the input sequence. <em>Prefix sum</em> * connotes a prefix scan with the addition operator. The term \em inclusive indicates * that the <em>i</em><sup>th</sup> output reduction incorporates the <em>i</em><sup>th</sup> input. * The term \em exclusive indicates the <em>i</em><sup>th</sup> input is not incorporated into * the <em>i</em><sup>th</sup> output reduction. * * \par * As of CUB 1.0.1 (2013), CUB's device-wide scan APIs have implemented our <em>"decoupled look-back"</em> algorithm * for performing global prefix scan with only a single pass through the * input data, as described in our 2016 technical report [1]. The central * idea is to leverage a small, constant factor of redundant work in order to overlap the latencies * of global prefix propagation with local computation. 
As such, our algorithm requires only * ~2<em>n</em> data movement (<em>n</em> inputs are read, <em>n</em> outputs are written), and typically * proceeds at "memcpy" speeds. * * \par * [1] [Duane Merrill and Michael Garland. "Single-pass Parallel Prefix Scan with Decoupled Look-back", <em>NVIDIA Technical Report NVR-2016-002</em>, 2016.](https://research.nvidia.com/publication/single-pass-parallel-prefix-scan-decoupled-look-back) * * \par Usage Considerations * \cdp_class{DeviceScan} * * \par Performance * \linear_performance{prefix scan} * * \par * The following chart illustrates DeviceScan::ExclusiveSum * performance across different CUDA architectures for \p int32 keys. * \plots_below * * \image html scan_int32.png * */ struct DeviceScan { /******************************************************************//** * \name Exclusive scans *********************************************************************/ //@{ /** * \brief Computes a device-wide exclusive prefix sum. The value of 0 is applied as the initial value, and is assigned to *d_out. * * \par * - Supports non-commutative sum operators. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Performance * The following charts illustrate saturated exclusive sum performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. * * \image html scan_int32.png * \image html scan_int64.png * * \par Snippet * The code snippet below illustrates the exclusive prefix sum of an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ , , , , , , ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run exclusive prefix sum * cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out s<-- [0, 8, 14, 21, 26, 29, 29] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading scan inputs \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing scan outputs \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t ExclusiveSum( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Initial value OutputT init_value = 0; return DispatchScan<InputIteratorT, OutputIteratorT, Sum, OutputT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, Sum(), init_value, num_items, stream, debug_synchronous); } /** * \brief Computes a device-wide exclusive prefix scan using the specified binary \p scan_op functor. The \p init_value value is applied as the initial value, and is assigned to *d_out. * * \par * - Supports non-commutative scan operators. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the exclusive prefix min-scan of an \p int device vector * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * CUB_RUNTIME_FUNCTION __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ , , , , , , ] * CustomMin min_op * ... 
     *
     * // Determine temporary device storage requirements for exclusive prefix scan
     * void     *d_temp_storage = NULL;
     * size_t   temp_storage_bytes = 0;
     * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, INT_MAX, num_items);
     *
     * // Allocate temporary storage for exclusive prefix scan
     * cudaMalloc(&d_temp_storage, temp_storage_bytes);
     *
     * // Run exclusive prefix min-scan
     * cub::DeviceScan::ExclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, INT_MAX, num_items);
     *
     * // d_out <-- [2147483647, 8, 6, 6, 5, 3, 0]
     *
     * \endcode
     *
     * \tparam InputIteratorT   <b>[inferred]</b> Random-access input iterator type for reading scan inputs \iterator
     * \tparam OutputIteratorT  <b>[inferred]</b> Random-access output iterator type for writing scan outputs \iterator
     * \tparam ScanOpT          <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt>
     * \tparam InitValueT       <b>[inferred]</b> Type of the \p init_value used to seed the exclusive scan
     */
    template <
        typename        InputIteratorT,
        typename        OutputIteratorT,
        typename        ScanOpT,
        typename        InitValueT>
    CUB_RUNTIME_FUNCTION
    static cudaError_t ExclusiveScan(
        void                *d_temp_storage,                ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t              &temp_storage_bytes,            ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
        InputIteratorT      d_in,                           ///< [in] Pointer to the input sequence of data items
        OutputIteratorT     d_out,                          ///< [out] Pointer to the output sequence of data items
        ScanOpT             scan_op,                        ///< [in] Binary scan functor
        InitValueT          init_value,                     ///< [in] Initial value to seed the exclusive scan (and is assigned to *d_out)
        int                 num_items,                      ///< [in] Total number of input items (i.e., the length of \p d_in)
        cudaStream_t        stream              = 0,        ///< [in] <b>[optional]</b> CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
        bool                debug_synchronous   = false)    ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors.  May cause significant slowdown.  Default is \p false.
    {
        // Signed integer type for global offsets
        typedef int OffsetT;

        return DispatchScan<InputIteratorT, OutputIteratorT, ScanOpT, InitValueT, OffsetT>::Dispatch(
            d_temp_storage,
            temp_storage_bytes,
            d_in,
            d_out,
            scan_op,
            init_value,
            num_items,
            stream,
            debug_synchronous);
    }


    //@}  end member group
    /******************************************************************//**
     * \name Inclusive scans
     *********************************************************************/
    //@{


    /**
     * \brief Computes a device-wide inclusive prefix sum.
     *
     * \par
     * - Supports non-commutative sum operators.
     * - Provides "run-to-run" determinism for pseudo-associative reduction
     *   (e.g., addition of floating point types) on the same GPU device.
     *   However, results for pseudo-associative reduction may be inconsistent
     *   from one device to another device of a different compute-capability
     *   because CUB can employ different tile-sizing for different architectures.
     * - \devicestorage
     *
     * \par Snippet
     * The code snippet below illustrates the inclusive prefix sum of an \p int device vector.
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ , , , , , , ] * ... * * // Determine temporary device storage requirements for inclusive prefix sum * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // Allocate temporary storage for inclusive prefix sum * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run inclusive prefix sum * cub::DeviceScan::InclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items); * * // d_out <-- [8, 14, 21, 26, 29, 29, 38] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading scan inputs \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing scan outputs \iterator */ template < typename InputIteratorT, typename OutputIteratorT> CUB_RUNTIME_FUNCTION static cudaError_t InclusiveSum( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; return DispatchScan<InputIteratorT, OutputIteratorT, Sum, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, Sum(), NullType(), num_items, stream, debug_synchronous); } /** * \brief Computes a device-wide inclusive prefix scan using the specified binary \p scan_op functor. * * \par * - Supports non-commutative scan operators. * - Provides "run-to-run" determinism for pseudo-associative reduction * (e.g., addition of floating point types) on the same GPU device. * However, results for pseudo-associative reduction may be inconsistent * from one device to a another device of a different compute-capability * because CUB can employ different tile-sizing for different architectures. * - \devicestorage * * \par Snippet * The code snippet below illustrates the inclusive prefix min-scan of an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_scan.cuh> * * // CustomMin functor * struct CustomMin * { * template <typename T> * CUB_RUNTIME_FUNCTION __forceinline__ * T operator()(const T &a, const T &b) const { * return (b < a) ? b : a; * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 7 * int *d_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_out; // e.g., [ , , , , , , ] * CustomMin min_op; * ... 
* * // Determine temporary device storage requirements for inclusive prefix scan * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, num_items); * * // Allocate temporary storage for inclusive prefix scan * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run inclusive prefix min-scan * cub::DeviceScan::InclusiveScan(d_temp_storage, temp_storage_bytes, d_in, d_out, min_op, num_items); * * // d_out <-- [8, 6, 6, 5, 3, 0, 0] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading scan inputs \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing scan outputs \iterator * \tparam ScanOp <b>[inferred]</b> Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> */ template < typename InputIteratorT, typename OutputIteratorT, typename ScanOpT> CUB_RUNTIME_FUNCTION static cudaError_t InclusiveScan( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items ScanOpT scan_op, ///< [in] Binary scan functor int num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; return DispatchScan<InputIteratorT, OutputIteratorT, ScanOpT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, NullType(), num_items, stream, debug_synchronous); } //@} end member group }; /** * \example example_device_scan.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
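// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the original header):
// a common use of ExclusiveSum in sparse-graph codes is converting per-row
// nonzero counts into CSR row offsets.  d_row_counts and d_row_offsets are
// hypothetical device arrays; d_row_offsets is assumed to hold
// num_rows + 1 entries.
// ---------------------------------------------------------------------------
//
//   int  num_rows;        // e.g., 4
//   int* d_row_counts;    // e.g., [2, 0, 3, 1]
//   int* d_row_offsets;   // e.g., [ , , , , ]   (num_rows + 1 entries)
//
//   void*  d_temp_storage     = NULL;
//   size_t temp_storage_bytes = 0;
//
//   // Size, then run, the exclusive prefix sum over the row counts
//   cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
//                                 d_row_counts, d_row_offsets, num_rows);
//   cudaMalloc(&d_temp_storage, temp_storage_bytes);
//   cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
//                                 d_row_counts, d_row_offsets, num_rows);
//
//   // d_row_offsets[0 .. num_rows-1] <-- [0, 2, 2, 5]
//   // The final entry (the total nonzero count, here 6) is not produced by
//   // the scan itself; the caller still writes
//   //     d_row_offsets[num_rows] = d_row_offsets[num_rows-1] + d_row_counts[num_rows-1];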
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_histogram.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "dispatch/dispatch_histogram.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within device-accessible memory. ![](histogram_logo.png) * \ingroup SingleModule * * \par Overview * A <a href="http://en.wikipedia.org/wiki/Histogram"><em>histogram</em></a> * counts the number of observations that fall into each of the disjoint categories (known as <em>bins</em>). * * \par Usage Considerations * \cdp_class{DeviceHistogram} * */ struct DeviceHistogram { /******************************************************************//** * \name Evenly-segmented bin ranges *********************************************************************/ //@{ /** * \brief Computes an intensity histogram from a sequence of data samples using equal-width bins. 
* * \par * - The number of histogram bins is (\p num_levels - 1) * - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of a six-bin histogram * from a sequence of float samples * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples and * // output histogram * int num_samples; // e.g., 10 * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5] * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] * int num_levels; // e.g., 7 (seven level boundaries for six bins) * float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin) * float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin) * ... * * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples); * * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; * * \endcode * * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t HistogramEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1. int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1. LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin. LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin. OffsetT num_samples, ///< [in] The number of input samples (i.e., the length of \p d_samples) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; CounterT* d_histogram1[1] = {d_histogram}; int num_levels1[1] = {num_levels}; LevelT lower_level1[1] = {lower_level}; LevelT upper_level1[1] = {upper_level}; return MultiHistogramEven<1, 1>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram1, num_levels1, lower_level1, upper_level1, num_samples, 1, sizeof(SampleT) * num_samples, stream, debug_synchronous); } /** * \brief Computes an intensity histogram from a sequence of data samples using equal-width bins. * * \par * - A two-dimensional <em>region of interest</em> within \p d_samples can be specified * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. * - The row stride must be a whole multiple of the sample data type * size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>. * - The number of histogram bins is (\p num_levels - 1) * - All bins comprise the same width of sample values: (\p upper_level - \p lower_level) / (\p num_levels - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of a six-bin histogram * from a 2x5 region of interest within a flattened 2x7 array of float samples. * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples and * // output histogram * int num_row_samples; // e.g., 5 * int num_rows; // e.g., 2; * size_t row_stride_bytes; // e.g., 7 * sizeof(float) * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -, * // 0.3, 2.9, 2.0, 6.1, 999.5, -, -] * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] * int num_levels; // e.g., 7 (seven level boundaries for six bins) * float lower_level; // e.g., 0.0 (lower sample value boundary of lowest bin) * float upper_level; // e.g., 12.0 (upper sample value boundary of upper bin) * ... * * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, * num_row_samples, num_rows, row_stride_bytes); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes, d_samples, d_histogram, * d_samples, d_histogram, num_levels, lower_level, upper_level, * num_row_samples, num_rows, row_stride_bytes); * * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; * * \endcode * * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t HistogramEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1. int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1. LevelT lower_level, ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin. LevelT upper_level, ///< [in] The upper sample value bound (exclusive) for the highest histogram bin. OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { CounterT* d_histogram1[1] = {d_histogram}; int num_levels1[1] = {num_levels}; LevelT lower_level1[1] = {lower_level}; LevelT upper_level1[1] = {upper_level}; return MultiHistogramEven<1, 1>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram1, num_levels1, lower_level1, upper_level1, num_row_samples, num_rows, row_stride_bytes, stream, debug_synchronous); } /** * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins. * * \par * - The input is a sequence of <em>pixel</em> structures, where each pixel comprises * a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel). * - Of the \p NUM_CHANNELS specified, the function will only compute histograms * for the first \p NUM_ACTIVE_CHANNELS (e.g., only <em>RGB</em> histograms from <em>RGBA</em> * pixel samples). * - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. * - For channel<sub><em>i</em></sub>, the range of values for all histogram bins * have the same width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt> num_levels[i]</tt> - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of three 256-bin <em>RGB</em> histograms * from a quad-channel sequence of <em>RGBA</em> pixels (8 bits per channel per pixel) * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples * // and output histograms * int num_pixels; // e.g., 5 * unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), * // (0, 6, 7, 5), (3, 0, 2, 6)] * int* d_histogram[3]; // e.g., three device pointers to three device buffers, * // each allocated with 256 integer counters * int num_levels[3]; // e.g., {257, 257, 257}; * unsigned int lower_level[3]; // e.g., {0, 0, 0}; * unsigned int upper_level[3]; // e.g., {256, 256, 256}; * ... 
* * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels); * * // d_histogram <-- [ [1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0], * // [0, 3, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0], * // [0, 0, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ] * * \endcode * * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t MultiHistogramEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples). CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1. int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; return MultiHistogramEven<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level, num_pixels, 1, sizeof(SampleT) * NUM_CHANNELS * num_pixels, stream, debug_synchronous); } /** * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using equal-width bins. * * \par * - The input is a sequence of <em>pixel</em> structures, where each pixel comprises * a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel). * - Of the \p NUM_CHANNELS specified, the function will only compute histograms * for the first \p NUM_ACTIVE_CHANNELS (e.g., only <em>RGB</em> histograms from <em>RGBA</em> * pixel samples). * - A two-dimensional <em>region of interest</em> within \p d_samples can be specified * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. * - The row stride must be a whole multiple of the sample data type * size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>. * - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. * - For channel<sub><em>i</em></sub>, the range of values for all histogram bins * have the same width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt> num_levels[i]</tt> - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of three 256-bin <em>RGB</em> histograms from a 2x3 region of * interest of within a flattened 2x4 array of quad-channel <em>RGBA</em> pixels (8 bits per channel per pixel). * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples * // and output histograms * int num_row_pixels; // e.g., 3 * int num_rows; // e.g., 2 * size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS * unsigned char* d_samples; // e.g., [(2, 6, 7, 5), (3, 0, 2, 1), (7, 0, 6, 2), (-, -, -, -), * // (0, 6, 7, 5), (3, 0, 2, 6), (1, 1, 1, 1), (-, -, -, -)] * int* d_histogram[3]; // e.g., three device pointers to three device buffers, * // each allocated with 256 integer counters * int num_levels[3]; // e.g., {257, 257, 257}; * unsigned int lower_level[3]; // e.g., {0, 0, 0}; * unsigned int upper_level[3]; // e.g., {256, 256, 256}; * ... 
* * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, * num_row_pixels, num_rows, row_stride_bytes); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiHistogramEven<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, lower_level, upper_level, * num_row_pixels, num_rows, row_stride_bytes); * * // d_histogram <-- [ [1, 1, 1, 2, 0, 0, 0, 1, 0, 0, 0, ..., 0], * // [0, 4, 0, 0, 0, 0, 2, 0, 0, 0, 0, ..., 0], * // [0, 1, 2, 0, 0, 0, 1, 2, 0, 0, 0, ..., 0] ] * * \endcode * * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t MultiHistogramEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples). CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1. int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. 
May cause significant slowdown. Default is \p false. { /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; Int2Type<sizeof(SampleT) == 1> is_byte_sample; if ((sizeof(OffsetT) > sizeof(int)) && ((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits<int>::max())) { // Down-convert OffsetT data type return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, int>::DispatchEven( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level, (int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)), stream, debug_synchronous, is_byte_sample); } return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, OffsetT>::DispatchEven( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, lower_level, upper_level, num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)), stream, debug_synchronous, is_byte_sample); } //@} end member group /******************************************************************//** * \name Custom bin ranges *********************************************************************/ //@{ /** * \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels. * * \par * - The number of histogram bins is (\p num_levels - 1) * - The value range for bin<sub><em>i</em></sub> is [<tt>level[i]</tt>, <tt>level[i+1]</tt>) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of an six-bin histogram * from a sequence of float samples * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples and * // output histogram * int num_samples; // e.g., 10 * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, 0.3, 2.9, 2.0, 6.1, 999.5] * int* d_histogram; // e.g., [ -, -, -, -, -, -, -, -] * int num_levels // e.g., 7 (seven level boundaries for six bins) * float* d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0] * ... * * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_samples); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_samples); * * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; * * \endcode * * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t HistogramRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1. int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1. LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_samples, ///< [in] The number of data samples per row in the region of interest cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; CounterT* d_histogram1[1] = {d_histogram}; int num_levels1[1] = {num_levels}; LevelT* d_levels1[1] = {d_levels}; return MultiHistogramRange<1, 1>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram1, num_levels1, d_levels1, num_samples, 1, sizeof(SampleT) * num_samples, stream, debug_synchronous); } /** * \brief Computes an intensity histogram from a sequence of data samples using the specified bin boundary levels. * * \par * - A two-dimensional <em>region of interest</em> within \p d_samples can be specified * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. * - The row stride must be a whole multiple of the sample data type * size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>. * - The number of histogram bins is (\p num_levels - 1) * - The value range for bin<sub><em>i</em></sub> is [<tt>level[i]</tt>, <tt>level[i+1]</tt>) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of a six-bin histogram * from a 2x5 region of interest within a flattened 2x7 array of float samples. * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples and * // output histogram * int num_row_samples; // e.g., 5 * int num_rows; // e.g., 2; * int row_stride_bytes; // e.g., 7 * sizeof(float) * float* d_samples; // e.g., [2.2, 6.0, 7.1, 2.9, 3.5, -, -, * // 0.3, 2.9, 2.0, 6.1, 999.5, -, -] * int* d_histogram; // e.g., [ , , , , , , , ] * int num_levels // e.g., 7 (seven level boundaries for six bins) * float *d_levels; // e.g., [0.0, 2.0, 4.0, 6.0, 8.0, 12.0, 16.0] * ... 
* * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, * num_row_samples, num_rows, row_stride_bytes); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, * num_row_samples, num_rows, row_stride_bytes); * * // d_histogram <-- [1, 0, 5, 0, 3, 0, 0, 0]; * * \endcode * * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t HistogramRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of data samples. CounterT* d_histogram, ///< [out] The pointer to the histogram counter output array of length <tt>num_levels</tt> - 1. int num_levels, ///< [in] The number of boundaries (levels) for delineating histogram samples. Implies that the number of bins is <tt>num_levels</tt> - 1. LevelT* d_levels, ///< [in] The pointer to the array of boundaries (levels). Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_row_samples, ///< [in] The number of data samples per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { CounterT* d_histogram1[1] = {d_histogram}; int num_levels1[1] = {num_levels}; LevelT* d_levels1[1] = {d_levels}; return MultiHistogramRange<1, 1>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram1, num_levels1, d_levels1, num_row_samples, num_rows, row_stride_bytes, stream, debug_synchronous); } /** * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels. * * \par * - The input is a sequence of <em>pixel</em> structures, where each pixel comprises * a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel). * - Of the \p NUM_CHANNELS specified, the function will only compute histograms * for the first \p NUM_ACTIVE_CHANNELS (e.g., <em>RGB</em> histograms from <em>RGBA</em> * pixel samples). 
* - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. * - For channel<sub><em>i</em></sub>, the range of values for all histogram bins * have the same width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt> num_levels[i]</tt> - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of three 4-bin <em>RGB</em> histograms * from a quad-channel sequence of <em>RGBA</em> pixels (8 bits per channel per pixel) * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples * // and output histograms * int num_pixels; // e.g., 5 * unsigned char *d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(7, 0, 6, 2), * // (0, 6, 7, 5),(3, 0, 2, 6)] * unsigned int *d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]]; * int num_levels[3]; // e.g., {5, 5, 5}; * unsigned int *d_levels[3]; // e.g., [ [0, 2, 4, 6, 8], * // [0, 2, 4, 6, 8], * // [0, 2, 4, 6, 8] ]; * ... * * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_pixels); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_pixels); * * // d_histogram <-- [ [1, 3, 0, 1], * // [3, 0, 0, 2], * // [0, 2, 0, 3] ] * * \endcode * * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t MultiHistogramRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples). CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1. int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. 
Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_pixels, ///< [in] The number of multi-channel pixels (i.e., the length of \p d_samples / NUM_CHANNELS) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; return MultiHistogramRange<NUM_CHANNELS, NUM_ACTIVE_CHANNELS>( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels, num_pixels, 1, sizeof(SampleT) * NUM_CHANNELS * num_pixels, stream, debug_synchronous); } /** * \brief Computes per-channel intensity histograms from a sequence of multi-channel "pixel" data samples using the specified bin boundary levels. * * \par * - The input is a sequence of <em>pixel</em> structures, where each pixel comprises * a record of \p NUM_CHANNELS consecutive data samples (e.g., an <em>RGBA</em> pixel). * - Of the \p NUM_CHANNELS specified, the function will only compute histograms * for the first \p NUM_ACTIVE_CHANNELS (e.g., <em>RGB</em> histograms from <em>RGBA</em> * pixel samples). * - A two-dimensional <em>region of interest</em> within \p d_samples can be specified * using the \p num_row_samples, num_rows, and \p row_stride_bytes parameters. * - The row stride must be a whole multiple of the sample data type * size, i.e., <tt>(row_stride_bytes % sizeof(SampleT)) == 0</tt>. * - The number of histogram bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. * - For channel<sub><em>i</em></sub>, the range of values for all histogram bins * have the same width: (<tt>upper_level[i]</tt> - <tt>lower_level[i]</tt>) / (<tt> num_levels[i]</tt> - 1) * - \devicestorage * * \par Snippet * The code snippet below illustrates the computation of three 4-bin <em>RGB</em> histograms from a 2x3 region of * interest of within a flattened 2x4 array of quad-channel <em>RGBA</em> pixels (8 bits per channel per pixel). * * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_histogram.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input samples * // and output histograms * int num_row_pixels; // e.g., 3 * int num_rows; // e.g., 2 * size_t row_stride_bytes; // e.g., 4 * sizeof(unsigned char) * NUM_CHANNELS * unsigned char* d_samples; // e.g., [(2, 6, 7, 5),(3, 0, 2, 1),(1, 1, 1, 1),(-, -, -, -), * // (7, 0, 6, 2),(0, 6, 7, 5),(3, 0, 2, 6),(-, -, -, -)] * int* d_histogram[3]; // e.g., [[ -, -, -, -],[ -, -, -, -],[ -, -, -, -]]; * int num_levels[3]; // e.g., {5, 5, 5}; * unsigned int* d_levels[3]; // e.g., [ [0, 2, 4, 6, 8], * // [0, 2, 4, 6, 8], * // [0, 2, 4, 6, 8] ]; * ... 
* * // Determine temporary device storage requirements * void* d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Compute histograms * cub::DeviceHistogram::MultiHistogramRange<4, 3>(d_temp_storage, temp_storage_bytes, * d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, row_stride_bytes); * * // d_histogram <-- [ [2, 3, 0, 1], * // [3, 0, 0, 2], * // [1, 2, 0, 3] ] * * \endcode * * \tparam NUM_CHANNELS Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) * \tparam NUM_ACTIVE_CHANNELS <b>[inferred]</b> Number of channels actively being histogrammed * \tparam SampleIteratorT <b>[inferred]</b> Random-access input iterator type for reading input samples. \iterator * \tparam CounterT <b>[inferred]</b> Integer type for histogram bin counters * \tparam LevelT <b>[inferred]</b> Type for specifying boundaries (levels) * \tparam OffsetT <b>[inferred]</b> Signed integer type for sequence offsets, list lengths, pointer differences, etc. \offset_size1 */ template < int NUM_CHANNELS, int NUM_ACTIVE_CHANNELS, typename SampleIteratorT, typename CounterT, typename LevelT, typename OffsetT> CUB_RUNTIME_FUNCTION static cudaError_t MultiHistogramRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four <em>RGBA</em> 8-bit samples). CounterT* d_histogram[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histogram[i]</tt> should be <tt>num_levels[i]</tt> - 1. int num_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_levels[i]</tt> - 1. LevelT* d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest size_t row_stride_bytes, ///< [in] The number of bytes between starts of consecutive rows in the region of interest cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; Int2Type<sizeof(SampleT) == 1> is_byte_sample; if ((sizeof(OffsetT) > sizeof(int)) && ((unsigned long long) (num_rows * row_stride_bytes) < (unsigned long long) std::numeric_limits<int>::max())) { // Down-convert OffsetT data type return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, int>::DispatchRange( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels, (int) num_row_pixels, (int) num_rows, (int) (row_stride_bytes / sizeof(SampleT)), stream, debug_synchronous, is_byte_sample); } return DipatchHistogram<NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, LevelT, OffsetT>::DispatchRange( d_temp_storage, temp_storage_bytes, d_samples, d_histogram, num_levels, d_levels, num_row_pixels, num_rows, (OffsetT) (row_stride_bytes / sizeof(SampleT)), stream, debug_synchronous, is_byte_sample); } //@} end member group }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
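

/**
 * \par
 * A minimal end-to-end sketch of the two-phase temporary-storage pattern shared by the
 * DeviceHistogram entry points above, with the host-side error checking and cleanup that
 * the snippets leave out. The wrapper function is hypothetical and the sample values and
 * level boundaries are illustrative only; only the HistogramRange calls mirror the
 * interface documented above.
 *
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/device/device_histogram.cuh>
 *
 * cudaError_t SixBinHistogram()
 * {
 *     // Six bins delineated by seven boundaries: [0,2), [2,4), [4,6), [6,8), [8,12), [12,16)
 *     const int num_samples = 10;
 *     const int num_levels  = 7;
 *     float h_samples[num_samples] = {2.2f, 6.0f, 7.1f, 2.9f, 3.5f, 0.3f, 2.9f, 2.0f, 6.1f, 999.5f};
 *     float h_levels[num_levels]   = {0.0f, 2.0f, 4.0f, 6.0f, 8.0f, 12.0f, 16.0f};
 *
 *     float *d_samples   = NULL;
 *     float *d_levels    = NULL;
 *     int   *d_histogram = NULL;
 *     cudaMalloc(&d_samples,   sizeof(h_samples));
 *     cudaMalloc(&d_levels,    sizeof(h_levels));
 *     cudaMalloc(&d_histogram, (num_levels - 1) * sizeof(int));
 *     cudaMemcpy(d_samples, h_samples, sizeof(h_samples), cudaMemcpyHostToDevice);
 *     cudaMemcpy(d_levels,  h_levels,  sizeof(h_levels),  cudaMemcpyHostToDevice);
 *
 *     // Phase 1: query the required temporary storage size (no work is done)
 *     void   *d_temp_storage    = NULL;
 *     size_t temp_storage_bytes = 0;
 *     cudaError_t error = cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
 *         d_samples, d_histogram, num_levels, d_levels, num_samples);
 *     if (error != cudaSuccess) return error;
 *
 *     // Phase 2: allocate the temporary storage and run the histogram
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     error = cub::DeviceHistogram::HistogramRange(d_temp_storage, temp_storage_bytes,
 *         d_samples, d_histogram, num_levels, d_levels, num_samples);
 *
 *     // Copy the bin counts back and release the device allocations
 *     int h_histogram[num_levels - 1];
 *     cudaMemcpy(h_histogram, d_histogram, sizeof(h_histogram), cudaMemcpyDeviceToHost);
 *     cudaFree(d_temp_storage);
 *     cudaFree(d_histogram);
 *     cudaFree(d_levels);
 *     cudaFree(d_samples);
 *     return error;
 * }
 * \endcode
 */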
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_select.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSelect provides device-wide, parallel operations for compacting selected items from sequences of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_select_if.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceSelect provides device-wide, parallel operations for compacting selected items from sequences of data items residing within device-accessible memory. ![](select_logo.png) * \ingroup SingleModule * * \par Overview * These operations apply a selection criterion to selectively copy * items from a specified input sequence to a compact output sequence. * * \par Usage Considerations * \cdp_class{DeviceSelect} * * \par Performance * \linear_performance{select-flagged, select-if, and select-unique} * * \par * The following chart illustrates DeviceSelect::If * performance across different CUDA architectures for \p int32 items, * where 50% of the items are randomly selected. * * \image html select_if_int32_50_percent.png * * \par * The following chart illustrates DeviceSelect::Unique * performance across different CUDA architectures for \p int32 items * where segments have lengths uniformly sampled from [1,1000]. * * \image html select_unique_int32_len_500.png * * \par * \plots_below * */ struct DeviceSelect { /** * \brief Uses the \p d_flags sequence to selectively copy the corresponding items from \p d_in into \p d_out. The total number of items selected is written to \p d_num_selected_out. ![](select_flags_logo.png) * * \par * - The value type of \p d_flags must be castable to \p bool (e.g., \p bool, \p char, \p int, etc.). 
* - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. * - \devicestorage * * \par Snippet * The code snippet below illustrates the compaction of items selected from an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_select.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input, flags, and output * int num_items; // e.g., 8 * int *d_in; // e.g., [1, 2, 3, 4, 5, 6, 7, 8] * char *d_flags; // e.g., [1, 0, 0, 1, 0, 1, 1, 0] * int *d_out; // e.g., [ , , , , , , , ] * int *d_num_selected_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run selection * cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items); * * // d_out <-- [1, 4, 6, 7] * // d_num_selected_out <-- [4] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam FlagIterator <b>[inferred]</b> Random-access input iterator type for reading selection flags \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing selected items \iterator * \tparam NumSelectedIteratorT <b>[inferred]</b> Output iterator type for recording the number of items selected \iterator */ template < typename InputIteratorT, typename FlagIterator, typename OutputIteratorT, typename NumSelectedIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Flagged( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items FlagIterator d_flags, ///< [in] Pointer to the input sequence of selection flags OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef NullType SelectOp; // Selection op (not used) typedef NullType EqualityOp; // Equality operator (not used) return DispatchSelectIf<InputIteratorT, FlagIterator, OutputIteratorT, NumSelectedIteratorT, SelectOp, EqualityOp, OffsetT, false>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, SelectOp(), EqualityOp(), num_items, stream, debug_synchronous); } /** * \brief Uses the \p select_op functor to selectively copy items from \p d_in into \p d_out. The total number of items selected is written to \p d_num_selected_out. 
![](select_logo.png) * * \par * - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. * - \devicestorage * * \par Performance * The following charts illustrate saturated select-if performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. Items are * selected with 50% probability. * * \image html select_if_int32_50_percent.png * \image html select_if_int64_50_percent.png * * \par * The following charts are similar, but 5% selection probability: * * \image html select_if_int32_5_percent.png * \image html select_if_int64_5_percent.png * * \par Snippet * The code snippet below illustrates the compaction of items selected from an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_select.cuh> * * // Functor type for selecting values less than some criteria * struct LessThan * { * int compare; * * CUB_RUNTIME_FUNCTION __forceinline__ * LessThan(int compare) : compare(compare) {} * * CUB_RUNTIME_FUNCTION __forceinline__ * bool operator()(const int &a) const { * return (a < compare); * } * }; * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 3, 9, 5, 2, 81, 8] * int *d_out; // e.g., [ , , , , , , , ] * int *d_num_selected_out; // e.g., [ ] * LessThan select_op(7); * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run selection * cub::DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op); * * // d_out <-- [0, 2, 3, 5, 2] * // d_num_selected_out <-- [5] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing selected items \iterator * \tparam NumSelectedIteratorT <b>[inferred]</b> Output iterator type for recording the number of items selected \iterator * \tparam SelectOp <b>[inferred]</b> Selection operator type having member <tt>bool operator()(const T &a)</tt> */ template < typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT, typename SelectOp> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t If( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) SelectOp select_op, ///< [in] Unary selection operator cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. 
bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef NullType* FlagIterator; // FlagT iterator type (not used) typedef NullType EqualityOp; // Equality operator (not used) return DispatchSelectIf<InputIteratorT, FlagIterator, OutputIteratorT, NumSelectedIteratorT, SelectOp, EqualityOp, OffsetT, false>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, NULL, d_out, d_num_selected_out, select_op, EqualityOp(), num_items, stream, debug_synchronous); } /** * \brief Given an input sequence \p d_in having runs of consecutive equal-valued keys, only the first key from each run is selectively copied to \p d_out. The total number of items selected is written to \p d_num_selected_out. ![](unique_logo.png) * * \par * - The <tt>==</tt> equality operator is used to determine whether keys are equivalent * - Copies of the selected items are compacted into \p d_out and maintain their original relative ordering. * - \devicestorage * * \par Performance * The following charts illustrate saturated select-unique performance across different * CUDA architectures for \p int32 and \p int64 items, respectively. Segments have * lengths uniformly sampled from [1,1000]. * * \image html select_unique_int32_len_500.png * \image html select_unique_int64_len_500.png * * \par * The following charts are similar, but with segment lengths uniformly sampled from [1,10]: * * \image html select_unique_int32_len_5.png * \image html select_unique_int64_len_5.png * * \par Snippet * The code snippet below illustrates the compaction of items selected from an \p int device vector. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_select.cuh> * * // Declare, allocate, and initialize device-accessible pointers for input and output * int num_items; // e.g., 8 * int *d_in; // e.g., [0, 2, 2, 9, 5, 5, 5, 8] * int *d_out; // e.g., [ , , , , , , , ] * int *d_num_selected_out; // e.g., [ ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run selection * cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items); * * // d_out <-- [0, 2, 9, 5, 8] * // d_num_selected_out <-- [5] * * \endcode * * \tparam InputIteratorT <b>[inferred]</b> Random-access input iterator type for reading input items \iterator * \tparam OutputIteratorT <b>[inferred]</b> Random-access output iterator type for writing selected items \iterator * \tparam NumSelectedIteratorT <b>[inferred]</b> Output iterator type for recording the number of items selected \iterator */ template < typename InputIteratorT, typename OutputIteratorT, typename NumSelectedIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Unique( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the output total number of items selected (i.e., length of \p d_out) int num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. { typedef int OffsetT; // Signed integer type for global offsets typedef NullType* FlagIterator; // FlagT iterator type (not used) typedef NullType SelectOp; // Selection op (not used) typedef Equality EqualityOp; // Default == operator return DispatchSelectIf<InputIteratorT, FlagIterator, OutputIteratorT, NumSelectedIteratorT, SelectOp, EqualityOp, OffsetT, false>::Dispatch( d_temp_storage, temp_storage_bytes, d_in, NULL, d_out, d_num_selected_out, SelectOp(), EqualityOp(), num_items, stream, debug_synchronous); } }; /** * \example example_device_select_flagged.cu * \example example_device_select_if.cu * \example example_device_select_unique.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
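

/**
 * \par
 * A minimal end-to-end sketch of DeviceSelect::Unique using the same illustrative values as
 * the snippet above (the CompactRuns wrapper is hypothetical). It makes explicit a detail the
 * snippets leave implicit: \p d_num_selected_out is device-accessible, so the compacted length
 * must be copied back to the host before the output can be sized or inspected.
 *
 * \code
 * #include <cub/cub.cuh>   // or equivalently <cub/device/device_select.cuh>
 *
 * cudaError_t CompactRuns()
 * {
 *     const int num_items = 8;
 *     int h_in[num_items] = {0, 2, 2, 9, 5, 5, 5, 8};
 *
 *     int *d_in               = NULL;
 *     int *d_out              = NULL;
 *     int *d_num_selected_out = NULL;
 *     cudaMalloc(&d_in,  num_items * sizeof(int));
 *     cudaMalloc(&d_out, num_items * sizeof(int));
 *     cudaMalloc(&d_num_selected_out, sizeof(int));
 *     cudaMemcpy(d_in, h_in, num_items * sizeof(int), cudaMemcpyHostToDevice);
 *
 *     // Phase 1: query the required temporary storage size (no work is done)
 *     void   *d_temp_storage    = NULL;
 *     size_t temp_storage_bytes = 0;
 *     cudaError_t error = cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
 *         d_in, d_out, d_num_selected_out, num_items);
 *     if (error != cudaSuccess) return error;
 *
 *     // Phase 2: allocate the temporary storage and run the selection
 *     cudaMalloc(&d_temp_storage, temp_storage_bytes);
 *     error = cub::DeviceSelect::Unique(d_temp_storage, temp_storage_bytes,
 *         d_in, d_out, d_num_selected_out, num_items);
 *
 *     // The number of compacted items lives on the device; bring it back to the host
 *     int h_num_selected = 0;
 *     cudaMemcpy(&h_num_selected, d_num_selected_out, sizeof(int), cudaMemcpyDeviceToHost);
 *     // h_num_selected <-- 5, d_out[0..4] <-- [0, 2, 9, 5, 8]
 *
 *     cudaFree(d_temp_storage);
 *     cudaFree(d_num_selected_out);
 *     cudaFree(d_out);
 *     cudaFree(d_in);
 *     return error;
 * }
 * \endcode
 */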
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/device/device_radix_sort.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "dispatch/dispatch_radix_sort.cuh" #include "../util_arch.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \brief DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. ![](sorting_logo.png) * \ingroup SingleModule * * \par Overview * The [<em>radix sorting method</em>](http://en.wikipedia.org/wiki/Radix_sort) arranges * items into ascending (or descending) order. The algorithm relies upon a positional representation for * keys, i.e., each key is comprised of an ordered sequence of symbols (e.g., digits, * characters, etc.) specified from least-significant to most-significant. For a * given input sequence of keys and a set of rules specifying a total ordering * of the symbolic alphabet, the radix sorting method produces a lexicographic * ordering of those keys. * * \par * DeviceRadixSort can sort all of the built-in C++ numeric primitive types, e.g.: * <tt>unsigned char</tt>, \p int, \p double, etc. Although the direct radix sorting * method can only be applied to unsigned integral types, DeviceRadixSort * is able to sort signed and floating-point types via simple bit-wise transformations * that ensure lexicographic key ordering. 
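 *
 * \par
 * As a minimal sketch of one such order-preserving transformation (the exact mappings applied
 * internally are defined by CUB's key traits and are not reproduced here), a signed 32-bit key
 * can be remapped so that ascending <em>unsigned</em> order matches ascending signed order
 * simply by flipping its sign bit:
 *
 * \code
 * // Hypothetical helper, for illustration only
 * inline unsigned int TwiddleSigned32(int key)
 * {
 *     // INT_MIN -> 0x00000000, -1 -> 0x7FFFFFFF, 0 -> 0x80000000, INT_MAX -> 0xFFFFFFFF
 *     return static_cast<unsigned int>(key) ^ 0x80000000u;
 * }
 * \endcode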
* * \par Usage Considerations * \cdp_class{DeviceRadixSort} * * \par Performance * \linear_performance{radix sort} The following chart illustrates DeviceRadixSort::SortKeys * performance across different CUDA architectures for uniform-random \p uint32 keys. * \plots_below * * \image html lsb_radix_sort_int32_keys.png * */ struct DeviceRadixSort { /******************************************************************//** * \name KeyT-value pairs *********************************************************************/ //@{ /** * \brief Sorts key-value pairs into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random <tt>uint32,uint32</tt> and * <tt>uint64,uint64</tt> pairs, respectively. * * \image html lsb_radix_sort_int32_pairs.png * \image html lsb_radix_sort_int64_pairs.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] * // d_values_out <-- [5, 4, 3, 1, 2, 0, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchRadixSort<false, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into ascending order. (~<em>N </em>auxiliary storage required) * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. * - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random <tt>uint32,uint32</tt> and * <tt>uint64,uint64</tt> pairs, respectively. * * \image html lsb_radix_sort_int32_pairs.png * \image html lsb_radix_sort_int64_pairs.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [ ... ] * ... 
* * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairs(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] * // d_values.Current() <-- [5, 4, 3, 1, 2, 0, 6] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairs( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; return DispatchRadixSort<false, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, true, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into descending order. (~<em>2N</em> auxiliary storage required). * * \par * - The contents of the input data are not altered by the sorting operation * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortPairs. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. 
* \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * int *d_values_in; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_values_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, * d_keys_in, d_keys_out, d_values_in, d_values_out, num_items); * * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0] * // d_values_out <-- [6, 0, 2, 1, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data const ValueT *d_values_in, ///< [in] Pointer to the corresponding input sequence of associated value items ValueT *d_values_out, ///< [out] Pointer to the correspondingly-reordered output sequence of associated value items int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<ValueT> d_values(const_cast<ValueT*>(d_values_in), d_values_out); return DispatchRadixSort<true, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts key-value pairs into descending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers and a corresponding * pair of associated value buffers. Each pair is managed by a DoubleBuffer * structure that indicates which of the two buffers is "current" (and thus * contains the input data to be sorted). * - The contents of both buffers within each pair may be altered by the sorting * operation. 
* - Upon completion, the sorting operation will update the "current" indicator * within each DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortPairs. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys * with associated vector of \p int values. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * int *d_value_buf; // e.g., [0, 1, 2, 3, 4, 5, 6] * int *d_value_alt_buf; // e.g., [ ... ] * ... * * // Create a set of DoubleBuffers to wrap pairs of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * cub::DoubleBuffer<int> d_values(d_value_buf, d_value_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items); * * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] * // d_values.Current() <-- [6, 0, 2, 1, 3, 4, 5] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type * \tparam ValueT <b>[inferred]</b> ValueT type */ template < typename KeyT, typename ValueT> CUB_RUNTIME_FUNCTION static cudaError_t SortPairsDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer of values whose "current" device-accessible buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; return DispatchRadixSort<true, KeyT, ValueT, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, true, stream, debug_synchronous); } //@} end member group /******************************************************************//** * \name Keys-only *********************************************************************/ //@{ /** * \brief Sorts keys into ascending order. (~<em>2N </em>auxiliary storage required) * * \par * - The contents of the input data are not altered by the sorting operation * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. * * \image html lsb_radix_sort_int32_keys.png * \image html lsb_radix_sort_int64_keys.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * ... * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // d_keys_out <-- [0, 3, 5, 6, 7, 8, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ // Signed integer type for global offsets typedef int OffsetT; // Null value type DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<NullType> d_values; return DispatchRadixSort<false, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts keys into ascending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * The following charts illustrate saturated sorting performance across different * CUDA architectures for uniform-random \p uint32 and \p uint64 keys, respectively. * * \image html lsb_radix_sort_int32_keys.png * \image html lsb_radix_sort_int64_keys.png * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeys(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // d_keys.Current() <-- [0, 3, 5, 6, 7, 8, 9] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeys( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // Null value type DoubleBuffer<NullType> d_values; return DispatchRadixSort<false, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, true, stream, debug_synchronous); } /** * \brief Sorts keys into descending order. (~<em>2N</em> auxiliary storage required). * * \par * - The contents of the input data are not altered by the sorting operation * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageNP For sorting using only <em>O</em>(<tt>P</tt>) temporary storage, see the sorting interface using DoubleBuffer wrappers below. * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortKeys. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_keys_in; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_keys_out; // e.g., [ ... ] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys_in, d_keys_out, num_items); * * // d_keys_out <-- [9, 8, 7, 6, 5, 3, 0]s * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation const KeyT *d_keys_in, ///< [in] Pointer to the input data of key data to sort KeyT *d_keys_out, ///< [out] Pointer to the sorted output sequence of key data int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; DoubleBuffer<KeyT> d_keys(const_cast<KeyT*>(d_keys_in), d_keys_out); DoubleBuffer<NullType> d_values; return DispatchRadixSort<true, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, false, stream, debug_synchronous); } /** * \brief Sorts keys into descending order. (~<em>N </em>auxiliary storage required). * * \par * - The sorting operation is given a pair of key buffers managed by a * DoubleBuffer structure that indicates which of the two buffers is * "current" (and thus contains the input data to be sorted). * - The contents of both buffers may be altered by the sorting operation. * - Upon completion, the sorting operation will update the "current" indicator * within the DoubleBuffer wrapper to reference which of the two buffers * now contains the sorted output sequence (a function of the number of key bits * specified and the targeted device architecture). * - An optional bit subrange <tt>[begin_bit, end_bit)</tt> of differentiating key bits can be specified. This can reduce overall sorting overhead and yield a corresponding performance improvement. * - \devicestorageP * - \devicestorage * * \par Performance * Performance is similar to DeviceRadixSort::SortKeys. * * \par Snippet * The code snippet below illustrates the sorting of a device vector of \p int keys. * \par * \code * #include <cub/cub.cuh> // or equivalently <cub/device/device_radix_sort.cuh> * * // Declare, allocate, and initialize device-accessible pointers for sorting data * int num_items; // e.g., 7 * int *d_key_buf; // e.g., [8, 6, 7, 5, 3, 0, 9] * int *d_key_alt_buf; // e.g., [ ... ] * ... * * // Create a DoubleBuffer to wrap the pair of device pointers * cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf); * * // Determine temporary device storage requirements * void *d_temp_storage = NULL; * size_t temp_storage_bytes = 0; * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // Allocate temporary storage * cudaMalloc(&d_temp_storage, temp_storage_bytes); * * // Run sorting operation * cub::DeviceRadixSort::SortKeysDescending(d_temp_storage, temp_storage_bytes, d_keys, num_items); * * // d_keys.Current() <-- [9, 8, 7, 6, 5, 3, 0] * * \endcode * * \tparam KeyT <b>[inferred]</b> KeyT type */ template <typename KeyT> CUB_RUNTIME_FUNCTION static cudaError_t SortKeysDescending( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Reference to the double-buffer of keys whose "current" device-accessible buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys int num_items, ///< [in] Number of items to sort int begin_bit = 0, ///< [in] <b>[optional]</b> The least-significant bit index (inclusive) needed for key comparison int end_bit = sizeof(KeyT) * 8, ///< [in] <b>[optional]</b> The most-significant bit index (exclusive) needed for key comparison (e.g., sizeof(unsigned int) * 8) cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { // Signed integer type for global offsets typedef int OffsetT; // Null value type DoubleBuffer<NullType> d_values; return DispatchRadixSort<true, KeyT, NullType, OffsetT>::Dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, true, stream, debug_synchronous); } //@} end member group }; /** * \example example_device_radix_sort.cu */ } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
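The SortKeys / SortKeysDescending snippets above stop at the sorting call itself. The sketch below is not part of the original header: it shows the complete two-phase temp-storage pattern for the DoubleBuffer overload of SortKeysDescending with basic error propagation. The buffer names and the <cub/cub.cuh> include are illustrative assumptions only.

// Minimal host-side sketch (not from the original sources): two-phase temporary
// storage pattern for the DoubleBuffer overload of SortKeysDescending.
// Assumes the CUB headers are on the include path; buffer names are illustrative.
#include <cub/cub.cuh>

cudaError_t SortKeysDescendingExample(int *d_key_buf, int *d_key_alt_buf, int num_items)
{
    // Wrap the ping-pong buffer pair; d_key_buf is the "current" (input) buffer
    cub::DoubleBuffer<int> d_keys(d_key_buf, d_key_alt_buf);

    // First call with d_temp_storage == NULL only computes temp_storage_bytes
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;
    cudaError_t error = cub::DeviceRadixSort::SortKeysDescending(
        d_temp_storage, temp_storage_bytes, d_keys, num_items);
    if (error != cudaSuccess) return error;

    // Allocate the requested temporary storage
    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;

    // Second call performs the sort; afterwards d_keys.Current() points at
    // whichever buffer holds the descending-sorted keys
    error = cub::DeviceRadixSort::SortKeysDescending(
        d_temp_storage, temp_storage_bytes, d_keys, num_items);

    cudaFree(d_temp_storage);
    return error;
}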
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_scan.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceScan provides device-wide, parallel operations for computing a prefix scan across a sequence of data items residing within device-accessible memory. 
*/ #pragma once #include <stdio.h> #include <iterator> #include "../../agent/agent_scan.cuh" #include "../../thread/thread_operators.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_arch.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Initialization kernel for tile status initialization (multi-block) */ template < typename ScanTileStateT> ///< Tile status interface type __global__ void DeviceScanInitKernel( ScanTileStateT tile_state, ///< [in] Tile status interface int num_tiles) ///< [in] Number of tiles { // Initialize tile status tile_state.InitializeStatus(num_tiles); } /** * Initialization kernel for tile status initialization (multi-block) */ template < typename ScanTileStateT, ///< Tile status interface type typename NumSelectedIteratorT> ///< Output iterator type for recording the number of items selected __global__ void DeviceCompactInitKernel( ScanTileStateT tile_state, ///< [in] Tile status interface int num_tiles, ///< [in] Number of tiles NumSelectedIteratorT d_num_selected_out) ///< [out] Pointer to the total number of items selected (i.e., length of \p d_selected_out) { // Initialize tile status tile_state.InitializeStatus(num_tiles); // Initialize d_num_selected_out if ((blockIdx.x == 0) && (threadIdx.x == 0)) *d_num_selected_out = 0; } /** * Scan kernel entry point (multi-block) */ template < typename ScanPolicyT, ///< Parameterized ScanPolicyT tuning policy type typename InputIteratorT, ///< Random-access input iterator type for reading scan inputs \iterator typename OutputIteratorT, ///< Random-access output iterator type for writing scan outputs \iterator typename ScanTileStateT, ///< Tile status interface type typename ScanOpT, ///< Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> typename InitValueT, ///< Initial value to seed the exclusive scan (cub::NullType for inclusive scans) typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(ScanPolicyT::BLOCK_THREADS)) __global__ void DeviceScanKernel( InputIteratorT d_in, ///< Input data OutputIteratorT d_out, ///< Output data ScanTileStateT tile_state, ///< Tile status interface int start_tile, ///< The starting tile for the current grid ScanOpT scan_op, ///< Binary scan functor InitValueT init_value, ///< Initial value to seed the exclusive scan OffsetT num_items) ///< Total number of scan items for the entire problem { // Thread block type for scanning input tiles typedef AgentScan< ScanPolicyT, InputIteratorT, OutputIteratorT, ScanOpT, InitValueT, OffsetT> AgentScanT; // Shared memory for AgentScan __shared__ typename AgentScanT::TempStorage temp_storage; // Process tiles AgentScanT(temp_storage, d_in, d_out, scan_op, init_value).ConsumeRange( num_items, tile_state, start_tile); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceScan */ template < typename InputIteratorT, ///< Random-access input iterator type for reading scan inputs \iterator typename OutputIteratorT, ///< Random-access output iterator type for writing scan 
outputs \iterator typename ScanOpT, ///< Binary scan functor type having member <tt>T operator()(const T &a, const T &b)</tt> typename InitValueT, ///< The init_value element type for ScanOpT (cub::NullType for inclusive scans) typename OffsetT> ///< Signed integer type for global offsets struct DispatchScan { //--------------------------------------------------------------------- // Constants and Types //--------------------------------------------------------------------- enum { INIT_KERNEL_THREADS = 128 }; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Tile status descriptor interface type typedef ScanTileState<OutputT> ScanTileStateT; //--------------------------------------------------------------------- // Tuning policies //--------------------------------------------------------------------- /// SM600 struct Policy600 { typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(128, 15, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_TRANSPOSE, BLOCK_SCAN_WARP_SCANS> ScanPolicyT; }; /// SM520 struct Policy520 { // Titan X: 32.47B items/s @ 48M 32-bit T typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(128, 12, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_WARP_SCANS> ScanPolicyT; }; /// SM35 struct Policy350 { // GTX Titan: 29.5B items/s (232.4 GB/s) @ 48M 32-bit T typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(128, 12, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED, BLOCK_SCAN_RAKING> ScanPolicyT; }; /// SM30 struct Policy300 { typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(256, 9, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_WARP_SCANS> ScanPolicyT; }; /// SM20 struct Policy200 { // GTX 580: 20.3B items/s (162.3 GB/s) @ 48M 32-bit T typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(128, 12, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_WARP_SCANS> ScanPolicyT; }; /// SM13 struct Policy130 { typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(96, 21, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicyT; }; /// SM10 struct Policy100 { typedef AgentScanPolicy< CUB_NOMINAL_CONFIG(64, 9, OutputT), ///< Threads per block, items per thread BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_WARP_SCANS> ScanPolicyT; }; //--------------------------------------------------------------------- // Tuning policies of current PTX compiler pass //--------------------------------------------------------------------- #if (CUB_PTX_ARCH >= 600) typedef Policy600 PtxPolicy; #elif (CUB_PTX_ARCH >= 520) typedef Policy520 PtxPolicy; #elif (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #elif (CUB_PTX_ARCH >= 130) typedef Policy130 PtxPolicy; #else typedef Policy100 PtxPolicy; 
#endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxAgentScanPolicy : PtxPolicy::ScanPolicyT {}; //--------------------------------------------------------------------- // Utilities //--------------------------------------------------------------------- /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static void InitConfigs( int ptx_version, KernelConfig &scan_kernel_config) { #if (CUB_PTX_ARCH > 0) (void)ptx_version; // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy scan_kernel_config.template Init<PtxAgentScanPolicy>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 600) { scan_kernel_config.template Init<typename Policy600::ScanPolicyT>(); } else if (ptx_version >= 520) { scan_kernel_config.template Init<typename Policy520::ScanPolicyT>(); } else if (ptx_version >= 350) { scan_kernel_config.template Init<typename Policy350::ScanPolicyT>(); } else if (ptx_version >= 300) { scan_kernel_config.template Init<typename Policy300::ScanPolicyT>(); } else if (ptx_version >= 200) { scan_kernel_config.template Init<typename Policy200::ScanPolicyT>(); } else if (ptx_version >= 130) { scan_kernel_config.template Init<typename Policy130::ScanPolicyT>(); } else { scan_kernel_config.template Init<typename Policy100::ScanPolicyT>(); } #endif } /** * Kernel kernel dispatch configuration. */ struct KernelConfig { int block_threads; int items_per_thread; int tile_items; template <typename PolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = PolicyT::BLOCK_THREADS; items_per_thread = PolicyT::ITEMS_PER_THREAD; tile_items = block_threads * items_per_thread; } }; //--------------------------------------------------------------------- // Dispatch entrypoints //--------------------------------------------------------------------- /** * Internal dispatch routine for computing a device-wide prefix scan using the * specified kernel functions. */ template < typename ScanInitKernelPtrT, ///< Function type of cub::DeviceScanInitKernel typename ScanSweepKernelPtrT> ///< Function type of cub::DeviceScanKernelPtrT CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items ScanOpT scan_op, ///< [in] Binary scan functor InitValueT init_value, ///< [in] Initial value to seed the exclusive scan OffsetT num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
int /*ptx_version*/, ///< [in] PTX version of dispatch kernels ScanInitKernelPtrT init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel ScanSweepKernelPtrT scan_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceScanKernel KernelConfig scan_kernel_config) ///< [in] Dispatch parameters that match the policy that \p scan_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_out; (void)scan_op; (void)init_value; (void)num_items; (void)stream; (void)debug_synchronous; (void)init_kernel; (void)scan_kernel; (void)scan_kernel_config; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Number of input tiles int tile_size = scan_kernel_config.block_threads * scan_kernel_config.items_per_thread; int num_tiles = (num_items + tile_size - 1) / tile_size; // Specify temporary storage allocation requirements size_t allocation_sizes[1]; if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break; // bytes needed for tile status descriptors // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob) void* allocations[1]; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Return if empty problem if (num_items == 0) break; // Construct the tile status interface ScanTileStateT tile_state; if (CubDebug(error = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]))) break; // Log init_kernel configuration int init_grid_size = (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS; if (debug_synchronous) _CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream); // Invoke init_kernel to initialize tile descriptors init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>( tile_state, num_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Get SM occupancy for scan_kernel int scan_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( scan_sm_occupancy, // out scan_kernel, scan_kernel_config.block_threads))) break; // Get max x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;; // Run grids in epochs (in case number of tiles exceeds max x-dimension int scan_grid_size = CUB_MIN(num_tiles, max_dim_x); for (int start_tile = 0; start_tile < num_tiles; start_tile += scan_grid_size) { // Log scan_kernel configuration if (debug_synchronous) _CubLog("Invoking %d scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", start_tile, scan_grid_size, scan_kernel_config.block_threads, (long long) stream, scan_kernel_config.items_per_thread, scan_sm_occupancy); // Invoke scan_kernel scan_kernel<<<scan_grid_size, scan_kernel_config.block_threads, 0, stream>>>( d_in, d_out, 
tile_state, start_tile, scan_op, init_value, num_items); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output sequence of data items ScanOpT scan_op, ///< [in] Binary scan functor InitValueT init_value, ///< [in] Initial value to seed the exclusive scan OffsetT num_items, ///< [in] Total number of input items (i.e., the length of \p d_in) cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; if (CubDebug(error = PtxVersion(ptx_version))) break; // Get kernel kernel dispatch configurations KernelConfig scan_kernel_config; InitConfigs(ptx_version, scan_kernel_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, scan_op, init_value, num_items, stream, debug_synchronous, ptx_version, DeviceScanInitKernel<ScanTileStateT>, DeviceScanKernel<PtxAgentScanPolicy, InputIteratorT, OutputIteratorT, ScanTileStateT, ScanOpT, InitValueT, OffsetT>, scan_kernel_config))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
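DispatchScan is the internal engine behind the public DeviceScan entry points. As a rough illustration of how those wrappers drive it, the sketch below (not part of the original header) performs an exclusive prefix sum over int data by calling the host-facing Dispatch overload directly; the iterator types, cub::Sum functor, and 0 seed value mirror what an ExclusiveSum-style wrapper would typically pass, but the wrapper itself is assumed, not quoted.

// Minimal host-side sketch (not from the original sources): exclusive prefix sum
// over int data by invoking DispatchScan::Dispatch directly, the way the public
// DeviceScan wrappers typically do. Assumes the CUB headers are on the include path.
#include <cub/cub.cuh>

cudaError_t ExclusiveSumViaDispatchScan(int *d_in, int *d_out, int num_items,
                                        cudaStream_t stream = 0)
{
    typedef int OffsetT;   // signed integer type for global offsets

    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;

    // Size query: with d_temp_storage == NULL the requirement is written to
    // temp_storage_bytes and no kernels are launched
    cudaError_t error = cub::DispatchScan<int*, int*, cub::Sum, int, OffsetT>::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out,
        cub::Sum(),             // binary scan functor
        0,                      // initial value seeding the exclusive scan
        num_items, stream,
        false);                 // debug_synchronous
    if (error != cudaSuccess) return error;

    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;

    // Second call: initializes the tile status descriptors, then launches
    // DeviceScanKernel in epochs of at most max-grid-dim-x tiles
    error = cub::DispatchScan<int*, int*, cub::Sum, int, OffsetT>::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, cub::Sum(), 0, num_items, stream, false);

    cudaFree(d_temp_storage);
    return error;
}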
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_spmv_orig.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSpmv provides device-wide parallel operations for performing sparse-matrix * vector multiplication (SpMV). */ #pragma once #include <stdio.h> #include <iterator> #include "../../agent/single_pass_scan_operators.cuh" #include "../../agent/agent_segment_fixup.cuh" #include "../../agent/agent_spmv_orig.cuh" #include "../../util_type.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../thread/thread_search.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * SpMV kernel entry points *****************************************************************************/ /** * Spmv search kernel. Identifies merge path starting coordinates for each tile. 
*/ template < typename AgentSpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type typename ValueT, ///< Matrix and vector value type typename OffsetT, ///< Signed integer type for sequence offsets typename SemiringT> ///< Semiring operations __global__ void DeviceSpmv1ColKernel( SpmvParams<ValueT, OffsetT> spmv_params) ///< [in] SpMV input parameter bundle { typedef CacheModifiedInputIterator< AgentSpmvPolicyT::VECTOR_VALUES_LOAD_MODIFIER, ValueT, OffsetT> VectorValueIteratorT; VectorValueIteratorT wrapped_vector_x(spmv_params.d_vector_x); int row_idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (row_idx < spmv_params.num_rows) { OffsetT end_nonzero_idx = spmv_params.d_row_end_offsets[row_idx]; OffsetT nonzero_idx = spmv_params.d_row_end_offsets[row_idx - 1]; ValueT value = SemiringT::plus_ident(); if (end_nonzero_idx != nonzero_idx) { value = SemiringT::times( spmv_params.alpha, SemiringT::times(spmv_params.d_values[nonzero_idx], wrapped_vector_x[spmv_params.d_column_indices[nonzero_idx]])); } spmv_params.d_vector_y[row_idx] = SemiringT::plus(value, SemiringT::times(spmv_params.d_vector_y[row_idx], spmv_params.beta)); } } /** * Degenerate case: y = b*y */ template < typename ValueT, ///< Matrix and vector value type typename OffsetT, ///< Signed integer type for sequence offsets typename SemiringT> ///< Semiring operations __global__ void DeviceSpmvbyKernel( SpmvParams<ValueT, OffsetT> spmv_params) ///< [in] SpMV input parameter bundle { int row_idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (row_idx < spmv_params.num_rows) { spmv_params.d_vector_y[row_idx] = SemiringT::times(spmv_params.d_vector_y[row_idx], spmv_params.beta); } } /** * Spmv search kernel. Identifies merge path starting coordinates for each tile. */ template < typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type typename OffsetT, ///< Signed integer type for sequence offsets typename CoordinateT, ///< Merge path coordinate type typename SpmvParamsT, ///< SpmvParams type typename SemiringT> ///< Semiring type __global__ void DeviceSpmvSearchKernel( int num_merge_tiles, ///< [in] Number of SpMV merge tiles (spmv grid size) CoordinateT* d_tile_coordinates, ///< [out] Pointer to the temporary array of tile starting coordinates SpmvParamsT spmv_params) ///< [in] SpMV input parameter bundle { /// Constants enum { BLOCK_THREADS = SpmvPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SpmvPolicyT::ITEMS_PER_THREAD, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, }; typedef CacheModifiedInputIterator< SpmvPolicyT::ROW_OFFSETS_SEARCH_LOAD_MODIFIER, OffsetT, OffsetT> RowOffsetsSearchIteratorT; // Find the starting coordinate for all tiles (plus the end coordinate of the last one) int tile_idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (tile_idx < num_merge_tiles + 1) { OffsetT diagonal = (tile_idx * TILE_ITEMS); CoordinateT tile_coordinate; CountingInputIterator<OffsetT> nonzero_indices(0); // Search the merge path MergePathSearch( diagonal, RowOffsetsSearchIteratorT(spmv_params.d_row_end_offsets), nonzero_indices, spmv_params.num_rows, spmv_params.num_nonzeros, tile_coordinate); // Output starting offset d_tile_coordinates[tile_idx] = tile_coordinate; } } /** * Spmv agent entry point */ template < typename SpmvPolicyT, ///< Parameterized SpmvPolicy tuning policy type typename ScanTileStateT, ///< Tile status interface type typename ValueT, ///< Matrix and vector value type typename OffsetT, ///< Signed integer type for sequence offsets typename CoordinateT, ///< Merge path coordinate type typename SemiringT, 
///< Semiring type bool HAS_ALPHA, ///< Whether the input parameter Alpha is 1 bool HAS_BETA> ///< Whether the input parameter Beta is 0 __launch_bounds__ (int(SpmvPolicyT::BLOCK_THREADS)) __global__ void DeviceSpmvKernel( SpmvParams<ValueT, OffsetT> spmv_params, ///< [in] SpMV input parameter bundle CoordinateT* d_tile_coordinates, ///< [in] Pointer to the temporary array of tile starting coordinates KeyValuePair<OffsetT,ValueT>* d_tile_carry_pairs, ///< [out] Pointer to the temporary array carry-out dot product row-ids, one per block int num_tiles, ///< [in] Number of merge tiles ScanTileStateT tile_state, ///< [in] Tile status interface for fixup reduce-by-key kernel int num_segment_fixup_tiles) ///< [in] Number of reduce-by-key tiles (fixup grid size) { // Spmv agent type specialization typedef AgentSpmv< SpmvPolicyT, ValueT, OffsetT, SemiringT, HAS_ALPHA, HAS_BETA> AgentSpmvT; // Shared memory for AgentSpmv __shared__ typename AgentSpmvT::TempStorage temp_storage; AgentSpmvT(temp_storage, spmv_params).ConsumeTile( d_tile_coordinates, d_tile_carry_pairs, num_tiles); // Initialize fixup tile status tile_state.InitializeStatus(num_segment_fixup_tiles); } /** * Multi-block reduce-by-key sweep kernel entry point */ template < typename AgentSegmentFixupPolicyT, ///< Parameterized AgentSegmentFixupPolicy tuning policy type typename PairsInputIteratorT, ///< Random-access input iterator type for keys typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values typename OffsetT, ///< Signed integer type for global offsets typename SemiringT, ///< Semiring type typename ScanTileStateT> ///< Tile status interface type __launch_bounds__ (int(AgentSegmentFixupPolicyT::BLOCK_THREADS)) __global__ void DeviceSegmentFixupKernel( OffsetT max_items, ///< [in] Limit on number of output items (number of rows). Used to prevent OOB writes. 
PairsInputIteratorT d_pairs_in, ///< [in] Pointer to the array carry-out dot product row-ids, one per spmv block AggregatesOutputIteratorT d_aggregates_out, ///< [in,out] Output value aggregates OffsetT num_items, ///< [in] Total number of items to select from int num_tiles, ///< [in] Total number of tiles for the entire problem ScanTileStateT tile_state) ///< [in] Tile status interface { // Thread block type for reducing tiles of value segments typedef AgentSegmentFixup< AgentSegmentFixupPolicyT, PairsInputIteratorT, AggregatesOutputIteratorT, cub::Equality, typename SemiringT::SumOp, OffsetT, SemiringT> AgentSegmentFixupT; // Shared memory for AgentSegmentFixup __shared__ typename AgentSegmentFixupT::TempStorage temp_storage; // Process tiles AgentSegmentFixupT(temp_storage, d_pairs_in, d_aggregates_out, cub::Equality(), SemiringT::SumOp()).ConsumeRange( max_items, num_items, num_tiles, tile_state); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceSpmv */ template < typename ValueT, ///< Matrix and vector value type typename OffsetT, ///< Signed integer type for global offsets typename SemiringT> ///< Semiring type struct DispatchSpmv { //--------------------------------------------------------------------- // Constants and Types //--------------------------------------------------------------------- enum { INIT_KERNEL_THREADS = 128 }; // SpmvParams bundle type typedef SpmvParams<ValueT, OffsetT> SpmvParamsT; // 2D merge path coordinate type typedef typename CubVector<OffsetT, 2>::Type CoordinateT; // Tile status descriptor interface type typedef ReduceByKeyScanTileState<ValueT, OffsetT> ScanTileStateT; // Tuple type for scanning (pairs accumulated segment-value with segment-index) typedef KeyValuePair<OffsetT, ValueT> KeyValuePairT; //--------------------------------------------------------------------- // Tuning policies //--------------------------------------------------------------------- /// SM11 struct Policy110 { typedef AgentSpmvPolicy< 128, 1, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; /// SM20 struct Policy200 { typedef AgentSpmvPolicy< 96, 18, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, false, BLOCK_SCAN_RAKING> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; /// SM30 struct Policy300 { typedef AgentSpmvPolicy< 96, 6, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; /// SM35 struct Policy350 { typedef AgentSpmvPolicy< (sizeof(ValueT) > 4) ? 96 : 128, (sizeof(ValueT) > 4) ? 4 : 7, LOAD_LDG, LOAD_CA, LOAD_LDG, LOAD_LDG, LOAD_LDG, (sizeof(ValueT) > 4) ? true : false, BLOCK_SCAN_WARP_SCANS> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 3, BLOCK_LOAD_VECTORIZE, LOAD_LDG, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; /// SM37 struct Policy370 { typedef AgentSpmvPolicy< (sizeof(ValueT) > 4) ? 128 : 128, (sizeof(ValueT) > 4) ? 
9 : 14, LOAD_LDG, LOAD_CA, LOAD_LDG, LOAD_LDG, LOAD_LDG, false, BLOCK_SCAN_WARP_SCANS> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 3, BLOCK_LOAD_VECTORIZE, LOAD_LDG, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; /// SM50 struct Policy500 { typedef AgentSpmvPolicy< (sizeof(ValueT) > 4) ? 64 : 128, (sizeof(ValueT) > 4) ? 6 : 7, LOAD_LDG, LOAD_DEFAULT, (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, (sizeof(ValueT) > 4) ? LOAD_LDG : LOAD_DEFAULT, LOAD_LDG, (sizeof(ValueT) > 4) ? true : false, (sizeof(ValueT) > 4) ? BLOCK_SCAN_WARP_SCANS : BLOCK_SCAN_RAKING_MEMOIZE> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 3, BLOCK_LOAD_VECTORIZE, LOAD_LDG, BLOCK_SCAN_RAKING_MEMOIZE> SegmentFixupPolicyT; }; /// SM60 struct Policy600 { typedef AgentSpmvPolicy< (sizeof(ValueT) > 4) ? 64 : 128, (sizeof(ValueT) > 4) ? 5 : 7, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS> SpmvPolicyT; typedef AgentSegmentFixupPolicy< 128, 3, BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_SCAN_WARP_SCANS> SegmentFixupPolicyT; }; //--------------------------------------------------------------------- // Tuning policies of current PTX compiler pass //--------------------------------------------------------------------- #if (CUB_PTX_ARCH >= 600) typedef Policy600 PtxPolicy; #elif (CUB_PTX_ARCH >= 500) typedef Policy500 PtxPolicy; #elif (CUB_PTX_ARCH >= 370) typedef Policy370 PtxPolicy; #elif (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #else typedef Policy110 PtxPolicy; #endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxSpmvPolicyT : PtxPolicy::SpmvPolicyT {}; struct PtxSegmentFixupPolicy : PtxPolicy::SegmentFixupPolicyT {}; //--------------------------------------------------------------------- // Utilities //--------------------------------------------------------------------- /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static void InitConfigs( int ptx_version, KernelConfig &spmv_config, KernelConfig &segment_fixup_config) { #if (CUB_PTX_ARCH > 0) // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy spmv_config.template Init<PtxSpmvPolicyT>(); segment_fixup_config.template Init<PtxSegmentFixupPolicy>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 600) { spmv_config.template Init<typename Policy600::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy600::SegmentFixupPolicyT>(); } else if (ptx_version >= 500) { spmv_config.template Init<typename Policy500::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy500::SegmentFixupPolicyT>(); } else if (ptx_version >= 370) { spmv_config.template Init<typename Policy370::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy370::SegmentFixupPolicyT>(); } else if (ptx_version >= 350) { spmv_config.template Init<typename Policy350::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy350::SegmentFixupPolicyT>(); } else if (ptx_version >= 300) { spmv_config.template Init<typename Policy300::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy300::SegmentFixupPolicyT>(); } else if (ptx_version >= 200) { 
spmv_config.template Init<typename Policy200::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy200::SegmentFixupPolicyT>(); } else { spmv_config.template Init<typename Policy110::SpmvPolicyT>(); segment_fixup_config.template Init<typename Policy110::SegmentFixupPolicyT>(); } #endif } /** * Kernel kernel dispatch configuration. */ struct KernelConfig { int block_threads; int items_per_thread; int tile_items; template <typename PolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = PolicyT::BLOCK_THREADS; items_per_thread = PolicyT::ITEMS_PER_THREAD; tile_items = block_threads * items_per_thread; } }; //--------------------------------------------------------------------- // Dispatch entrypoints //--------------------------------------------------------------------- /** * Internal dispatch routine for computing a device-wide reduction using the * specified kernel functions. * * If the input is larger than a single tile, this method uses two-passes of * kernel invocations. */ template < typename Spmv1ColKernelT, ///< Function type of cub::DeviceSpmv1ColKernel typename SpmvbyKernelT, typename SpmvSearchKernelT, ///< Function type of cub::AgentSpmvSearchKernel typename SpmvKernelT, ///< Function type of cub::AgentSpmvKernel typename SegmentFixupKernelT> ///< Function type of cub::DeviceSegmentFixupKernelT CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SpmvParamsT& spmv_params, ///< SpMV input parameter bundle cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
SpmvbyKernelT spmv_by_kernel, Spmv1ColKernelT spmv_1col_kernel, ///< [in] Kernel function pointer to parameterization of DeviceSpmv1ColKernel SpmvSearchKernelT spmv_search_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvSearchKernel SpmvKernelT spmv_kernel, ///< [in] Kernel function pointer to parameterization of AgentSpmvKernel SegmentFixupKernelT segment_fixup_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentFixupKernel KernelConfig spmv_config, ///< [in] Dispatch parameters that match the policy that \p spmv_kernel was compiled for KernelConfig segment_fixup_config) ///< [in] Dispatch parameters that match the policy that \p segment_fixup_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // degenerate case of y = beta*y if (spmv_params.alpha == SemiringT::times_null()) { if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation temp_storage_bytes = 1; break; } // Get search/init grid dims int degen_by_block_size = INIT_KERNEL_THREADS; int degen_by_grid_size = (spmv_params.num_rows + degen_by_block_size - 1) / degen_by_block_size; if (debug_synchronous) _CubLog("Invoking spmv_1col_kernel<<<%d, %d, 0, %lld>>>()\n", degen_by_grid_size, degen_by_block_size, (long long) stream); // Invoke spmv_search_kernel spmv_by_kernel<<<degen_by_grid_size, degen_by_block_size, 0, stream>>>( spmv_params); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; break; } if (spmv_params.num_cols == 1) { if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation temp_storage_bytes = 1; break; } // Get search/init grid dims int degen_col_kernel_block_size = INIT_KERNEL_THREADS; int degen_col_kernel_grid_size = (spmv_params.num_rows + degen_col_kernel_block_size - 1) / degen_col_kernel_block_size; if (debug_synchronous) _CubLog("Invoking spmv_1col_kernel<<<%d, %d, 0, %lld>>>()\n", degen_col_kernel_grid_size, degen_col_kernel_block_size, (long long) stream); // Invoke spmv_search_kernel spmv_1col_kernel<<<degen_col_kernel_grid_size, degen_col_kernel_block_size, 0, stream>>>( spmv_params); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; break; } // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Get max x-dimension of grid int max_dim_x_i; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x_i, cudaDevAttrMaxGridDimX, device_ordinal))) break;; unsigned int max_dim_x = max_dim_x_i; // Total number of spmv work items int num_merge_items = spmv_params.num_rows + spmv_params.num_nonzeros; // Tile sizes of kernels int merge_tile_size = spmv_config.block_threads * spmv_config.items_per_thread; int segment_fixup_tile_size = segment_fixup_config.block_threads * segment_fixup_config.items_per_thread; // Number of tiles for kernels unsigned int num_merge_tiles = (num_merge_items + merge_tile_size - 1) / merge_tile_size; 
unsigned int num_segment_fixup_tiles = (num_merge_tiles + segment_fixup_tile_size - 1) / segment_fixup_tile_size; // Get SM occupancy for kernels int spmv_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( spmv_sm_occupancy, spmv_kernel, spmv_config.block_threads))) break; int segment_fixup_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( segment_fixup_sm_occupancy, segment_fixup_kernel, segment_fixup_config.block_threads))) break; // Get grid dimensions dim3 spmv_grid_size( CUB_MIN(num_merge_tiles, max_dim_x), (num_merge_tiles + max_dim_x - 1) / max_dim_x, 1); dim3 segment_fixup_grid_size( CUB_MIN(num_segment_fixup_tiles, max_dim_x), (num_segment_fixup_tiles + max_dim_x - 1) / max_dim_x, 1); // Get the temporary storage allocation requirements size_t allocation_sizes[3]; if (CubDebug(error = ScanTileStateT::AllocationSize(num_segment_fixup_tiles, allocation_sizes[0]))) break; // bytes needed for reduce-by-key tile status descriptors allocation_sizes[1] = num_merge_tiles * sizeof(KeyValuePairT); // bytes needed for block carry-out pairs allocation_sizes[2] = (num_merge_tiles + 1) * sizeof(CoordinateT); // bytes needed for tile starting coordinates // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) void* allocations[3]; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Construct the tile status interface ScanTileStateT tile_state; if (CubDebug(error = tile_state.Init(num_segment_fixup_tiles, allocations[0], allocation_sizes[0]))) break; // Alias the other allocations KeyValuePairT* d_tile_carry_pairs = (KeyValuePairT*) allocations[1]; // Agent carry-out pairs CoordinateT* d_tile_coordinates = (CoordinateT*) allocations[2]; // Agent starting coordinates // Get search/init grid dims int search_block_size = INIT_KERNEL_THREADS; int search_grid_size = (num_merge_tiles + 1 + search_block_size - 1) / search_block_size; #if (CUB_PTX_ARCH == 0) // Init textures if (CubDebug(error = spmv_params.t_vector_x.BindTexture(spmv_params.d_vector_x))) break; #endif if (search_grid_size < sm_count) // if (num_merge_tiles < spmv_sm_occupancy * sm_count) { // Not enough spmv tiles to saturate the device: have spmv blocks search their own staring coords d_tile_coordinates = NULL; } else { // Use separate search kernel if we have enough spmv tiles to saturate the device // Log spmv_search_kernel configuration if (debug_synchronous) _CubLog("Invoking spmv_search_kernel<<<%d, %d, 0, %lld>>>()\n", search_grid_size, search_block_size, (long long) stream); // Invoke spmv_search_kernel spmv_search_kernel<<<search_grid_size, search_block_size, 0, stream>>>( num_merge_tiles, d_tile_coordinates, spmv_params); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } // Log spmv_kernel configuration if (debug_synchronous) _CubLog("Invoking spmv_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", spmv_grid_size.x, spmv_grid_size.y, spmv_grid_size.z, spmv_config.block_threads, (long long) stream, spmv_config.items_per_thread, spmv_sm_occupancy); // Invoke spmv_kernel spmv_kernel<<<spmv_grid_size, spmv_config.block_threads, 0, stream>>>( spmv_params, d_tile_coordinates, d_tile_carry_pairs, num_merge_tiles, 
tile_state, num_segment_fixup_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Run reduce-by-key fixup if necessary if (num_merge_tiles > 1) { // Log segment_fixup_kernel configuration if (debug_synchronous) _CubLog("Invoking segment_fixup_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", segment_fixup_grid_size.x, segment_fixup_grid_size.y, segment_fixup_grid_size.z, segment_fixup_config.block_threads, (long long) stream, segment_fixup_config.items_per_thread, segment_fixup_sm_occupancy); // Invoke segment_fixup_kernel segment_fixup_kernel<<<segment_fixup_grid_size, segment_fixup_config.block_threads, 0, stream>>>( spmv_params.num_rows, d_tile_carry_pairs, spmv_params.d_vector_y, num_merge_tiles, num_segment_fixup_tiles, tile_state); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } #if (CUB_PTX_ARCH == 0) // Free textures if (CubDebug(error = spmv_params.t_vector_x.UnbindTexture())) break; #endif } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine for computing a device-wide reduction */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SpmvParamsT& spmv_params, ///< SpMV input parameter bundle cudaStream_t stream = 0, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous = false) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel kernel dispatch configurations KernelConfig spmv_config, segment_fixup_config; InitConfigs(ptx_version, spmv_config, segment_fixup_config); // Dispatch if (spmv_params.beta == SemiringT::times_null()) { if (spmv_params.alpha == SemiringT::times_ident()) { // Dispatch y = A*x if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, DeviceSpmvbyKernel<ValueT, OffsetT, SemiringT>, DeviceSpmv1ColKernel<PtxSpmvPolicyT, ValueT, OffsetT, SemiringT>, DeviceSpmvSearchKernel<PtxSpmvPolicyT, OffsetT, CoordinateT, SpmvParamsT, SemiringT>, DeviceSpmvKernel<PtxSpmvPolicyT, ScanTileStateT, ValueT, OffsetT, CoordinateT, SemiringT, false, false>, DeviceSegmentFixupKernel<PtxSegmentFixupPolicy, KeyValuePairT*, ValueT*, OffsetT, SemiringT, ScanTileStateT>, spmv_config, segment_fixup_config))) break; } else { // Dispatch y = alpha*A*x if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, DeviceSpmvbyKernel<ValueT, OffsetT, SemiringT>, DeviceSpmv1ColKernel<PtxSpmvPolicyT, ValueT, OffsetT, SemiringT>, DeviceSpmvSearchKernel<PtxSpmvPolicyT, OffsetT, CoordinateT, SpmvParamsT, SemiringT>, DeviceSpmvKernel<PtxSpmvPolicyT, ScanTileStateT, ValueT, OffsetT, CoordinateT, SemiringT, true, false>, DeviceSegmentFixupKernel<PtxSegmentFixupPolicy, KeyValuePairT*, ValueT*, OffsetT, SemiringT, ScanTileStateT>, spmv_config, segment_fixup_config))) break; } } else { if (spmv_params.alpha == SemiringT::times_ident()) { // Dispatch y = A*x + beta*y if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, DeviceSpmvbyKernel<ValueT, OffsetT, SemiringT>, DeviceSpmv1ColKernel<PtxSpmvPolicyT, ValueT, OffsetT, SemiringT>, DeviceSpmvSearchKernel<PtxSpmvPolicyT, OffsetT, CoordinateT, SpmvParamsT, SemiringT>, DeviceSpmvKernel<PtxSpmvPolicyT, ScanTileStateT, ValueT, OffsetT, CoordinateT, SemiringT, false, true>, DeviceSegmentFixupKernel<PtxSegmentFixupPolicy, KeyValuePairT*, ValueT*, OffsetT, SemiringT, ScanTileStateT>, spmv_config, segment_fixup_config))) break; } else { // Dispatch y = alpha*A*x + beta*y if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, spmv_params, stream, debug_synchronous, DeviceSpmvbyKernel<ValueT, OffsetT, SemiringT>, DeviceSpmv1ColKernel<PtxSpmvPolicyT, ValueT, OffsetT, SemiringT>, DeviceSpmvSearchKernel<PtxSpmvPolicyT, OffsetT, CoordinateT, SpmvParamsT, SemiringT>, DeviceSpmvKernel<PtxSpmvPolicyT, ScanTileStateT, ValueT, OffsetT, CoordinateT, SemiringT, true, true>, DeviceSegmentFixupKernel<PtxSegmentFixupPolicy, KeyValuePairT*, ValueT*, OffsetT, SemiringT, ScanTileStateT>, spmv_config, segment_fixup_config))) break; } } } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
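DispatchSpmv expects the caller to fill an SpmvParams bundle (CSR arrays, vector pointers, sizes, alpha, beta) and then call Dispatch twice in the usual size-query/launch pattern. The sketch below (not part of the original header) illustrates that calling sequence for y = alpha*A*x + beta*y; the concrete semiring is deliberately left as a template parameter because the semiring definitions live in a separate header of this cub_semiring fork, and the include path and float/int type choices are assumptions.

// Minimal host-side sketch (not from the original sources): populating SpmvParams
// and driving DispatchSpmv for y = alpha*A*x + beta*y on a CSR matrix. SemiringT
// is left as a template parameter; the concrete semiring types of this
// cub_semiring fork are defined elsewhere. The include path and float/int value
// and offset types are illustrative assumptions.
#include "dispatch_spmv_orig.cuh"

template <typename SemiringT>
cudaError_t CsrSpmvExample(
    float *d_values,           // CSR nonzero values (device)
    int   *d_row_end_offsets,  // CSR row end offsets, one per row (device)
    int   *d_column_indices,   // CSR column indices (device)
    float *d_vector_x,         // dense input vector x (device)
    float *d_vector_y,         // dense in/out vector y (device)
    int num_rows, int num_cols, int num_nonzeros,
    float alpha, float beta,
    cudaStream_t stream = 0)
{
    // Fill the parameter bundle consumed by the kernels above
    cub::SpmvParams<float, int> params;
    params.d_values          = d_values;
    params.d_row_end_offsets = d_row_end_offsets;
    params.d_column_indices  = d_column_indices;
    params.d_vector_x        = d_vector_x;
    params.d_vector_y        = d_vector_y;
    params.num_rows          = num_rows;
    params.num_cols          = num_cols;
    params.num_nonzeros      = num_nonzeros;
    params.alpha             = alpha;
    params.beta              = beta;

    // Size query (no work is done while d_temp_storage == NULL)
    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;
    cudaError_t error = cub::DispatchSpmv<float, int, SemiringT>::Dispatch(
        d_temp_storage, temp_storage_bytes, params, stream);
    if (error != cudaSuccess) return error;

    error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;

    // Second call selects the degenerate or merge-path code path and launches
    // the search, spmv, and (if needed) segment-fixup kernels
    error = cub::DispatchSpmv<float, int, SemiringT>::Dispatch(
        d_temp_storage, temp_storage_bytes, params, stream);

    cudaFree(d_temp_storage);
    return error;
}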
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_reduce_by_key.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceReduceByKey provides device-wide, parallel operations for reducing segments of values residing within device-accessible memory. 
*/ #pragma once #include <stdio.h> #include <iterator> #include "dispatch_scan.cuh" #include "../../agent/agent_reduce_by_key.cuh" #include "../../thread/thread_operators.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Multi-block reduce-by-key sweep kernel entry point */ template < typename AgentReduceByKeyPolicyT, ///< Parameterized AgentReduceByKeyPolicyT tuning policy type typename KeysInputIteratorT, ///< Random-access input iterator type for keys typename UniqueOutputIteratorT, ///< Random-access output iterator type for keys typename ValuesInputIteratorT, ///< Random-access input iterator type for values typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values typename NumRunsOutputIteratorT, ///< Output iterator type for recording number of segments encountered typename ScanTileStateT, ///< Tile status interface type typename EqualityOpT, ///< KeyT equality operator type typename ReductionOpT, ///< ValueT reduction operator type typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(AgentReduceByKeyPolicyT::BLOCK_THREADS)) __global__ void DeviceReduceByKeyKernel( KeysInputIteratorT d_keys_in, ///< Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< Pointer to the output sequence of unique keys (one key per run) ValuesInputIteratorT d_values_in, ///< Pointer to the input sequence of corresponding values AggregatesOutputIteratorT d_aggregates_out, ///< Pointer to the output sequence of value aggregates (one aggregate per run) NumRunsOutputIteratorT d_num_runs_out, ///< Pointer to total number of runs encountered (i.e., the length of d_unique_out) ScanTileStateT tile_state, ///< Tile status interface int start_tile, ///< The starting tile for the current grid EqualityOpT equality_op, ///< KeyT equality operator ReductionOpT reduction_op, ///< ValueT reduction operator OffsetT num_items) ///< Total number of items to select from { // Thread block type for reducing tiles of value segments typedef AgentReduceByKey< AgentReduceByKeyPolicyT, KeysInputIteratorT, UniqueOutputIteratorT, ValuesInputIteratorT, AggregatesOutputIteratorT, NumRunsOutputIteratorT, EqualityOpT, ReductionOpT, OffsetT> AgentReduceByKeyT; // Shared memory for AgentReduceByKey __shared__ typename AgentReduceByKeyT::TempStorage temp_storage; // Process tiles AgentReduceByKeyT(temp_storage, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, equality_op, reduction_op).ConsumeRange( num_items, tile_state, start_tile); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceReduceByKey */ template < typename KeysInputIteratorT, ///< Random-access input iterator type for keys typename UniqueOutputIteratorT, ///< Random-access output iterator type for keys typename ValuesInputIteratorT, ///< Random-access input iterator type for values typename AggregatesOutputIteratorT, ///< Random-access output iterator type for values typename NumRunsOutputIteratorT, ///< Output iterator type for recording number of 
segments encountered typename EqualityOpT, ///< KeyT equality operator type typename ReductionOpT, ///< ValueT reduction operator type typename OffsetT> ///< Signed integer type for global offsets struct DispatchReduceByKey { //------------------------------------------------------------------------- // Types and constants //------------------------------------------------------------------------- // The input keys type typedef typename std::iterator_traits<KeysInputIteratorT>::value_type KeyInputT; // The output keys type typedef typename If<(Equals<typename std::iterator_traits<UniqueOutputIteratorT>::value_type, void>::VALUE), // KeyOutputT = (if output iterator's value type is void) ? typename std::iterator_traits<KeysInputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<UniqueOutputIteratorT>::value_type>::Type KeyOutputT; // ... else the output iterator's value type // The input values type typedef typename std::iterator_traits<ValuesInputIteratorT>::value_type ValueInputT; // The output values type typedef typename If<(Equals<typename std::iterator_traits<AggregatesOutputIteratorT>::value_type, void>::VALUE), // ValueOutputT = (if output iterator's value type is void) ? typename std::iterator_traits<ValuesInputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<AggregatesOutputIteratorT>::value_type>::Type ValueOutputT; // ... else the output iterator's value type enum { INIT_KERNEL_THREADS = 128, MAX_INPUT_BYTES = CUB_MAX(sizeof(KeyOutputT), sizeof(ValueOutputT)), COMBINED_INPUT_BYTES = sizeof(KeyOutputT) + sizeof(ValueOutputT), }; // Tile status descriptor interface type typedef ReduceByKeyScanTileState<ValueOutputT, OffsetT> ScanTileStateT; //------------------------------------------------------------------------- // Tuning policies //------------------------------------------------------------------------- /// SM35 struct Policy350 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 6, ITEMS_PER_THREAD = (MAX_INPUT_BYTES <= 8) ? 
6 : CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)), }; typedef AgentReduceByKeyPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_SCAN_WARP_SCANS> ReduceByKeyPolicyT; }; /// SM30 struct Policy300 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 6, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)), }; typedef AgentReduceByKeyPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> ReduceByKeyPolicyT; }; /// SM20 struct Policy200 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 11, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)), }; typedef AgentReduceByKeyPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> ReduceByKeyPolicyT; }; /// SM13 struct Policy130 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 7, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, ((NOMINAL_4B_ITEMS_PER_THREAD * 8) + COMBINED_INPUT_BYTES - 1) / COMBINED_INPUT_BYTES)), }; typedef AgentReduceByKeyPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> ReduceByKeyPolicyT; }; /// SM11 struct Policy110 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 5, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 8) / COMBINED_INPUT_BYTES)), }; typedef AgentReduceByKeyPolicy< 64, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_RAKING> ReduceByKeyPolicyT; }; /****************************************************************************** * Tuning policies of current PTX compiler pass ******************************************************************************/ #if (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #elif (CUB_PTX_ARCH >= 130) typedef Policy130 PtxPolicy; #else typedef Policy110 PtxPolicy; #endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxReduceByKeyPolicy : PtxPolicy::ReduceByKeyPolicyT {}; /****************************************************************************** * Utilities ******************************************************************************/ /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static void InitConfigs( int ptx_version, KernelConfig &reduce_by_key_config) { #if (CUB_PTX_ARCH > 0) (void)ptx_version; // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy reduce_by_key_config.template Init<PtxReduceByKeyPolicy>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 350) { reduce_by_key_config.template Init<typename Policy350::ReduceByKeyPolicyT>(); } else if (ptx_version >= 300) { reduce_by_key_config.template Init<typename Policy300::ReduceByKeyPolicyT>(); } else if (ptx_version >= 200) { reduce_by_key_config.template Init<typename Policy200::ReduceByKeyPolicyT>(); } else if (ptx_version >= 130) { reduce_by_key_config.template Init<typename 
Policy130::ReduceByKeyPolicyT>(); } else { reduce_by_key_config.template Init<typename Policy110::ReduceByKeyPolicyT>(); } #endif } /** * Kernel kernel dispatch configuration. */ struct KernelConfig { int block_threads; int items_per_thread; int tile_items; template <typename PolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = PolicyT::BLOCK_THREADS; items_per_thread = PolicyT::ITEMS_PER_THREAD; tile_items = block_threads * items_per_thread; } }; //--------------------------------------------------------------------- // Dispatch entrypoints //--------------------------------------------------------------------- /** * Internal dispatch routine for computing a device-wide reduce-by-key using the * specified kernel functions. */ template < typename ScanInitKernelT, ///< Function type of cub::DeviceScanInitKernel typename ReduceByKeyKernelT> ///< Function type of cub::DeviceReduceByKeyKernelT CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) EqualityOpT equality_op, ///< [in] KeyT equality operator ReductionOpT reduction_op, ///< [in] ValueT reduction operator OffsetT num_items, ///< [in] Total number of items to select from cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
int /*ptx_version*/, ///< [in] PTX version of dispatch kernels ScanInitKernelT init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel ReduceByKeyKernelT reduce_by_key_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceByKeyKernel KernelConfig reduce_by_key_config) ///< [in] Dispatch parameters that match the policy that \p reduce_by_key_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED (void)d_temp_storage; (void)temp_storage_bytes; (void)d_keys_in; (void)d_unique_out; (void)d_values_in; (void)d_aggregates_out; (void)d_num_runs_out; (void)equality_op; (void)reduction_op; (void)num_items; (void)stream; (void)debug_synchronous; (void)init_kernel; (void)reduce_by_key_kernel; (void)reduce_by_key_config; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Number of input tiles int tile_size = reduce_by_key_config.block_threads * reduce_by_key_config.items_per_thread; int num_tiles = (num_items + tile_size - 1) / tile_size; // Specify temporary storage allocation requirements size_t allocation_sizes[1]; if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break; // bytes needed for tile status descriptors // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob) void* allocations[1]; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Construct the tile status interface ScanTileStateT tile_state; if (CubDebug(error = tile_state.Init(num_tiles, allocations[0], allocation_sizes[0]))) break; // Log init_kernel configuration int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS); if (debug_synchronous) _CubLog("Invoking init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream); // Invoke init_kernel to initialize tile descriptors init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>( tile_state, num_tiles, d_num_runs_out); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Return if empty problem if (num_items == 0) break; // Get SM occupancy for reduce_by_key_kernel int reduce_by_key_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( reduce_by_key_sm_occupancy, // out reduce_by_key_kernel, reduce_by_key_config.block_threads))) break; // Get max x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;; // Run grids in epochs (in case number of tiles exceeds max x-dimension int scan_grid_size = CUB_MIN(num_tiles, max_dim_x); for (int start_tile = 0; start_tile < num_tiles; start_tile += scan_grid_size) { // Log reduce_by_key_kernel configuration if (debug_synchronous) _CubLog("Invoking %d reduce_by_key_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", start_tile, scan_grid_size, 
reduce_by_key_config.block_threads, (long long) stream, reduce_by_key_config.items_per_thread, reduce_by_key_sm_occupancy); // Invoke reduce_by_key_kernel reduce_by_key_kernel<<<scan_grid_size, reduce_by_key_config.block_threads, 0, stream>>>( d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, tile_state, start_tile, equality_op, reduction_op, num_items); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation KeysInputIteratorT d_keys_in, ///< [in] Pointer to the input sequence of keys UniqueOutputIteratorT d_unique_out, ///< [out] Pointer to the output sequence of unique keys (one key per run) ValuesInputIteratorT d_values_in, ///< [in] Pointer to the input sequence of corresponding values AggregatesOutputIteratorT d_aggregates_out, ///< [out] Pointer to the output sequence of value aggregates (one aggregate per run) NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs encountered (i.e., the length of d_unique_out) EqualityOpT equality_op, ///< [in] KeyT equality operator ReductionOpT reduction_op, ///< [in] ValueT reduction operator OffsetT num_items, ///< [in] Total number of items to select from cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel kernel dispatch configurations KernelConfig reduce_by_key_config; InitConfigs(ptx_version, reduce_by_key_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out, equality_op, reduction_op, num_items, stream, debug_synchronous, ptx_version, DeviceCompactInitKernel<ScanTileStateT, NumRunsOutputIteratorT>, DeviceReduceByKeyKernel<PtxReduceByKeyPolicy, KeysInputIteratorT, UniqueOutputIteratorT, ValuesInputIteratorT, AggregatesOutputIteratorT, NumRunsOutputIteratorT, ScanTileStateT, EqualityOpT, ReductionOpT, OffsetT>, reduce_by_key_config))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
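// --------------------------------------------------------------------------
// Editorial note (not part of the original header): a minimal host-side
// sketch of the two-phase temporary-storage protocol that the simplified
// Dispatch() entry point above expects (NULL first call writes the required
// size, second call does the work).  Buffer names are placeholders, and
// cub::Equality comes from the thread_operators.cuh header included above;
// library users would normally reach this path through the public
// cub::DeviceReduce::ReduceByKey wrapper rather than calling it directly.
template <typename KeyT, typename ValueT, typename ReductionOpT>
cudaError_t ReduceByKeyTwoPhase(
    KeyT         *d_keys_in,        // [in]  device keys
    KeyT         *d_unique_out,     // [out] device unique keys (one per run)
    ValueT       *d_values_in,      // [in]  device values
    ValueT       *d_aggregates_out, // [out] device aggregates (one per run)
    int          *d_num_runs_out,   // [out] device count of runs encountered
    ReductionOpT  reduction_op,     // [in]  value reduction operator
    int           num_items,        // [in]  number of input items
    cudaStream_t  stream = 0)
{
    typedef cub::DispatchReduceByKey<
        KeyT*, KeyT*, ValueT*, ValueT*, int*,
        cub::Equality, ReductionOpT, int> DispatchT;

    // Phase 1: size query (d_temp_storage == NULL writes temp_storage_bytes).
    void*  d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;
    cudaError_t error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out,
        cub::Equality(), reduction_op, num_items, stream, false);
    if (error != cudaSuccess) return error;

    // Phase 2: allocate the single storage blob and run the reduce-by-key.
    if ((error = cudaMalloc(&d_temp_storage, temp_storage_bytes)) != cudaSuccess)
        return error;
    error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_keys_in, d_unique_out, d_values_in, d_aggregates_out, d_num_runs_out,
        cub::Equality(), reduction_op, num_items, stream, false);
    cudaFree(d_temp_storage);
    return error;
}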
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_reduce.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceReduce provides device-wide, parallel operations for computing a reduction across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "../../agent/agent_reduce.cuh" #include "../../iterator/arg_index_input_iterator.cuh" #include "../../thread/thread_operators.cuh" #include "../../grid/grid_even_share.cuh" #include "../../iterator/arg_index_input_iterator.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Reduce region kernel entry point (multi-block). Computes privatized reductions, one per thread block. 
*/ template < typename ChainedPolicyT, ///< Chained tuning policy typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT> ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ReducePolicy::BLOCK_THREADS)) __global__ void DeviceReduceKernel( InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate OffsetT num_items, ///< [in] Total number of input data items GridEvenShare<OffsetT> even_share, ///< [in] Even-share descriptor for mapping an equal number of tiles onto each thread block ReductionOpT reduction_op) ///< [in] Binary reduction functor { // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Thread block type for reducing input tiles typedef AgentReduce< typename ChainedPolicyT::ActivePolicy::ReducePolicy, InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT> AgentReduceT; // Shared memory storage __shared__ typename AgentReduceT::TempStorage temp_storage; // Consume input tiles OutputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeTiles(even_share); // Output result if (threadIdx.x == 0) d_out[blockIdx.x] = block_aggregate; } /** * Reduce a single tile kernel entry point (single-block). Can be used to aggregate privatized thread block reductions from a previous multi-block reduction pass. 
*/ template < typename ChainedPolicyT, ///< Chained tuning policy typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT, ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> typename OuputT> ///< Data element type that is convertible to the \p value type of \p OutputIteratorT __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS), 1) __global__ void DeviceReduceSingleTileKernel( InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate OffsetT num_items, ///< [in] Total number of input data items ReductionOpT reduction_op, ///< [in] Binary reduction functor OuputT init) ///< [in] The initial value of the reduction { // Thread block type for reducing input tiles typedef AgentReduce< typename ChainedPolicyT::ActivePolicy::SingleTilePolicy, InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT> AgentReduceT; // Shared memory storage __shared__ typename AgentReduceT::TempStorage temp_storage; // Check if empty problem if (num_items == 0) { if (threadIdx.x == 0) *d_out = init; return; } // Consume input tiles OuputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeRange( OffsetT(0), num_items); // Output result if (threadIdx.x == 0) *d_out = reduction_op(init, block_aggregate); } /// Normalize input iterator to segment offset template <typename T, typename OffsetT, typename IteratorT> __device__ __forceinline__ void NormalizeReductionOutput( T &/*val*/, OffsetT /*base_offset*/, IteratorT /*itr*/) {} /// Normalize input iterator to segment offset (specialized for arg-index) template <typename KeyValuePairT, typename OffsetT, typename WrappedIteratorT, typename OutputValueT> __device__ __forceinline__ void NormalizeReductionOutput( KeyValuePairT &val, OffsetT base_offset, ArgIndexInputIterator<WrappedIteratorT, OffsetT, OutputValueT> /*itr*/) { val.key -= base_offset; } /** * Segmented reduction (one block per segment) */ template < typename ChainedPolicyT, ///< Chained tuning policy typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator typename OffsetIteratorT, ///< Random-access input iterator type for reading segment offsets \iterator typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT, ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> typename OutputT> ///< Data element type that is convertible to the \p value type of \p OutputIteratorT __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ReducePolicy::BLOCK_THREADS)) __global__ void DeviceSegmentedReduceKernel( InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that 
<tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int /*num_segments*/, ///< [in] The number of segments that comprise the sorting data ReductionOpT reduction_op, ///< [in] Binary reduction functor OutputT init) ///< [in] The initial value of the reduction { // Thread block type for reducing input tiles typedef AgentReduce< typename ChainedPolicyT::ActivePolicy::ReducePolicy, InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT> AgentReduceT; // Shared memory storage __shared__ typename AgentReduceT::TempStorage temp_storage; OffsetT segment_begin = d_begin_offsets[blockIdx.x]; OffsetT segment_end = d_end_offsets[blockIdx.x]; // Check if empty problem if (segment_begin == segment_end) { if (threadIdx.x == 0) d_out[blockIdx.x] = init; return; } // Consume input tiles OutputT block_aggregate = AgentReduceT(temp_storage, d_in, reduction_op).ConsumeRange( segment_begin, segment_end); // Normalize as needed NormalizeReductionOutput(block_aggregate, segment_begin, d_in); if (threadIdx.x == 0) d_out[blockIdx.x] = reduction_op(init, block_aggregate);; } /****************************************************************************** * Policy ******************************************************************************/ template < typename OuputT, ///< Data type typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT> ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> struct DeviceReducePolicy { //------------------------------------------------------------------------------ // Architecture-specific tuning policies //------------------------------------------------------------------------------ /// SM13 struct Policy130 : ChainedPolicy<130, Policy130, Policy130> { // ReducePolicy typedef AgentReducePolicy< CUB_NOMINAL_CONFIG(128, 8, OuputT), ///< Threads per block, items per thread 2, ///< Number of items per vectorized load BLOCK_REDUCE_RAKING, ///< Cooperative block-wide reduction algorithm to use LOAD_DEFAULT> ///< Cache load modifier ReducePolicy; // SingleTilePolicy typedef ReducePolicy SingleTilePolicy; // SegmentedReducePolicy typedef ReducePolicy SegmentedReducePolicy; }; /// SM20 struct Policy200 : ChainedPolicy<200, Policy200, Policy130> { // ReducePolicy (GTX 580: 178.9 GB/s @ 48M 4B items, 158.1 GB/s @ 192M 1B items) typedef AgentReducePolicy< CUB_NOMINAL_CONFIG(128, 8, OuputT), ///< Threads per block, items per thread 4, ///< Number of items per vectorized load BLOCK_REDUCE_RAKING, ///< Cooperative block-wide reduction algorithm to use LOAD_DEFAULT> ///< Cache load modifier ReducePolicy; // SingleTilePolicy typedef ReducePolicy SingleTilePolicy; // SegmentedReducePolicy typedef ReducePolicy SegmentedReducePolicy; }; /// SM30 struct Policy300 : ChainedPolicy<300, Policy300, Policy200> { // ReducePolicy (GTX670: 154.0 @ 48M 4B items) typedef AgentReducePolicy< CUB_NOMINAL_CONFIG(256, 20, OuputT), ///< Threads per block, items per thread 2, ///< Number of items per vectorized load BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use LOAD_DEFAULT> ///< Cache load modifier ReducePolicy; // SingleTilePolicy typedef ReducePolicy SingleTilePolicy; // SegmentedReducePolicy typedef ReducePolicy SegmentedReducePolicy; }; /// SM35 struct Policy350 : ChainedPolicy<350, Policy350, Policy300> { // 
ReducePolicy (GTX Titan: 255.1 GB/s @ 48M 4B items; 228.7 GB/s @ 192M 1B items) typedef AgentReducePolicy< CUB_NOMINAL_CONFIG(256, 20, OuputT), ///< Threads per block, items per thread 4, ///< Number of items per vectorized load BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use LOAD_LDG> ///< Cache load modifier ReducePolicy; // SingleTilePolicy typedef ReducePolicy SingleTilePolicy; // SegmentedReducePolicy typedef ReducePolicy SegmentedReducePolicy; }; /// SM60 struct Policy600 : ChainedPolicy<600, Policy600, Policy350> { // ReducePolicy (P100: 591 GB/s @ 64M 4B items; 583 GB/s @ 256M 1B items) typedef AgentReducePolicy< CUB_NOMINAL_CONFIG(256, 16, OuputT), ///< Threads per block, items per thread 4, ///< Number of items per vectorized load BLOCK_REDUCE_WARP_REDUCTIONS, ///< Cooperative block-wide reduction algorithm to use LOAD_LDG> ///< Cache load modifier ReducePolicy; // SingleTilePolicy typedef ReducePolicy SingleTilePolicy; // SegmentedReducePolicy typedef ReducePolicy SegmentedReducePolicy; }; /// MaxPolicy typedef Policy600 MaxPolicy; }; /****************************************************************************** * Single-problem dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for device-wide reduction */ template < typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT> ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> struct DispatchReduce : DeviceReducePolicy< typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type, // ... else the output iterator's value type OffsetT, ReductionOpT> { //------------------------------------------------------------------------------ // Constants //------------------------------------------------------------------------------ // Data type of output iterator typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type //------------------------------------------------------------------------------ // Problem state //------------------------------------------------------------------------------ void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. 
size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in; ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out; ///< [out] Pointer to the output aggregate OffsetT num_items; ///< [in] Total number of input items (i.e., length of \p d_in) ReductionOpT reduction_op; ///< [in] Binary reduction functor OutputT init; ///< [in] The initial value of the reduction cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. int ptx_version; ///< [in] PTX version //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ /// Constructor CUB_RUNTIME_FUNCTION __forceinline__ DispatchReduce( void* d_temp_storage, size_t &temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, OffsetT num_items, ReductionOpT reduction_op, OutputT init, cudaStream_t stream, bool debug_synchronous, int ptx_version) : d_temp_storage(d_temp_storage), temp_storage_bytes(temp_storage_bytes), d_in(d_in), d_out(d_out), num_items(num_items), reduction_op(reduction_op), init(init), stream(stream), debug_synchronous(debug_synchronous), ptx_version(ptx_version) {} //------------------------------------------------------------------------------ // Small-problem (single tile) invocation //------------------------------------------------------------------------------ /// Invoke a single block block to reduce in-core template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename SingleTileKernelT> ///< Function type of cub::DeviceReduceSingleTileKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokeSingleTile( SingleTileKernelT single_tile_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceSingleTileKernel { #ifndef CUB_RUNTIME_ENABLED (void)single_tile_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Return if the caller is simply requesting the size of the storage allocation if (d_temp_storage == NULL) { temp_storage_bytes = 1; break; } // Log single_reduce_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking DeviceReduceSingleTileKernel<<<1, %d, 0, %lld>>>(), %d items per thread\n", ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, (long long) stream, ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD); // Invoke single_reduce_sweep_kernel single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( d_in, d_out, num_items, reduction_op, init); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } //------------------------------------------------------------------------------ // Normal problem size invocation (two-pass) //------------------------------------------------------------------------------ /// Invoke two-passes to reduce template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename ReduceKernelT, ///< Function type of 
cub::DeviceReduceKernel typename SingleTileKernelT> ///< Function type of cub::DeviceReduceSingleTileKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePasses( ReduceKernelT reduce_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceKernel SingleTileKernelT single_tile_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceReduceSingleTileKernel { #ifndef CUB_RUNTIME_ENABLED (void) reduce_kernel; (void) single_tile_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Init regular kernel configuration KernelConfig reduce_config; if (CubDebug(error = reduce_config.Init<typename ActivePolicyT::ReducePolicy>(reduce_kernel))) break; int reduce_device_occupancy = reduce_config.sm_occupancy * sm_count; // Even-share work distribution int max_blocks = reduce_device_occupancy * CUB_SUBSCRIPTION_FACTOR(ptx_version); GridEvenShare<OffsetT> even_share; even_share.DispatchInit(num_items, max_blocks, reduce_config.tile_size); // Temporary storage allocation requirements void* allocations[1]; size_t allocation_sizes[1] = { max_blocks * sizeof(OutputT) // bytes needed for privatized block reductions }; // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation return cudaSuccess; } // Alias the allocation for the privatized per-block reductions OutputT *d_block_reductions = (OutputT*) allocations[0]; // Get grid size for device_reduce_sweep_kernel int reduce_grid_size = even_share.grid_size; // Log device_reduce_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking DeviceReduceKernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", reduce_grid_size, ActivePolicyT::ReducePolicy::BLOCK_THREADS, (long long) stream, ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD, reduce_config.sm_occupancy); // Invoke DeviceReduceKernel reduce_kernel<<<reduce_grid_size, ActivePolicyT::ReducePolicy::BLOCK_THREADS, 0, stream>>>( d_in, d_block_reductions, num_items, even_share, reduction_op); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Log single_reduce_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking DeviceReduceSingleTileKernel<<<1, %d, 0, %lld>>>(), %d items per thread\n", ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, (long long) stream, ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD); // Invoke DeviceReduceSingleTileKernel single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( d_block_reductions, d_out, reduce_grid_size, reduction_op, init); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } 
//------------------------------------------------------------------------------ // Chained policy invocation //------------------------------------------------------------------------------ /// Invocation template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { typedef typename ActivePolicyT::SingleTilePolicy SingleTilePolicyT; typedef typename DispatchReduce::MaxPolicy MaxPolicyT; // Force kernel code-generation in all compiler passes if (num_items <= (SingleTilePolicyT::BLOCK_THREADS * SingleTilePolicyT::ITEMS_PER_THREAD)) { // Small, single tile size return InvokeSingleTile<ActivePolicyT>( DeviceReduceSingleTileKernel<MaxPolicyT, InputIteratorT, OutputIteratorT, OffsetT, ReductionOpT, OutputT>); } else { // Regular size return InvokePasses<ActivePolicyT>( DeviceReduceKernel<typename DispatchReduce::MaxPolicy, InputIteratorT, OutputT*, OffsetT, ReductionOpT>, DeviceReduceSingleTileKernel<MaxPolicyT, OutputT*, OutputIteratorT, OffsetT, ReductionOpT, OutputT>); } } //------------------------------------------------------------------------------ // Dispatch entrypoints //------------------------------------------------------------------------------ /** * Internal dispatch routine for computing a device-wide reduction */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) ReductionOpT reduction_op, ///< [in] Binary reduction functor OutputT init, ///< [in] The initial value of the reduction cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ typedef typename DispatchReduce::MaxPolicy MaxPolicyT; cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; if (CubDebug(error = PtxVersion(ptx_version))) break; // Create dispatch functor DispatchReduce dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, init, stream, debug_synchronous, ptx_version); // Dispatch to chained policy if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; } while (0); return error; } }; /****************************************************************************** * Segmented dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for device-wide reduction */ template < typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OutputIteratorT, ///< Output iterator type for recording the reduced aggregate \iterator typename OffsetIteratorT, ///< Random-access input iterator type for reading segment offsets \iterator typename OffsetT, ///< Signed integer type for global offsets typename ReductionOpT> ///< Binary reduction functor type having member <tt>T operator()(const T &a, const T &b)</tt> struct DispatchSegmentedReduce : DeviceReducePolicy< typename std::iterator_traits<InputIteratorT>::value_type, OffsetT, ReductionOpT> { //------------------------------------------------------------------------------ // Constants //------------------------------------------------------------------------------ /// The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type //------------------------------------------------------------------------------ // Problem state //------------------------------------------------------------------------------ void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in; ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out; ///< [out] Pointer to the output aggregate OffsetT num_segments; ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets; ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets; ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. ReductionOpT reduction_op; ///< [in] Binary reduction functor OutputT init; ///< [in] The initial value of the reduction cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. 
bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. int ptx_version; ///< [in] PTX version //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ /// Constructor CUB_RUNTIME_FUNCTION __forceinline__ DispatchSegmentedReduce( void* d_temp_storage, size_t &temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, OffsetT num_segments, OffsetIteratorT d_begin_offsets, OffsetIteratorT d_end_offsets, ReductionOpT reduction_op, OutputT init, cudaStream_t stream, bool debug_synchronous, int ptx_version) : d_temp_storage(d_temp_storage), temp_storage_bytes(temp_storage_bytes), d_in(d_in), d_out(d_out), num_segments(num_segments), d_begin_offsets(d_begin_offsets), d_end_offsets(d_end_offsets), reduction_op(reduction_op), init(init), stream(stream), debug_synchronous(debug_synchronous), ptx_version(ptx_version) {} //------------------------------------------------------------------------------ // Chained policy invocation //------------------------------------------------------------------------------ /// Invocation template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename DeviceSegmentedReduceKernelT> ///< Function type of cub::DeviceSegmentedReduceKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePasses( DeviceSegmentedReduceKernelT segmented_reduce_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceSegmentedReduceKernel { #ifndef CUB_RUNTIME_ENABLED (void)segmented_reduce_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Return if the caller is simply requesting the size of the storage allocation if (d_temp_storage == NULL) { temp_storage_bytes = 1; return cudaSuccess; } // Init kernel configuration KernelConfig segmented_reduce_config; if (CubDebug(error = segmented_reduce_config.Init<typename ActivePolicyT::SegmentedReducePolicy>(segmented_reduce_kernel))) break; // Log device_reduce_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking SegmentedDeviceReduceKernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", num_segments, ActivePolicyT::SegmentedReducePolicy::BLOCK_THREADS, (long long) stream, ActivePolicyT::SegmentedReducePolicy::ITEMS_PER_THREAD, segmented_reduce_config.sm_occupancy); // Invoke DeviceReduceKernel segmented_reduce_kernel<<<num_segments, ActivePolicyT::SegmentedReducePolicy::BLOCK_THREADS, 0, stream>>>( d_in, d_out, d_begin_offsets, d_end_offsets, num_segments, reduction_op, init); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /// Invocation template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { typedef typename DispatchSegmentedReduce::MaxPolicy MaxPolicyT; // Force kernel code-generation in all compiler passes return InvokePasses<ActivePolicyT>( DeviceSegmentedReduceKernel<MaxPolicyT, InputIteratorT, OutputIteratorT, OffsetIteratorT, OffsetT, ReductionOpT, OutputT>); } 
//------------------------------------------------------------------------------ // Dispatch entrypoints //------------------------------------------------------------------------------ /** * Internal dispatch routine for computing a device-wide reduction */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void *d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OutputIteratorT d_out, ///< [out] Pointer to the output aggregate int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. ReductionOpT reduction_op, ///< [in] Binary reduction functor OutputT init, ///< [in] The initial value of the reduction cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { typedef typename DispatchSegmentedReduce::MaxPolicy MaxPolicyT; if (num_segments <= 0) return cudaSuccess; cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; if (CubDebug(error = PtxVersion(ptx_version))) break; // Create dispatch functor DispatchSegmentedReduce dispatch( d_temp_storage, temp_storage_bytes, d_in, d_out, num_segments, d_begin_offsets, d_end_offsets, reduction_op, init, stream, debug_synchronous, ptx_version); // Dispatch to chained policy if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
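// --------------------------------------------------------------------------
// Editorial note (not part of the original header): a minimal host-side
// sketch of driving DispatchReduce directly for a simple sum.  Invoke()
// above picks the single-tile kernel when the input fits in one tile and the
// two-pass path (privatized per-block reductions followed by a final single
// tile) otherwise; the caller only sees the usual size-query / run protocol
// shown here.  Device pointer names are placeholders, cub::Sum comes from
// the included thread_operators.cuh, and library users would normally go
// through cub::DeviceReduce instead of the dispatcher.
inline cudaError_t SumWithDispatchReduce(
    const float  *d_in,       // [in]  device input of length num_items
    float        *d_out,      // [out] device scalar result
    int           num_items,  // [in]  number of input items
    cudaStream_t  stream = 0)
{
    typedef cub::DispatchReduce<const float*, float*, int, cub::Sum> DispatchT;

    // Phase 1: size query (d_temp_storage == NULL writes temp_storage_bytes).
    void*  d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;
    cudaError_t error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_items, cub::Sum(), 0.0f, stream, false);
    if (error != cudaSuccess) return error;

    // Phase 2: allocate the temporary storage and perform the reduction.
    if ((error = cudaMalloc(&d_temp_storage, temp_storage_bytes)) != cudaSuccess)
        return error;
    error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_items, cub::Sum(), 0.0f, stream, false);
    cudaFree(d_temp_storage);
    return error;
}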
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_rle.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceRle provides device-wide, parallel operations for run-length-encoding sequences of data items residing within device-accessible memory. 
*/ #pragma once #include <stdio.h> #include <iterator> #include "dispatch_scan.cuh" #include "../../agent/agent_rle.cuh" #include "../../thread/thread_operators.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Select kernel entry point (multi-block) * * Performs functor-based selection if SelectOp functor type != NullType * Otherwise performs flag-based selection if FlagIterator's value type != NullType * Otherwise performs discontinuity selection (keep unique) */ template < typename AgentRlePolicyT, ///< Parameterized AgentRlePolicyT tuning policy type typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OffsetsOutputIteratorT, ///< Random-access output iterator type for writing run-offset values \iterator typename LengthsOutputIteratorT, ///< Random-access output iterator type for writing run-length values \iterator typename NumRunsOutputIteratorT, ///< Output iterator type for recording the number of runs encountered \iterator typename ScanTileStateT, ///< Tile status interface type typename EqualityOpT, ///< T equality operator type typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(AgentRlePolicyT::BLOCK_THREADS)) __global__ void DeviceRleSweepKernel( InputIteratorT d_in, ///< [in] Pointer to input sequence of data items OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run-offsets LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run-lengths NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out) ScanTileStateT tile_status, ///< [in] Tile status interface EqualityOpT equality_op, ///< [in] Equality operator for input items OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) int num_tiles) ///< [in] Total number of tiles for the entire problem { // Thread block type for selecting data from input tiles typedef AgentRle< AgentRlePolicyT, InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, EqualityOpT, OffsetT> AgentRleT; // Shared memory for AgentRle __shared__ typename AgentRleT::TempStorage temp_storage; // Process tiles AgentRleT(temp_storage, d_in, d_offsets_out, d_lengths_out, equality_op, num_items).ConsumeRange( num_tiles, tile_status, d_num_runs_out); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceRle */ template < typename InputIteratorT, ///< Random-access input iterator type for reading input items \iterator typename OffsetsOutputIteratorT, ///< Random-access output iterator type for writing run-offset values \iterator typename LengthsOutputIteratorT, ///< Random-access output iterator type for writing run-length values \iterator typename NumRunsOutputIteratorT, ///< Output iterator type for recording the number of runs encountered \iterator typename EqualityOpT, ///< T equality operator type typename OffsetT> ///< Signed integer type for global offsets struct DeviceRleDispatch { 
/****************************************************************************** * Types and constants ******************************************************************************/ // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type T; // The lengths output value type typedef typename If<(Equals<typename std::iterator_traits<LengthsOutputIteratorT>::value_type, void>::VALUE), // LengthT = (if output iterator's value type is void) ? OffsetT, // ... then the OffsetT type, typename std::iterator_traits<LengthsOutputIteratorT>::value_type>::Type LengthT; // ... else the output iterator's value type enum { INIT_KERNEL_THREADS = 128, }; // Tile status descriptor interface type typedef ReduceByKeyScanTileState<LengthT, OffsetT> ScanTileStateT; /****************************************************************************** * Tuning policies ******************************************************************************/ /// SM35 struct Policy350 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 15, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), }; typedef AgentRlePolicy< 96, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLOCK_SCAN_WARP_SCANS> RleSweepPolicy; }; /// SM30 struct Policy300 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 5, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), }; typedef AgentRlePolicy< 256, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_RAKING_MEMOIZE> RleSweepPolicy; }; /// SM20 struct Policy200 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 15, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), }; typedef AgentRlePolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, false, BLOCK_SCAN_WARP_SCANS> RleSweepPolicy; }; /// SM13 struct Policy130 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 9, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), }; typedef AgentRlePolicy< 64, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_RAKING_MEMOIZE> RleSweepPolicy; }; /// SM10 struct Policy100 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 9, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(T)))), }; typedef AgentRlePolicy< 256, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, BLOCK_SCAN_RAKING_MEMOIZE> RleSweepPolicy; }; /****************************************************************************** * Tuning policies of current PTX compiler pass ******************************************************************************/ #if (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #elif (CUB_PTX_ARCH >= 130) typedef Policy130 PtxPolicy; #else typedef Policy100 PtxPolicy; #endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxRleSweepPolicy : PtxPolicy::RleSweepPolicy {}; /****************************************************************************** * Utilities ******************************************************************************/ /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ 
static void InitConfigs( int ptx_version, KernelConfig& device_rle_config) { #if (CUB_PTX_ARCH > 0) // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy device_rle_config.template Init<PtxRleSweepPolicy>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 350) { device_rle_config.template Init<typename Policy350::RleSweepPolicy>(); } else if (ptx_version >= 300) { device_rle_config.template Init<typename Policy300::RleSweepPolicy>(); } else if (ptx_version >= 200) { device_rle_config.template Init<typename Policy200::RleSweepPolicy>(); } else if (ptx_version >= 130) { device_rle_config.template Init<typename Policy130::RleSweepPolicy>(); } else { device_rle_config.template Init<typename Policy100::RleSweepPolicy>(); } #endif } /** * Kernel kernel dispatch configuration. Mirrors the constants within AgentRlePolicyT. */ struct KernelConfig { int block_threads; int items_per_thread; BlockLoadAlgorithm load_policy; bool store_warp_time_slicing; BlockScanAlgorithm scan_algorithm; template <typename AgentRlePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = AgentRlePolicyT::BLOCK_THREADS; items_per_thread = AgentRlePolicyT::ITEMS_PER_THREAD; load_policy = AgentRlePolicyT::LOAD_ALGORITHM; store_warp_time_slicing = AgentRlePolicyT::STORE_WARP_TIME_SLICING; scan_algorithm = AgentRlePolicyT::SCAN_ALGORITHM; } CUB_RUNTIME_FUNCTION __forceinline__ void Print() { printf("%d, %d, %d, %d, %d", block_threads, items_per_thread, load_policy, store_warp_time_slicing, scan_algorithm); } }; /****************************************************************************** * Dispatch entrypoints ******************************************************************************/ /** * Internal dispatch routine for computing a device-wide run-length-encode using the * specified kernel functions. */ template < typename DeviceScanInitKernelPtr, ///< Function type of cub::DeviceScanInitKernel typename DeviceRleSweepKernelPtr> ///< Function type of cub::DeviceRleSweepKernelPtr CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to the output sequence of run-offsets LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to the output sequence of run-lengths NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to the total number of runs encountered (i.e., length of \p d_offsets_out) EqualityOpT equality_op, ///< [in] Equality operator for input items OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
int ptx_version, ///< [in] PTX version of dispatch kernels DeviceScanInitKernelPtr device_scan_init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel DeviceRleSweepKernelPtr device_rle_sweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceRleSweepKernel KernelConfig device_rle_config) ///< [in] Dispatch parameters that match the policy that \p device_rle_sweep_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Number of input tiles int tile_size = device_rle_config.block_threads * device_rle_config.items_per_thread; int num_tiles = (num_items + tile_size - 1) / tile_size; // Specify temporary storage allocation requirements size_t allocation_sizes[1]; if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break; // bytes needed for tile status descriptors // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob) void* allocations[1]; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Construct the tile status interface ScanTileStateT tile_status; if (CubDebug(error = tile_status.Init(num_tiles, allocations[0], allocation_sizes[0]))) break; // Log device_scan_init_kernel configuration int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS - 1) / INIT_KERNEL_THREADS); if (debug_synchronous) _CubLog("Invoking device_scan_init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream); // Invoke device_scan_init_kernel to initialize tile descriptors and queue descriptors device_scan_init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>( tile_status, num_tiles, d_num_runs_out); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Return if empty problem if (num_items == 0) break; // Get SM occupancy for device_rle_sweep_kernel int device_rle_kernel_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( device_rle_kernel_sm_occupancy, // out device_rle_sweep_kernel, device_rle_config.block_threads))) break; // Get max x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;; // Get grid size for scanning tiles dim3 scan_grid_size; scan_grid_size.z = 1; scan_grid_size.y = ((unsigned int) num_tiles + max_dim_x - 1) / max_dim_x; scan_grid_size.x = CUB_MIN(num_tiles, max_dim_x); // Log device_rle_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking device_rle_sweep_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", scan_grid_size.x, scan_grid_size.y, scan_grid_size.z, device_rle_config.block_threads, (long long) stream, device_rle_config.items_per_thread, device_rle_kernel_sm_occupancy); // Invoke device_rle_sweep_kernel device_rle_sweep_kernel<<<scan_grid_size, 
device_rle_config.block_threads, 0, stream>>>( d_in, d_offsets_out, d_lengths_out, d_num_runs_out, tile_status, equality_op, num_items, num_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to input sequence of data items OffsetsOutputIteratorT d_offsets_out, ///< [out] Pointer to output sequence of run-offsets LengthsOutputIteratorT d_lengths_out, ///< [out] Pointer to output sequence of run-lengths NumRunsOutputIteratorT d_num_runs_out, ///< [out] Pointer to total number of runs (i.e., length of \p d_offsets_out) EqualityOpT equality_op, ///< [in] Equality operator for input items OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel kernel dispatch configurations KernelConfig device_rle_config; InitConfigs(ptx_version, device_rle_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_offsets_out, d_lengths_out, d_num_runs_out, equality_op, num_items, stream, debug_synchronous, ptx_version, DeviceCompactInitKernel<ScanTileStateT, NumRunsOutputIteratorT>, DeviceRleSweepKernel<PtxRleSweepPolicy, InputIteratorT, OffsetsOutputIteratorT, LengthsOutputIteratorT, NumRunsOutputIteratorT, ScanTileStateT, EqualityOpT, OffsetT>, device_rle_config))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
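The dispatch routine above follows CUB's usual two-phase temporary-storage protocol: a first call with d_temp_storage == NULL only reports the bytes needed for the tile-status descriptors, and a second call launches the init and RLE sweep kernels. The following host-side sketch is not part of the repository; it shows how DeviceRleDispatch::Dispatch (the simpler overload visible above) would typically be driven. The pointer names, the int item/offset types, and the include path (a standard CUB layout rather than this vendored cub_semiring copy) are illustrative assumptions.

#include <cuda_runtime.h>
#include <cub/device/dispatch/dispatch_rle.cuh>   // path assumption; adjust for the vendored copy

// Hypothetical host-side driver for the non-trivial-runs RLE dispatch above.
cudaError_t EncodeNonTrivialRuns(
    const int   *d_in,            // [in]  device-accessible input items
    int         *d_offsets_out,   // [out] device-accessible run-offsets
    int         *d_lengths_out,   // [out] device-accessible run-lengths
    int         *d_num_runs_out,  // [out] device-accessible total number of runs
    int          num_items,
    cudaStream_t stream = 0)
{
    typedef cub::DeviceRleDispatch<
        const int*, int*, int*, int*, cub::Equality, int> DispatchT;

    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;

    // Pass 1: d_temp_storage == NULL, so Dispatch only writes the bytes
    // needed for the ScanTileStateT descriptors into temp_storage_bytes.
    cudaError_t error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_offsets_out, d_lengths_out, d_num_runs_out,
        cub::Equality(), num_items, stream, false);
    if (error != cudaSuccess) return error;

    if ((error = cudaMalloc(&d_temp_storage, temp_storage_bytes)) != cudaSuccess)
        return error;

    // Pass 2: launches the tile-status init kernel followed by DeviceRleSweepKernel.
    error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, d_offsets_out, d_lengths_out, d_num_runs_out,
        cub::Equality(), num_items, stream, false);

    cudaFree(d_temp_storage);
    return error;
}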
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_select_if.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceSelect provides device-wide, parallel operations for selecting items from sequences of data items residing within device-accessible memory. 
*/ #pragma once #include <stdio.h> #include <iterator> #include "dispatch_scan.cuh" #include "../../agent/agent_select_if.cuh" #include "../../thread/thread_operators.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Select kernel entry point (multi-block) * * Performs functor-based selection if SelectOpT functor type != NullType * Otherwise performs flag-based selection if FlagsInputIterator's value type != NullType * Otherwise performs discontinuity selection (keep unique) */ template < typename AgentSelectIfPolicyT, ///< Parameterized AgentSelectIfPolicyT tuning policy type typename InputIteratorT, ///< Random-access input iterator type for reading input items typename FlagsInputIteratorT, ///< Random-access input iterator type for reading selection flags (NullType* if a selection functor or discontinuity flagging is to be used for selection) typename SelectedOutputIteratorT, ///< Random-access output iterator type for writing selected items typename NumSelectedIteratorT, ///< Output iterator type for recording the number of items selected typename ScanTileStateT, ///< Tile status interface type typename SelectOpT, ///< Selection operator type (NullType if selection flags or discontinuity flagging is to be used for selection) typename EqualityOpT, ///< Equality operator type (NullType if selection functor or selection flags is to be used for selection) typename OffsetT, ///< Signed integer type for global offsets bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output __launch_bounds__ (int(AgentSelectIfPolicyT::BLOCK_THREADS)) __global__ void DeviceSelectSweepKernel( InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items FlagsInputIteratorT d_flags, ///< [in] Pointer to the input sequence of selection flags (if applicable) SelectedOutputIteratorT d_selected_out, ///< [out] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [out] Pointer to the total number of items selected (i.e., length of \p d_selected_out) ScanTileStateT tile_status, ///< [in] Tile status interface SelectOpT select_op, ///< [in] Selection operator EqualityOpT equality_op, ///< [in] Equality operator OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) int num_tiles) ///< [in] Total number of tiles for the entire problem { // Thread block type for selecting data from input tiles typedef AgentSelectIf< AgentSelectIfPolicyT, InputIteratorT, FlagsInputIteratorT, SelectedOutputIteratorT, SelectOpT, EqualityOpT, OffsetT, KEEP_REJECTS> AgentSelectIfT; // Shared memory for AgentSelectIf __shared__ typename AgentSelectIfT::TempStorage temp_storage; // Process tiles AgentSelectIfT(temp_storage, d_in, d_flags, d_selected_out, select_op, equality_op, num_items).ConsumeRange( num_tiles, tile_status, d_num_selected_out); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceSelect */ template < typename InputIteratorT, ///< Random-access input iterator type for reading input items 
typename FlagsInputIteratorT, ///< Random-access input iterator type for reading selection flags (NullType* if a selection functor or discontinuity flagging is to be used for selection) typename SelectedOutputIteratorT, ///< Random-access output iterator type for writing selected items typename NumSelectedIteratorT, ///< Output iterator type for recording the number of items selected typename SelectOpT, ///< Selection operator type (NullType if selection flags or discontinuity flagging is to be used for selection) typename EqualityOpT, ///< Equality operator type (NullType if selection functor or selection flags is to be used for selection) typename OffsetT, ///< Signed integer type for global offsets bool KEEP_REJECTS> ///< Whether or not we push rejected items to the back of the output struct DispatchSelectIf { /****************************************************************************** * Types and constants ******************************************************************************/ // The output value type typedef typename If<(Equals<typename std::iterator_traits<SelectedOutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<SelectedOutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // The flag value type typedef typename std::iterator_traits<FlagsInputIteratorT>::value_type FlagT; enum { INIT_KERNEL_THREADS = 128, }; // Tile status descriptor interface type typedef ScanTileState<OffsetT> ScanTileStateT; /****************************************************************************** * Tuning policies ******************************************************************************/ /// SM35 struct Policy350 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 10, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))), }; typedef AgentSelectIfPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, LOAD_LDG, BLOCK_SCAN_WARP_SCANS> SelectIfPolicyT; }; /// SM30 struct Policy300 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 7, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(3, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))), }; typedef AgentSelectIfPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> SelectIfPolicyT; }; /// SM20 struct Policy200 { enum { NOMINAL_4B_ITEMS_PER_THREAD = (KEEP_REJECTS) ? 
7 : 15, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))), }; typedef AgentSelectIfPolicy< 128, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_WARP_SCANS> SelectIfPolicyT; }; /// SM13 struct Policy130 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 9, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))), }; typedef AgentSelectIfPolicy< 64, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_RAKING_MEMOIZE> SelectIfPolicyT; }; /// SM10 struct Policy100 { enum { NOMINAL_4B_ITEMS_PER_THREAD = 9, ITEMS_PER_THREAD = CUB_MIN(NOMINAL_4B_ITEMS_PER_THREAD, CUB_MAX(1, (NOMINAL_4B_ITEMS_PER_THREAD * 4 / sizeof(OutputT)))), }; typedef AgentSelectIfPolicy< 64, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_SCAN_RAKING> SelectIfPolicyT; }; /****************************************************************************** * Tuning policies of current PTX compiler pass ******************************************************************************/ #if (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #elif (CUB_PTX_ARCH >= 130) typedef Policy130 PtxPolicy; #else typedef Policy100 PtxPolicy; #endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxSelectIfPolicyT : PtxPolicy::SelectIfPolicyT {}; /****************************************************************************** * Utilities ******************************************************************************/ /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static void InitConfigs( int ptx_version, KernelConfig &select_if_config) { #if (CUB_PTX_ARCH > 0) (void)ptx_version; // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy select_if_config.template Init<PtxSelectIfPolicyT>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 350) { select_if_config.template Init<typename Policy350::SelectIfPolicyT>(); } else if (ptx_version >= 300) { select_if_config.template Init<typename Policy300::SelectIfPolicyT>(); } else if (ptx_version >= 200) { select_if_config.template Init<typename Policy200::SelectIfPolicyT>(); } else if (ptx_version >= 130) { select_if_config.template Init<typename Policy130::SelectIfPolicyT>(); } else { select_if_config.template Init<typename Policy100::SelectIfPolicyT>(); } #endif } /** * Kernel kernel dispatch configuration. */ struct KernelConfig { int block_threads; int items_per_thread; int tile_items; template <typename PolicyT> CUB_RUNTIME_FUNCTION __forceinline__ void Init() { block_threads = PolicyT::BLOCK_THREADS; items_per_thread = PolicyT::ITEMS_PER_THREAD; tile_items = block_threads * items_per_thread; } }; /****************************************************************************** * Dispatch entrypoints ******************************************************************************/ /** * Internal dispatch routine for computing a device-wide selection using the * specified kernel functions. 
*/ template < typename ScanInitKernelPtrT, ///< Function type of cub::DeviceScanInitKernel typename SelectIfKernelPtrT> ///< Function type of cub::SelectIfKernelPtrT CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items FlagsInputIteratorT d_flags, ///< [in] Pointer to the input sequence of selection flags (if applicable) SelectedOutputIteratorT d_selected_out, ///< [in] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [in] Pointer to the total number of items selected (i.e., length of \p d_selected_out) SelectOpT select_op, ///< [in] Selection operator EqualityOpT equality_op, ///< [in] Equality operator OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. int /*ptx_version*/, ///< [in] PTX version of dispatch kernels ScanInitKernelPtrT scan_init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceScanInitKernel SelectIfKernelPtrT select_if_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceSelectSweepKernel KernelConfig select_if_config) ///< [in] Dispatch parameters that match the policy that \p select_if_kernel was compiled for { #ifndef CUB_RUNTIME_ENABLED (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_flags; (void)d_selected_out; (void)d_num_selected_out; (void)select_op; (void)equality_op; (void)num_items; (void)stream; (void)debug_synchronous; (void)scan_init_kernel; (void)select_if_kernel; (void)select_if_config; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Number of input tiles int tile_size = select_if_config.block_threads * select_if_config.items_per_thread; int num_tiles = (num_items + tile_size - 1) / tile_size; // Specify temporary storage allocation requirements size_t allocation_sizes[1]; if (CubDebug(error = ScanTileStateT::AllocationSize(num_tiles, allocation_sizes[0]))) break; // bytes needed for tile status descriptors // Compute allocation pointers into the single storage blob (or compute the necessary size of the blob) void* allocations[1]; if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Construct the tile status interface ScanTileStateT tile_status; if (CubDebug(error = tile_status.Init(num_tiles, allocations[0], allocation_sizes[0]))) break; // Log scan_init_kernel configuration int init_grid_size = CUB_MAX(1, (num_tiles + INIT_KERNEL_THREADS 
- 1) / INIT_KERNEL_THREADS); if (debug_synchronous) _CubLog("Invoking scan_init_kernel<<<%d, %d, 0, %lld>>>()\n", init_grid_size, INIT_KERNEL_THREADS, (long long) stream); // Invoke scan_init_kernel to initialize tile descriptors scan_init_kernel<<<init_grid_size, INIT_KERNEL_THREADS, 0, stream>>>( tile_status, num_tiles, d_num_selected_out); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Return if empty problem if (num_items == 0) break; // Get SM occupancy for select_if_kernel int range_select_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( range_select_sm_occupancy, // out select_if_kernel, select_if_config.block_threads))) break; // Get max x-dimension of grid int max_dim_x; if (CubDebug(error = cudaDeviceGetAttribute(&max_dim_x, cudaDevAttrMaxGridDimX, device_ordinal))) break;; // Get grid size for scanning tiles dim3 scan_grid_size; scan_grid_size.z = 1; scan_grid_size.y = ((unsigned int) num_tiles + max_dim_x - 1) / max_dim_x; scan_grid_size.x = CUB_MIN(num_tiles, max_dim_x); // Log select_if_kernel configuration if (debug_synchronous) _CubLog("Invoking select_if_kernel<<<{%d,%d,%d}, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", scan_grid_size.x, scan_grid_size.y, scan_grid_size.z, select_if_config.block_threads, (long long) stream, select_if_config.items_per_thread, range_select_sm_occupancy); // Invoke select_if_kernel select_if_kernel<<<scan_grid_size, select_if_config.block_threads, 0, stream>>>( d_in, d_flags, d_selected_out, d_num_selected_out, tile_status, select_op, equality_op, num_items, num_tiles); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Internal dispatch routine */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation InputIteratorT d_in, ///< [in] Pointer to the input sequence of data items FlagsInputIteratorT d_flags, ///< [in] Pointer to the input sequence of selection flags (if applicable) SelectedOutputIteratorT d_selected_out, ///< [in] Pointer to the output sequence of selected data items NumSelectedIteratorT d_num_selected_out, ///< [in] Pointer to the total number of items selected (i.e., length of \p d_selected_out) SelectOpT select_op, ///< [in] Selection operator EqualityOpT equality_op, ///< [in] Equality operator OffsetT num_items, ///< [in] Total number of input items (i.e., length of \p d_in) cudaStream_t stream, ///< [in] <b>[optional]</b> CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] <b>[optional]</b> Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
{ cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel kernel dispatch configurations KernelConfig select_if_config; InitConfigs(ptx_version, select_if_config); // Dispatch if (CubDebug(error = Dispatch( d_temp_storage, temp_storage_bytes, d_in, d_flags, d_selected_out, d_num_selected_out, select_op, equality_op, num_items, stream, debug_synchronous, ptx_version, DeviceCompactInitKernel<ScanTileStateT, NumSelectedIteratorT>, DeviceSelectSweepKernel<PtxSelectIfPolicyT, InputIteratorT, FlagsInputIteratorT, SelectedOutputIteratorT, NumSelectedIteratorT, ScanTileStateT, SelectOpT, EqualityOpT, OffsetT, KEEP_REJECTS>, select_if_config))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
0
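DispatchSelectIf follows the same two-phase pattern. When selection is driven by a functor, the flags iterator and the equality operator are both stubbed out with cub::NullType, matching the "functor-based selection" mode described in the kernel comment above. Below is a minimal host-side sketch under the same assumptions as before (illustrative pointer/offset types, hypothetical GreaterThanZero functor, standard CUB include path rather than this vendored copy).

#include <cuda_runtime.h>
#include <cub/device/dispatch/dispatch_select_if.cuh>   // path assumption; adjust for the vendored copy

// Hypothetical selection functor: keep strictly positive items.
struct GreaterThanZero
{
    __host__ __device__ __forceinline__
    bool operator()(const int &x) const { return x > 0; }
};

cudaError_t SelectPositive(
    const int   *d_in,                // [in]  device-accessible input items
    int         *d_selected_out,      // [out] device-accessible compacted output
    int         *d_num_selected_out,  // [out] device-accessible selection count
    int          num_items,
    cudaStream_t stream = 0)
{
    // Functor-based selection: flags iterator and equality operator are unused (NullType).
    typedef cub::DispatchSelectIf<
        const int*,         // InputIteratorT
        cub::NullType*,     // FlagsInputIteratorT (unused)
        int*,               // SelectedOutputIteratorT
        int*,               // NumSelectedIteratorT
        GreaterThanZero,    // SelectOpT
        cub::NullType,      // EqualityOpT (unused)
        int,                // OffsetT
        false>              // KEEP_REJECTS
        DispatchT;

    void   *d_temp_storage     = NULL;
    size_t  temp_storage_bytes = 0;

    // Pass 1: size the tile-status allocation.
    cudaError_t error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, (cub::NullType*) NULL, d_selected_out, d_num_selected_out,
        GreaterThanZero(), cub::NullType(), num_items, stream, false);
    if (error != cudaSuccess) return error;

    if ((error = cudaMalloc(&d_temp_storage, temp_storage_bytes)) != cudaSuccess)
        return error;

    // Pass 2: launches the init kernel followed by DeviceSelectSweepKernel.
    error = DispatchT::Dispatch(
        d_temp_storage, temp_storage_bytes,
        d_in, (cub::NullType*) NULL, d_selected_out, d_num_selected_out,
        GreaterThanZero(), cub::NullType(), num_items, stream, false);

    cudaFree(d_temp_storage);
    return error;
}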
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_radix_sort.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceRadixSort provides device-wide, parallel operations for computing a radix sort across a sequence of data items residing within device-accessible memory. */ #pragma once #include <stdio.h> #include <iterator> #include "../../agent/agent_radix_sort_upsweep.cuh" #include "../../agent/agent_radix_sort_downsweep.cuh" #include "../../agent/agent_scan.cuh" #include "../../block/block_radix_sort.cuh" #include "../../grid/grid_even_share.cuh" #include "../../util_type.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Kernel entry points *****************************************************************************/ /** * Upsweep digit-counting kernel entry point (multi-block). Computes privatized digit histograms, one per block. */ template < typename ChainedPolicyT, ///< Chained tuning policy bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int((ALT_DIGIT_BITS) ? ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::BLOCK_THREADS : ChainedPolicyT::ActivePolicy::UpsweepPolicy::BLOCK_THREADS)) __global__ void DeviceRadixSortUpsweepKernel( const KeyT *d_keys, ///< [in] Input keys buffer OffsetT *d_spine, ///< [out] Privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) 
OffsetT /*num_items*/, ///< [in] Total number of input data items int current_bit, ///< [in] Bit position of current radix digit int num_bits, ///< [in] Number of bits of current radix digit GridEvenShare<OffsetT> even_share) ///< [in] Even-share descriptor for mapan equal number of tiles onto each thread block { enum { TILE_ITEMS = ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::BLOCK_THREADS * ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::ITEMS_PER_THREAD }; // Parameterize AgentRadixSortUpsweep type for the current configuration typedef AgentRadixSortUpsweep< typename If<(ALT_DIGIT_BITS), typename ChainedPolicyT::ActivePolicy::AltUpsweepPolicy, typename ChainedPolicyT::ActivePolicy::UpsweepPolicy>::Type, KeyT, OffsetT> AgentRadixSortUpsweepT; // Shared memory storage __shared__ typename AgentRadixSortUpsweepT::TempStorage temp_storage; // Initialize GRID_MAPPING_RAKE even-share descriptor for this thread block even_share.template BlockInit<TILE_ITEMS, GRID_MAPPING_RAKE>(); AgentRadixSortUpsweepT upsweep(temp_storage, d_keys, current_bit, num_bits); upsweep.ProcessRegion(even_share.block_offset, even_share.block_end); CTA_SYNC(); // Write out digit counts (striped) upsweep.ExtractCounts<IS_DESCENDING>(d_spine, gridDim.x, blockIdx.x); } /** * Spine scan kernel entry point (single-block). Computes an exclusive prefix sum over the privatized digit histograms */ template < typename ChainedPolicyT, ///< Chained tuning policy typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::ScanPolicy::BLOCK_THREADS), 1) __global__ void RadixSortScanBinsKernel( OffsetT *d_spine, ///< [in,out] Privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) int num_counts) ///< [in] Total number of bin-counts { // Parameterize the AgentScan type for the current configuration typedef AgentScan< typename ChainedPolicyT::ActivePolicy::ScanPolicy, OffsetT*, OffsetT*, cub::Sum, OffsetT, OffsetT> AgentScanT; // Shared memory storage __shared__ typename AgentScanT::TempStorage temp_storage; // Block scan instance AgentScanT block_scan(temp_storage, d_spine, d_spine, cub::Sum(), OffsetT(0)) ; // Process full input tiles int block_offset = 0; BlockScanRunningPrefixOp<OffsetT, Sum> prefix_op(0, Sum()); while (block_offset + AgentScanT::TILE_ITEMS <= num_counts) { block_scan.template ConsumeTile<false, false>(block_offset, prefix_op); block_offset += AgentScanT::TILE_ITEMS; } } /** * Downsweep pass kernel entry point (multi-block). Scatters keys (and values) into corresponding bins for the current digit place. */ template < typename ChainedPolicyT, ///< Chained tuning policy bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int((ALT_DIGIT_BITS) ? 
ChainedPolicyT::ActivePolicy::AltDownsweepPolicy::BLOCK_THREADS : ChainedPolicyT::ActivePolicy::DownsweepPolicy::BLOCK_THREADS)) __global__ void DeviceRadixSortDownsweepKernel( const KeyT *d_keys_in, ///< [in] Input keys buffer KeyT *d_keys_out, ///< [in] Output keys buffer const ValueT *d_values_in, ///< [in] Input values buffer ValueT *d_values_out, ///< [in] Output values buffer OffsetT *d_spine, ///< [in] Scan of privatized (per block) digit histograms (striped, i.e., 0s counts from each block, then 1s counts from each block, etc.) OffsetT num_items, ///< [in] Total number of input data items int current_bit, ///< [in] Bit position of current radix digit int num_bits, ///< [in] Number of bits of current radix digit GridEvenShare<OffsetT> even_share) ///< [in] Even-share descriptor for mapan equal number of tiles onto each thread block { enum { TILE_ITEMS = ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::BLOCK_THREADS * ChainedPolicyT::ActivePolicy::AltUpsweepPolicy::ITEMS_PER_THREAD }; // Parameterize AgentRadixSortDownsweep type for the current configuration typedef AgentRadixSortDownsweep< typename If<(ALT_DIGIT_BITS), typename ChainedPolicyT::ActivePolicy::AltDownsweepPolicy, typename ChainedPolicyT::ActivePolicy::DownsweepPolicy>::Type, IS_DESCENDING, KeyT, ValueT, OffsetT> AgentRadixSortDownsweepT; // Shared memory storage __shared__ typename AgentRadixSortDownsweepT::TempStorage temp_storage; // Initialize even-share descriptor for this thread block even_share.template BlockInit<TILE_ITEMS, GRID_MAPPING_RAKE>(); // Process input tiles AgentRadixSortDownsweepT(temp_storage, num_items, d_spine, d_keys_in, d_keys_out, d_values_in, d_values_out, current_bit, num_bits).ProcessRegion( even_share.block_offset, even_share.block_end); } /** * Single pass kernel entry point (single-block). Fully sorts a tile of input. 
*/ template < typename ChainedPolicyT, ///< Chained tuning policy bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS), 1) __global__ void DeviceRadixSortSingleTileKernel( const KeyT *d_keys_in, ///< [in] Input keys buffer KeyT *d_keys_out, ///< [in] Output keys buffer const ValueT *d_values_in, ///< [in] Input values buffer ValueT *d_values_out, ///< [in] Output values buffer OffsetT num_items, ///< [in] Total number of input data items int current_bit, ///< [in] Bit position of current radix digit int end_bit) ///< [in] The past-the-end (most-significant) bit index needed for key comparison { // Constants enum { BLOCK_THREADS = ChainedPolicyT::ActivePolicy::SingleTilePolicy::BLOCK_THREADS, ITEMS_PER_THREAD = ChainedPolicyT::ActivePolicy::SingleTilePolicy::ITEMS_PER_THREAD, KEYS_ONLY = Equals<ValueT, NullType>::VALUE, }; // BlockRadixSort type typedef BlockRadixSort< KeyT, BLOCK_THREADS, ITEMS_PER_THREAD, ValueT, ChainedPolicyT::ActivePolicy::SingleTilePolicy::RADIX_BITS, (ChainedPolicyT::ActivePolicy::SingleTilePolicy::RANK_ALGORITHM == RADIX_RANK_MEMOIZE), ChainedPolicyT::ActivePolicy::SingleTilePolicy::SCAN_ALGORITHM> BlockRadixSortT; // BlockLoad type (keys) typedef BlockLoad< KeyT, BLOCK_THREADS, ITEMS_PER_THREAD, ChainedPolicyT::ActivePolicy::SingleTilePolicy::LOAD_ALGORITHM> BlockLoadKeys; // BlockLoad type (values) typedef BlockLoad< ValueT, BLOCK_THREADS, ITEMS_PER_THREAD, ChainedPolicyT::ActivePolicy::SingleTilePolicy::LOAD_ALGORITHM> BlockLoadValues; // Unsigned word for key bits typedef typename Traits<KeyT>::UnsignedBits UnsignedBitsT; // Shared memory storage __shared__ union TempStorage { typename BlockRadixSortT::TempStorage sort; typename BlockLoadKeys::TempStorage load_keys; typename BlockLoadValues::TempStorage load_values; } temp_storage; // Keys and values for the block KeyT keys[ITEMS_PER_THREAD]; ValueT values[ITEMS_PER_THREAD]; // Get default (min/max) value for out-of-bounds keys UnsignedBitsT default_key_bits = (IS_DESCENDING) ? Traits<KeyT>::LOWEST_KEY : Traits<KeyT>::MAX_KEY; KeyT default_key = reinterpret_cast<KeyT&>(default_key_bits); // Load keys BlockLoadKeys(temp_storage.load_keys).Load(d_keys_in, keys, num_items, default_key); CTA_SYNC(); // Load values if (!KEYS_ONLY) { BlockLoadValues(temp_storage.load_values).Load(d_values_in, values, num_items); CTA_SYNC(); } // Sort tile BlockRadixSortT(temp_storage.sort).SortBlockedToStriped( keys, values, current_bit, end_bit, Int2Type<IS_DESCENDING>(), Int2Type<KEYS_ONLY>()); // Store keys and values #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM) { int item_offset = ITEM * BLOCK_THREADS + threadIdx.x; if (item_offset < num_items) { d_keys_out[item_offset] = keys[ITEM]; if (!KEYS_ONLY) d_values_out[item_offset] = values[ITEM]; } } } /** * Segmented radix sorting pass (one block per segment) */ template < typename ChainedPolicyT, ///< Chained tuning policy bool ALT_DIGIT_BITS, ///< Whether or not to use the alternate (lower-bits) policy bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetIteratorT, ///< Random-access input iterator type for reading segment offsets \iterator typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int((ALT_DIGIT_BITS) ? 
ChainedPolicyT::ActivePolicy::AltSegmentedPolicy::BLOCK_THREADS : ChainedPolicyT::ActivePolicy::SegmentedPolicy::BLOCK_THREADS)) __global__ void DeviceSegmentedRadixSortKernel( const KeyT *d_keys_in, ///< [in] Input keys buffer KeyT *d_keys_out, ///< [in] Output keys buffer const ValueT *d_values_in, ///< [in] Input values buffer ValueT *d_values_out, ///< [in] Output values buffer OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int /*num_segments*/, ///< [in] The number of segments that comprise the sorting data int current_bit, ///< [in] Bit position of current radix digit int pass_bits) ///< [in] Number of bits of current radix digit { // // Constants // typedef typename If<(ALT_DIGIT_BITS), typename ChainedPolicyT::ActivePolicy::AltSegmentedPolicy, typename ChainedPolicyT::ActivePolicy::SegmentedPolicy>::Type SegmentedPolicyT; enum { BLOCK_THREADS = SegmentedPolicyT::BLOCK_THREADS, ITEMS_PER_THREAD = SegmentedPolicyT::ITEMS_PER_THREAD, RADIX_BITS = SegmentedPolicyT::RADIX_BITS, TILE_ITEMS = BLOCK_THREADS * ITEMS_PER_THREAD, RADIX_DIGITS = 1 << RADIX_BITS, KEYS_ONLY = Equals<ValueT, NullType>::VALUE, }; // Upsweep type typedef AgentRadixSortUpsweep< AgentRadixSortUpsweepPolicy<BLOCK_THREADS, ITEMS_PER_THREAD, SegmentedPolicyT::LOAD_MODIFIER, RADIX_BITS>, KeyT, OffsetT> BlockUpsweepT; // Digit-scan type typedef BlockScan<OffsetT, BLOCK_THREADS> DigitScanT; // Downsweep type typedef AgentRadixSortDownsweep<SegmentedPolicyT, IS_DESCENDING, KeyT, ValueT, OffsetT> BlockDownsweepT; enum { /// Number of bin-starting offsets tracked per thread BINS_TRACKED_PER_THREAD = BlockDownsweepT::BINS_TRACKED_PER_THREAD }; // // Process input tiles // // Shared memory storage __shared__ union { typename BlockUpsweepT::TempStorage upsweep; typename BlockDownsweepT::TempStorage downsweep; struct { volatile OffsetT reverse_counts_in[RADIX_DIGITS]; volatile OffsetT reverse_counts_out[RADIX_DIGITS]; typename DigitScanT::TempStorage scan; }; } temp_storage; OffsetT segment_begin = d_begin_offsets[blockIdx.x]; OffsetT segment_end = d_end_offsets[blockIdx.x]; OffsetT num_items = segment_end - segment_begin; // Check if empty segment if (num_items <= 0) return; // Upsweep BlockUpsweepT upsweep(temp_storage.upsweep, d_keys_in, current_bit, pass_bits); upsweep.ProcessRegion(segment_begin, segment_end); CTA_SYNC(); // The count of each digit value in this pass (valid in the first RADIX_DIGITS threads) OffsetT bin_count[BINS_TRACKED_PER_THREAD]; upsweep.ExtractCounts(bin_count); CTA_SYNC(); if (IS_DESCENDING) { // Reverse bin counts #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) temp_storage.reverse_counts_in[bin_idx] = bin_count[track]; } CTA_SYNC(); #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + 
track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) bin_count[track] = temp_storage.reverse_counts_in[RADIX_DIGITS - bin_idx - 1]; } } // Scan OffsetT bin_offset[BINS_TRACKED_PER_THREAD]; // The global scatter base offset for each digit value in this pass (valid in the first RADIX_DIGITS threads) DigitScanT(temp_storage.scan).ExclusiveSum(bin_count, bin_offset); #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { bin_offset[track] += segment_begin; } if (IS_DESCENDING) { // Reverse bin offsets #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) temp_storage.reverse_counts_out[threadIdx.x] = bin_offset[track]; } CTA_SYNC(); #pragma unroll for (int track = 0; track < BINS_TRACKED_PER_THREAD; ++track) { int bin_idx = (threadIdx.x * BINS_TRACKED_PER_THREAD) + track; if ((BLOCK_THREADS == RADIX_DIGITS) || (bin_idx < RADIX_DIGITS)) bin_offset[track] = temp_storage.reverse_counts_out[RADIX_DIGITS - bin_idx - 1]; } } CTA_SYNC(); // Downsweep BlockDownsweepT downsweep(temp_storage.downsweep, bin_offset, num_items, d_keys_in, d_keys_out, d_values_in, d_values_out, current_bit, pass_bits); downsweep.ProcessRegion(segment_begin, segment_end); } /****************************************************************************** * Policy ******************************************************************************/ /** * Tuning policy for kernel specialization */ template < typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetT> ///< Signed integer type for global offsets struct DeviceRadixSortPolicy { //------------------------------------------------------------------------------ // Constants //------------------------------------------------------------------------------ enum { // Whether this is a keys-only (or key-value) sort KEYS_ONLY = (Equals<ValueT, NullType>::VALUE), // Relative size of KeyT type to a 4-byte word SCALE_FACTOR_4B = (CUB_MAX(sizeof(KeyT), sizeof(ValueT)) + 3) / 4, }; //------------------------------------------------------------------------------ // Architecture-specific tuning policies //------------------------------------------------------------------------------ /// SM13 struct Policy130 : ChainedPolicy<130, Policy130, Policy130> { enum { PRIMARY_RADIX_BITS = 5, ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, }; // Keys-only upsweep policies typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; // Key-value pairs upsweep policies typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 19 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; // Upsweep policies typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy; typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy; // Scan policy typedef AgentScanPolicy <256, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; // Keys-only downsweep policies typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, 
LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyKeys; // Key-value pairs downsweep policies typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyPairs; // Downsweep policies typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy; typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy; // Single-tile policy typedef DownsweepPolicy SingleTilePolicy; // Segmented policies typedef DownsweepPolicy SegmentedPolicy; typedef AltDownsweepPolicy AltSegmentedPolicy; }; /// SM20 struct Policy200 : ChainedPolicy<200, Policy200, Policy130> { enum { PRIMARY_RADIX_BITS = 5, ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, }; // Keys-only upsweep policies typedef AgentRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; typedef AgentRadixSortUpsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; // Key-value pairs upsweep policies typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; // Upsweep policies typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy; typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy; // Scan policy typedef AgentScanPolicy <512, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Keys-only downsweep policies typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyKeys; // Key-value pairs downsweep policies typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 13 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyPairs; // Downsweep policies typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy; typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy; // Single-tile policy typedef DownsweepPolicy SingleTilePolicy; // Segmented policies typedef DownsweepPolicy SegmentedPolicy; typedef AltDownsweepPolicy AltSegmentedPolicy; }; /// SM30 struct Policy300 : ChainedPolicy<300, Policy300, 
Policy200> { enum { PRIMARY_RADIX_BITS = 5, ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, }; // Keys-only upsweep policies typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyKeys; typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 7 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyKeys; // Key-value pairs upsweep policies typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR_4B), LOAD_DEFAULT, PRIMARY_RADIX_BITS> UpsweepPolicyPairs; typedef AgentRadixSortUpsweepPolicy <256, CUB_MAX(1, 5 / SCALE_FACTOR_4B), LOAD_DEFAULT, ALT_RADIX_BITS> AltUpsweepPolicyPairs; // Upsweep policies typedef typename If<KEYS_ONLY, UpsweepPolicyKeys, UpsweepPolicyPairs>::Type UpsweepPolicy; typedef typename If<KEYS_ONLY, AltUpsweepPolicyKeys, AltUpsweepPolicyPairs>::Type AltUpsweepPolicy; // Scan policy typedef AgentScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; // Keys-only downsweep policies typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 14 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyKeys; // Key-value pairs downsweep policies typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyPairs; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 10 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, ALT_RADIX_BITS> AltDownsweepPolicyPairs; // Downsweep policies typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy; typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy; // Single-tile policy typedef DownsweepPolicy SingleTilePolicy; // Segmented policies typedef DownsweepPolicy SegmentedPolicy; typedef AltDownsweepPolicy AltSegmentedPolicy; }; /// SM35 struct Policy350 : ChainedPolicy<350, Policy350, Policy300> { enum { PRIMARY_RADIX_BITS = 6, // 1.72B 32b keys/s, 1.17B 32b pairs/s, 1.55B 32b segmented keys/s (K40m) }; // Scan policy typedef AgentScanPolicy <1024, 4, BLOCK_LOAD_VECTORIZE, LOAD_DEFAULT, BLOCK_STORE_VECTORIZE, BLOCK_SCAN_WARP_SCANS> ScanPolicy; // Keys-only downsweep policies typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 9 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_LDG, RADIX_RANK_MATCH, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicyKeys; typedef AgentRadixSortDownsweepPolicy <64, CUB_MAX(1, 18 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicyKeys; // Key-value pairs downsweep policies typedef DownsweepPolicyKeys DownsweepPolicyPairs; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 15 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicyPairs; // Downsweep policies typedef typename If<KEYS_ONLY, DownsweepPolicyKeys, DownsweepPolicyPairs>::Type DownsweepPolicy; typedef typename If<KEYS_ONLY, AltDownsweepPolicyKeys, AltDownsweepPolicyPairs>::Type AltDownsweepPolicy; // 
Upsweep policies typedef DownsweepPolicy UpsweepPolicy; typedef AltDownsweepPolicy AltUpsweepPolicy; // Single-tile policy typedef DownsweepPolicy SingleTilePolicy; // Segmented policies typedef DownsweepPolicy SegmentedPolicy; typedef AltDownsweepPolicy AltSegmentedPolicy; }; /// SM50 struct Policy500 : ChainedPolicy<500, Policy500, Policy350> { enum { PRIMARY_RADIX_BITS = 7, // 3.5B 32b keys/s, 1.92B 32b pairs/s (TitanX) SINGLE_TILE_RADIX_BITS = 6, SEGMENTED_RADIX_BITS = 6, // 3.1B 32b segmented keys/s (TitanX) }; // ScanPolicy typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Downsweep policies typedef AgentRadixSortDownsweepPolicy <160, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_BASIC, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicy; typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_RAKING_MEMOIZE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; // Upsweep policies typedef DownsweepPolicy UpsweepPolicy; typedef AltDownsweepPolicy AltUpsweepPolicy; // Single-tile policy typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; // Segmented policies typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 31 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS> SegmentedPolicy; typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; }; /// SM60 (GP100) struct Policy600 : ChainedPolicy<600, Policy600, Policy500> { enum { PRIMARY_RADIX_BITS = 7, // 6.9B 32b keys/s (Quadro P100) SINGLE_TILE_RADIX_BITS = 6, SEGMENTED_RADIX_BITS = 6, // 5.9B 32b segmented keys/s (Quadro P100) }; // ScanPolicy typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Downsweep policies typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 25 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MATCH, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicy; typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; // Upsweep policies typedef DownsweepPolicy UpsweepPolicy; typedef AltDownsweepPolicy AltUpsweepPolicy; // Single-tile policy typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; // Segmented policies typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS> SegmentedPolicy; typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; }; /// SM61 (GP104) struct Policy610 : ChainedPolicy<610, Policy610, Policy600> { enum { PRIMARY_RADIX_BITS = 7, // 3.4B 32b keys/s, 1.83B 32b pairs/s 
(1080) SINGLE_TILE_RADIX_BITS = 6, SEGMENTED_RADIX_BITS = 6, // 3.3B 32b segmented keys/s (1080) }; // ScanPolicy typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Downsweep policies typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 31 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_DEFAULT, RADIX_RANK_MATCH, BLOCK_SCAN_RAKING_MEMOIZE, PRIMARY_RADIX_BITS> DownsweepPolicy; typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 35 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_RAKING_MEMOIZE, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; // Upsweep policies typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 16 / SCALE_FACTOR_4B), LOAD_LDG, PRIMARY_RADIX_BITS> UpsweepPolicy; typedef AgentRadixSortUpsweepPolicy <128, CUB_MAX(1, 16 / SCALE_FACTOR_4B), LOAD_LDG, PRIMARY_RADIX_BITS - 1> AltUpsweepPolicy; // Single-tile policy typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; // Segmented policies typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS> SegmentedPolicy; typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; }; /// SM62 (Tegra, less RF) struct Policy620 : ChainedPolicy<620, Policy620, Policy610> { enum { PRIMARY_RADIX_BITS = 5, ALT_RADIX_BITS = PRIMARY_RADIX_BITS - 1, }; // ScanPolicy typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Downsweep policies typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_RAKING_MEMOIZE, PRIMARY_RADIX_BITS> DownsweepPolicy; typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 16 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_RAKING_MEMOIZE, ALT_RADIX_BITS> AltDownsweepPolicy; // Upsweep policies typedef DownsweepPolicy UpsweepPolicy; typedef AltDownsweepPolicy AltUpsweepPolicy; // Single-tile policy typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> SingleTilePolicy; // Segmented policies typedef DownsweepPolicy SegmentedPolicy; typedef AltDownsweepPolicy AltSegmentedPolicy; }; /// SM70 (GV100) struct Policy700 : ChainedPolicy<700, Policy700, Policy620> { enum { PRIMARY_RADIX_BITS = 6, // 7.62B 32b keys/s (GV100) SINGLE_TILE_RADIX_BITS = 6, SEGMENTED_RADIX_BITS = 6, // 8.7B 32b segmented keys/s (GV100) }; // ScanPolicy typedef AgentScanPolicy <512, 23, BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, BLOCK_STORE_WARP_TRANSPOSE, BLOCK_SCAN_RAKING_MEMOIZE> ScanPolicy; // Downsweep policies typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 47 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> DownsweepPolicy; typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 29 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS - 1> AltDownsweepPolicy; // 
Upsweep policies typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 47 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MATCH, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS> UpsweepPolicy; typedef AgentRadixSortDownsweepPolicy <128, CUB_MAX(1, 29 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MATCH, BLOCK_SCAN_WARP_SCANS, PRIMARY_RADIX_BITS - 1> AltUpsweepPolicy; // Single-tile policy typedef AgentRadixSortDownsweepPolicy <256, CUB_MAX(1, 19 / SCALE_FACTOR_4B), BLOCK_LOAD_DIRECT, LOAD_LDG, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SINGLE_TILE_RADIX_BITS> SingleTilePolicy; // Segmented policies typedef AgentRadixSortDownsweepPolicy <192, CUB_MAX(1, 39 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS> SegmentedPolicy; typedef AgentRadixSortDownsweepPolicy <384, CUB_MAX(1, 11 / SCALE_FACTOR_4B), BLOCK_LOAD_TRANSPOSE, LOAD_DEFAULT, RADIX_RANK_MEMOIZE, BLOCK_SCAN_WARP_SCANS, SEGMENTED_RADIX_BITS - 1> AltSegmentedPolicy; }; /// MaxPolicy typedef Policy700 MaxPolicy; }; /****************************************************************************** * Single-problem dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for device-wide radix sort */ template < bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetT> ///< Signed integer type for global offsets struct DispatchRadixSort : DeviceRadixSortPolicy<KeyT, ValueT, OffsetT> { //------------------------------------------------------------------------------ // Constants //------------------------------------------------------------------------------ enum { // Whether this is a keys-only (or key-value) sort KEYS_ONLY = (Equals<ValueT, NullType>::VALUE), }; //------------------------------------------------------------------------------ // Problem state //------------------------------------------------------------------------------ void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys; ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values; ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values OffsetT num_items; ///< [in] Number of items to sort int begin_bit; ///< [in] The beginning (least-significant) bit index needed for key comparison int end_bit; ///< [in] The past-the-end (most-significant) bit index needed for key comparison cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
int ptx_version; ///< [in] PTX version bool is_overwrite_okay; ///< [in] Whether is okay to overwrite source buffers //------------------------------------------------------------------------------ // Constructor //------------------------------------------------------------------------------ /// Constructor CUB_RUNTIME_FUNCTION __forceinline__ DispatchRadixSort( void* d_temp_storage, size_t &temp_storage_bytes, DoubleBuffer<KeyT> &d_keys, DoubleBuffer<ValueT> &d_values, OffsetT num_items, int begin_bit, int end_bit, bool is_overwrite_okay, cudaStream_t stream, bool debug_synchronous, int ptx_version) : d_temp_storage(d_temp_storage), temp_storage_bytes(temp_storage_bytes), d_keys(d_keys), d_values(d_values), num_items(num_items), begin_bit(begin_bit), end_bit(end_bit), stream(stream), debug_synchronous(debug_synchronous), ptx_version(ptx_version), is_overwrite_okay(is_overwrite_okay) {} //------------------------------------------------------------------------------ // Small-problem (single tile) invocation //------------------------------------------------------------------------------ /// Invoke a single block to sort in-core template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename SingleTileKernelT> ///< Function type of cub::DeviceRadixSortSingleTileKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokeSingleTile( SingleTileKernelT single_tile_kernel) ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortSingleTileKernel { #ifndef CUB_RUNTIME_ENABLED (void)single_tile_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Return if the caller is simply requesting the size of the storage allocation if (d_temp_storage == NULL) { temp_storage_bytes = 1; break; } // Return if empty problem if (num_items == 0) break; // Log single_tile_kernel configuration if (debug_synchronous) _CubLog("Invoking single_tile_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n", 1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, (long long) stream, ActivePolicyT::SingleTilePolicy::ITEMS_PER_THREAD, 1, begin_bit, ActivePolicyT::SingleTilePolicy::RADIX_BITS); // Invoke upsweep_kernel with same grid size as downsweep_kernel single_tile_kernel<<<1, ActivePolicyT::SingleTilePolicy::BLOCK_THREADS, 0, stream>>>( d_keys.Current(), d_keys.Alternate(), d_values.Current(), d_values.Alternate(), num_items, begin_bit, end_bit); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Update selector d_keys.selector ^= 1; d_values.selector ^= 1; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } //------------------------------------------------------------------------------ // Normal problem size invocation //------------------------------------------------------------------------------ /** * Invoke a three-kernel sorting pass at the current bit. 
*/ template <typename PassConfigT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePass( const KeyT *d_keys_in, KeyT *d_keys_out, const ValueT *d_values_in, ValueT *d_values_out, OffsetT *d_spine, int spine_length, int &current_bit, PassConfigT &pass_config) { cudaError error = cudaSuccess; do { int pass_bits = CUB_MIN(pass_config.radix_bits, (end_bit - current_bit)); // Log upsweep_kernel configuration if (debug_synchronous) _CubLog("Invoking upsweep_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n", pass_config.even_share.grid_size, pass_config.upsweep_config.block_threads, (long long) stream, pass_config.upsweep_config.items_per_thread, pass_config.upsweep_config.sm_occupancy, current_bit, pass_bits); // Invoke upsweep_kernel with same grid size as downsweep_kernel pass_config.upsweep_kernel<<<pass_config.even_share.grid_size, pass_config.upsweep_config.block_threads, 0, stream>>>( d_keys_in, d_spine, num_items, current_bit, pass_bits, pass_config.even_share); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Log scan_kernel configuration if (debug_synchronous) _CubLog("Invoking scan_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread\n", 1, pass_config.scan_config.block_threads, (long long) stream, pass_config.scan_config.items_per_thread); // Invoke scan_kernel pass_config.scan_kernel<<<1, pass_config.scan_config.block_threads, 0, stream>>>( d_spine, spine_length); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Log downsweep_kernel configuration if (debug_synchronous) _CubLog("Invoking downsweep_kernel<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy\n", pass_config.even_share.grid_size, pass_config.downsweep_config.block_threads, (long long) stream, pass_config.downsweep_config.items_per_thread, pass_config.downsweep_config.sm_occupancy); // Invoke downsweep_kernel pass_config.downsweep_kernel<<<pass_config.even_share.grid_size, pass_config.downsweep_config.block_threads, 0, stream>>>( d_keys_in, d_keys_out, d_values_in, d_values_out, d_spine, num_items, current_bit, pass_bits, pass_config.even_share); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Update current bit current_bit += pass_bits; } while (0); return error; } /// Pass configuration structure template < typename UpsweepKernelT, typename ScanKernelT, typename DownsweepKernelT> struct PassConfig { UpsweepKernelT upsweep_kernel; KernelConfig upsweep_config; ScanKernelT scan_kernel; KernelConfig scan_config; DownsweepKernelT downsweep_kernel; KernelConfig downsweep_config; int radix_bits; int radix_digits; int max_downsweep_grid_size; GridEvenShare<OffsetT> even_share; /// Initialize pass configuration template < typename UpsweepPolicyT, typename ScanPolicyT, typename DownsweepPolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InitPassConfig( UpsweepKernelT upsweep_kernel, ScanKernelT scan_kernel, DownsweepKernelT downsweep_kernel, int ptx_version, int sm_count, int num_items) { cudaError error = cudaSuccess; do { this->upsweep_kernel = upsweep_kernel; 
this->scan_kernel = scan_kernel; this->downsweep_kernel = downsweep_kernel; radix_bits = DownsweepPolicyT::RADIX_BITS; radix_digits = 1 << radix_bits; if (CubDebug(error = upsweep_config.Init<UpsweepPolicyT>(upsweep_kernel))) break; if (CubDebug(error = scan_config.Init<ScanPolicyT>(scan_kernel))) break; if (CubDebug(error = downsweep_config.Init<DownsweepPolicyT>(downsweep_kernel))) break; max_downsweep_grid_size = (downsweep_config.sm_occupancy * sm_count) * CUB_SUBSCRIPTION_FACTOR(ptx_version); even_share.DispatchInit( num_items, max_downsweep_grid_size, CUB_MAX(downsweep_config.tile_size, upsweep_config.tile_size)); } while (0); return error; } }; /// Invocation (run multiple digit passes) template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename UpsweepKernelT, ///< Function type of cub::DeviceRadixSortUpsweepKernel typename ScanKernelT, ///< Function type of cub::SpineScanKernel typename DownsweepKernelT> ///< Function type of cub::DeviceRadixSortDownsweepKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePasses( UpsweepKernelT upsweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortUpsweepKernel UpsweepKernelT alt_upsweep_kernel, ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceRadixSortUpsweepKernel ScanKernelT scan_kernel, ///< [in] Kernel function pointer to parameterization of cub::SpineScanKernel DownsweepKernelT downsweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceRadixSortDownsweepKernel DownsweepKernelT alt_downsweep_kernel) ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceRadixSortDownsweepKernel { #ifndef CUB_RUNTIME_ENABLED (void)upsweep_kernel; (void)alt_upsweep_kernel; (void)scan_kernel; (void)downsweep_kernel; (void)alt_downsweep_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Init regular and alternate-digit kernel configurations PassConfig<UpsweepKernelT, ScanKernelT, DownsweepKernelT> pass_config, alt_pass_config; if ((error = pass_config.template InitPassConfig< typename ActivePolicyT::UpsweepPolicy, typename ActivePolicyT::ScanPolicy, typename ActivePolicyT::DownsweepPolicy>( upsweep_kernel, scan_kernel, downsweep_kernel, ptx_version, sm_count, num_items))) break; if ((error = alt_pass_config.template InitPassConfig< typename ActivePolicyT::AltUpsweepPolicy, typename ActivePolicyT::ScanPolicy, typename ActivePolicyT::AltDownsweepPolicy>( alt_upsweep_kernel, scan_kernel, alt_downsweep_kernel, ptx_version, sm_count, num_items))) break; // Get maximum spine length int max_grid_size = CUB_MAX(pass_config.max_downsweep_grid_size, alt_pass_config.max_downsweep_grid_size); int spine_length = (max_grid_size * pass_config.radix_digits) + pass_config.scan_config.tile_size; // Temporary storage allocation requirements void* allocations[3]; size_t allocation_sizes[3] = { spine_length * sizeof(OffsetT), // bytes needed for privatized block digit histograms (is_overwrite_okay) ? 0 : num_items * sizeof(KeyT), // bytes needed for 3rd keys buffer (is_overwrite_okay || (KEYS_ONLY)) ? 
0 : num_items * sizeof(ValueT), // bytes needed for 3rd values buffer }; // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; // Return if the caller is simply requesting the size of the storage allocation if (d_temp_storage == NULL) return cudaSuccess; // Pass planning. Run passes of the alternate digit-size configuration until we have an even multiple of our preferred digit size int num_bits = end_bit - begin_bit; int num_passes = (num_bits + pass_config.radix_bits - 1) / pass_config.radix_bits; bool is_num_passes_odd = num_passes & 1; int max_alt_passes = (num_passes * pass_config.radix_bits) - num_bits; int alt_end_bit = CUB_MIN(end_bit, begin_bit + (max_alt_passes * alt_pass_config.radix_bits)); // Alias the temporary storage allocations OffsetT *d_spine = static_cast<OffsetT*>(allocations[0]); DoubleBuffer<KeyT> d_keys_remaining_passes( (is_overwrite_okay || is_num_passes_odd) ? d_keys.Alternate() : static_cast<KeyT*>(allocations[1]), (is_overwrite_okay) ? d_keys.Current() : (is_num_passes_odd) ? static_cast<KeyT*>(allocations[1]) : d_keys.Alternate()); DoubleBuffer<ValueT> d_values_remaining_passes( (is_overwrite_okay || is_num_passes_odd) ? d_values.Alternate() : static_cast<ValueT*>(allocations[2]), (is_overwrite_okay) ? d_values.Current() : (is_num_passes_odd) ? static_cast<ValueT*>(allocations[2]) : d_values.Alternate()); // Run first pass, consuming from the input's current buffers int current_bit = begin_bit; if (CubDebug(error = InvokePass( d_keys.Current(), d_keys_remaining_passes.Current(), d_values.Current(), d_values_remaining_passes.Current(), d_spine, spine_length, current_bit, (current_bit < alt_end_bit) ? alt_pass_config : pass_config))) break; // Run remaining passes while (current_bit < end_bit) { if (CubDebug(error = InvokePass( d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1], d_spine, spine_length, current_bit, (current_bit < alt_end_bit) ? 
                alt_pass_config : pass_config))) break;

                // Invert selectors
                d_keys_remaining_passes.selector ^= 1;
                d_values_remaining_passes.selector ^= 1;
            }

            // Update selector
            if (!is_overwrite_okay)
            {
                num_passes = 1; // Sorted data always ends up in the other vector
            }

            d_keys.selector = (d_keys.selector + num_passes) & 1;
            d_values.selector = (d_values.selector + num_passes) & 1;
        } while (0);

        return error;

#endif // CUB_RUNTIME_ENABLED
    }


    //------------------------------------------------------------------------------
    // Chained policy invocation
    //------------------------------------------------------------------------------

    /// Invocation
    template <typename ActivePolicyT>
    CUB_RUNTIME_FUNCTION __forceinline__
    cudaError_t Invoke()
    {
        typedef typename DispatchRadixSort::MaxPolicy      MaxPolicyT;
        typedef typename ActivePolicyT::SingleTilePolicy   SingleTilePolicyT;

        // Force kernel code-generation in all compiler passes
        if (num_items <= (SingleTilePolicyT::BLOCK_THREADS * SingleTilePolicyT::ITEMS_PER_THREAD))
        {
            // Small, single tile size
            return InvokeSingleTile<ActivePolicyT>(
                DeviceRadixSortSingleTileKernel<MaxPolicyT, IS_DESCENDING, KeyT, ValueT, OffsetT>);
        }
        else
        {
            // Regular size
            return InvokePasses<ActivePolicyT>(
                DeviceRadixSortUpsweepKernel<   MaxPolicyT, false, IS_DESCENDING, KeyT, OffsetT>,
                DeviceRadixSortUpsweepKernel<   MaxPolicyT, true,  IS_DESCENDING, KeyT, OffsetT>,
                RadixSortScanBinsKernel<        MaxPolicyT, OffsetT>,
                DeviceRadixSortDownsweepKernel< MaxPolicyT, false, IS_DESCENDING, KeyT, ValueT, OffsetT>,
                DeviceRadixSortDownsweepKernel< MaxPolicyT, true,  IS_DESCENDING, KeyT, ValueT, OffsetT>);
        }
    }


    //------------------------------------------------------------------------------
    // Dispatch entrypoints
    //------------------------------------------------------------------------------

    /**
     * Internal dispatch routine
     */
    CUB_RUNTIME_FUNCTION __forceinline__
    static cudaError_t Dispatch(
        void*                   d_temp_storage,         ///< [in] %Device-accessible allocation of temporary storage.  When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done.
        size_t                  &temp_storage_bytes,    ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation
        DoubleBuffer<KeyT>      &d_keys,                ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys
        DoubleBuffer<ValueT>    &d_values,              ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values
        OffsetT                 num_items,              ///< [in] Number of items to sort
        int                     begin_bit,              ///< [in] The beginning (least-significant) bit index needed for key comparison
        int                     end_bit,                ///< [in] The past-the-end (most-significant) bit index needed for key comparison
        bool                    is_overwrite_okay,      ///< [in] Whether it is okay to overwrite source buffers
        cudaStream_t            stream,                 ///< [in] CUDA stream to launch kernels within.  Default is stream<sub>0</sub>.
        bool                    debug_synchronous)      ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors.  Also causes launch configurations to be printed to the console.  Default is \p false.
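    // Illustrative pass-planning arithmetic for InvokePasses() above (the concrete
    // numbers are an example only): the SM50+ policies above use a 7-bit primary
    // digit and a 6-bit alternate digit, so for 32-bit keys with begin_bit = 0 and
    // end_bit = 32:
    //
    //     num_bits       = end_bit - begin_bit                             = 32
    //     num_passes     = (num_bits + 7 - 1) / 7                          = 5
    //     max_alt_passes = (num_passes * 7) - num_bits                     = 3
    //     alt_end_bit    = min(end_bit, begin_bit + (max_alt_passes * 6))  = 18
    //
    // The first three passes (bits [0,18)) therefore run the 6-bit alternate
    // configuration and the last two run the 7-bit primary configuration,
    // covering 6+6+6+7+7 = 32 bits in five passes.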
{ typedef typename DispatchRadixSort::MaxPolicy MaxPolicyT; cudaError_t error; do { // Get PTX version int ptx_version; if (CubDebug(error = PtxVersion(ptx_version))) break; // Create dispatch functor DispatchRadixSort dispatch( d_temp_storage, temp_storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, is_overwrite_okay, stream, debug_synchronous, ptx_version); // Dispatch to chained policy if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break; } while (0); return error; } }; /****************************************************************************** * Segmented dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for segmented device-wide radix sort */ template < bool IS_DESCENDING, ///< Whether or not the sorted-order is high-to-low typename KeyT, ///< Key type typename ValueT, ///< Value type typename OffsetIteratorT, ///< Random-access input iterator type for reading segment offsets \iterator typename OffsetT> ///< Signed integer type for global offsets struct DispatchSegmentedRadixSort : DeviceRadixSortPolicy<KeyT, ValueT, OffsetT> { //------------------------------------------------------------------------------ // Constants //------------------------------------------------------------------------------ enum { // Whether this is a keys-only (or key-value) sort KEYS_ONLY = (Equals<ValueT, NullType>::VALUE), }; //------------------------------------------------------------------------------ // Parameter members //------------------------------------------------------------------------------ void *d_temp_storage; ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes; ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys; ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values; ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values OffsetT num_items; ///< [in] Number of items to sort OffsetT num_segments; ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets; ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets; ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit; ///< [in] The beginning (least-significant) bit index needed for key comparison int end_bit; ///< [in] The past-the-end (most-significant) bit index needed for key comparison cudaStream_t stream; ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous; ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. 
Default is \p false. int ptx_version; ///< [in] PTX version bool is_overwrite_okay; ///< [in] Whether is okay to overwrite source buffers //------------------------------------------------------------------------------ // Constructors //------------------------------------------------------------------------------ /// Constructor CUB_RUNTIME_FUNCTION __forceinline__ DispatchSegmentedRadixSort( void* d_temp_storage, size_t &temp_storage_bytes, DoubleBuffer<KeyT> &d_keys, DoubleBuffer<ValueT> &d_values, OffsetT num_items, OffsetT num_segments, OffsetIteratorT d_begin_offsets, OffsetIteratorT d_end_offsets, int begin_bit, int end_bit, bool is_overwrite_okay, cudaStream_t stream, bool debug_synchronous, int ptx_version) : d_temp_storage(d_temp_storage), temp_storage_bytes(temp_storage_bytes), d_keys(d_keys), d_values(d_values), num_items(num_items), num_segments(num_segments), d_begin_offsets(d_begin_offsets), d_end_offsets(d_end_offsets), begin_bit(begin_bit), end_bit(end_bit), is_overwrite_okay(is_overwrite_okay), stream(stream), debug_synchronous(debug_synchronous), ptx_version(ptx_version) {} //------------------------------------------------------------------------------ // Multi-segment invocation //------------------------------------------------------------------------------ /// Invoke a three-kernel sorting pass at the current bit. template <typename PassConfigT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePass( const KeyT *d_keys_in, KeyT *d_keys_out, const ValueT *d_values_in, ValueT *d_values_out, int &current_bit, PassConfigT &pass_config) { cudaError error = cudaSuccess; do { int pass_bits = CUB_MIN(pass_config.radix_bits, (end_bit - current_bit)); // Log kernel configuration if (debug_synchronous) _CubLog("Invoking segmented_kernels<<<%d, %d, 0, %lld>>>(), %d items per thread, %d SM occupancy, current bit %d, bit_grain %d\n", num_segments, pass_config.segmented_config.block_threads, (long long) stream, pass_config.segmented_config.items_per_thread, pass_config.segmented_config.sm_occupancy, current_bit, pass_bits); pass_config.segmented_kernel<<<num_segments, pass_config.segmented_config.block_threads, 0, stream>>>( d_keys_in, d_keys_out, d_values_in, d_values_out, d_begin_offsets, d_end_offsets, num_segments, current_bit, pass_bits); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; // Update current bit current_bit += pass_bits; } while (0); return error; } /// PassConfig data structure template <typename SegmentedKernelT> struct PassConfig { SegmentedKernelT segmented_kernel; KernelConfig segmented_config; int radix_bits; int radix_digits; /// Initialize pass configuration template <typename SegmentedPolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InitPassConfig(SegmentedKernelT segmented_kernel) { this->segmented_kernel = segmented_kernel; this->radix_bits = SegmentedPolicyT::RADIX_BITS; this->radix_digits = 1 << radix_bits; return CubDebug(segmented_config.Init<SegmentedPolicyT>(segmented_kernel)); } }; /// Invocation (run multiple digit passes) template < typename ActivePolicyT, ///< Umbrella policy active for the target device typename SegmentedKernelT> ///< Function type of cub::DeviceSegmentedRadixSortKernel CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t InvokePasses( SegmentedKernelT segmented_kernel, ///< [in] Kernel function pointer to parameterization of 
cub::DeviceSegmentedRadixSortKernel SegmentedKernelT alt_segmented_kernel) ///< [in] Alternate kernel function pointer to parameterization of cub::DeviceSegmentedRadixSortKernel { #ifndef CUB_RUNTIME_ENABLED (void)segmented_kernel; (void)alt_segmented_kernel; // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported ); #else cudaError error = cudaSuccess; do { // Init regular and alternate kernel configurations PassConfig<SegmentedKernelT> pass_config, alt_pass_config; if ((error = pass_config.template InitPassConfig<typename ActivePolicyT::SegmentedPolicy>(segmented_kernel))) break; if ((error = alt_pass_config.template InitPassConfig<typename ActivePolicyT::AltSegmentedPolicy>(alt_segmented_kernel))) break; // Temporary storage allocation requirements void* allocations[2]; size_t allocation_sizes[2] = { (is_overwrite_okay) ? 0 : num_items * sizeof(KeyT), // bytes needed for 3rd keys buffer (is_overwrite_okay || (KEYS_ONLY)) ? 0 : num_items * sizeof(ValueT), // bytes needed for 3rd values buffer }; // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; // Return if the caller is simply requesting the size of the storage allocation if (d_temp_storage == NULL) { if (temp_storage_bytes == 0) temp_storage_bytes = 1; return cudaSuccess; } // Pass planning. Run passes of the alternate digit-size configuration until we have an even multiple of our preferred digit size int radix_bits = ActivePolicyT::SegmentedPolicy::RADIX_BITS; int alt_radix_bits = ActivePolicyT::AltSegmentedPolicy::RADIX_BITS; int num_bits = end_bit - begin_bit; int num_passes = (num_bits + radix_bits - 1) / radix_bits; bool is_num_passes_odd = num_passes & 1; int max_alt_passes = (num_passes * radix_bits) - num_bits; int alt_end_bit = CUB_MIN(end_bit, begin_bit + (max_alt_passes * alt_radix_bits)); DoubleBuffer<KeyT> d_keys_remaining_passes( (is_overwrite_okay || is_num_passes_odd) ? d_keys.Alternate() : static_cast<KeyT*>(allocations[0]), (is_overwrite_okay) ? d_keys.Current() : (is_num_passes_odd) ? static_cast<KeyT*>(allocations[0]) : d_keys.Alternate()); DoubleBuffer<ValueT> d_values_remaining_passes( (is_overwrite_okay || is_num_passes_odd) ? d_values.Alternate() : static_cast<ValueT*>(allocations[1]), (is_overwrite_okay) ? d_values.Current() : (is_num_passes_odd) ? static_cast<ValueT*>(allocations[1]) : d_values.Alternate()); // Run first pass, consuming from the input's current buffers int current_bit = begin_bit; if (CubDebug(error = InvokePass( d_keys.Current(), d_keys_remaining_passes.Current(), d_values.Current(), d_values_remaining_passes.Current(), current_bit, (current_bit < alt_end_bit) ? alt_pass_config : pass_config))) break; // Run remaining passes while (current_bit < end_bit) { if (CubDebug(error = InvokePass( d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_keys_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector], d_values_remaining_passes.d_buffers[d_keys_remaining_passes.selector ^ 1], current_bit, (current_bit < alt_end_bit) ? 
alt_pass_config : pass_config))) break; // Invert selectors and update current bit d_keys_remaining_passes.selector ^= 1; d_values_remaining_passes.selector ^= 1; } // Update selector if (!is_overwrite_okay) { num_passes = 1; // Sorted data always ends up in the other vector } d_keys.selector = (d_keys.selector + num_passes) & 1; d_values.selector = (d_values.selector + num_passes) & 1; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } //------------------------------------------------------------------------------ // Chained policy invocation //------------------------------------------------------------------------------ /// Invocation template <typename ActivePolicyT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Invoke() { typedef typename DispatchSegmentedRadixSort::MaxPolicy MaxPolicyT; // Force kernel code-generation in all compiler passes return InvokePasses<ActivePolicyT>( DeviceSegmentedRadixSortKernel<MaxPolicyT, false, IS_DESCENDING, KeyT, ValueT, OffsetIteratorT, OffsetT>, DeviceSegmentedRadixSortKernel<MaxPolicyT, true, IS_DESCENDING, KeyT, ValueT, OffsetIteratorT, OffsetT>); } //------------------------------------------------------------------------------ // Dispatch entrypoints //------------------------------------------------------------------------------ /// Internal dispatch routine CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t Dispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t &temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation DoubleBuffer<KeyT> &d_keys, ///< [in,out] Double-buffer whose current buffer contains the unsorted input keys and, upon return, is updated to point to the sorted output keys DoubleBuffer<ValueT> &d_values, ///< [in,out] Double-buffer whose current buffer contains the unsorted input values and, upon return, is updated to point to the sorted output values int num_items, ///< [in] Number of items to sort int num_segments, ///< [in] The number of segments that comprise the sorting data OffsetIteratorT d_begin_offsets, ///< [in] Pointer to the sequence of beginning offsets of length \p num_segments, such that <tt>d_begin_offsets[i]</tt> is the first element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt> OffsetIteratorT d_end_offsets, ///< [in] Pointer to the sequence of ending offsets of length \p num_segments, such that <tt>d_end_offsets[i]-1</tt> is the last element of the <em>i</em><sup>th</sup> data segment in <tt>d_keys_*</tt> and <tt>d_values_*</tt>. If <tt>d_end_offsets[i]-1</tt> <= <tt>d_begin_offsets[i]</tt>, the <em>i</em><sup>th</sup> is considered empty. int begin_bit, ///< [in] The beginning (least-significant) bit index needed for key comparison int end_bit, ///< [in] The past-the-end (most-significant) bit index needed for key comparison bool is_overwrite_okay, ///< [in] Whether is okay to overwrite source buffers cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. Also causes launch configurations to be printed to the console. Default is \p false. 
    {
        typedef typename DispatchSegmentedRadixSort::MaxPolicy MaxPolicyT;

        cudaError_t error;
        do {
            // Get PTX version
            int ptx_version;
            if (CubDebug(error = PtxVersion(ptx_version))) break;

            // Create dispatch functor
            DispatchSegmentedRadixSort dispatch(
                d_temp_storage, temp_storage_bytes,
                d_keys, d_values,
                num_items, num_segments, d_begin_offsets, d_end_offsets,
                begin_bit, end_bit, is_overwrite_okay,
                stream, debug_synchronous, ptx_version);

            // Dispatch to chained policy
            if (CubDebug(error = MaxPolicyT::Invoke(ptx_version, dispatch))) break;
        } while (0);

        return error;
    }
};


}               // CUB namespace
CUB_NS_POSTFIX  // Optional outer namespace(s)
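
/*
 * Illustrative usage sketch of the dispatcher above.  It shows the two-phase
 * pattern documented on Dispatch(): a first call with a NULL \p d_temp_storage
 * to obtain \p temp_storage_bytes, then a second call with the allocation to
 * perform the sort.  The buffer names, the problem size, and the bare
 * cudaMalloc calls without error handling are assumptions for the example only.
 *
 * \code
 * int num_items = 1 << 20;                      // example problem size (assumed)
 *
 * unsigned int *d_key_buf, *d_key_alt_buf;      // two device buffers of num_items keys each
 * cudaMalloc(&d_key_buf,     num_items * sizeof(unsigned int));
 * cudaMalloc(&d_key_alt_buf, num_items * sizeof(unsigned int));
 *
 * cub::DoubleBuffer<unsigned int>  d_keys(d_key_buf, d_key_alt_buf);
 * cub::DoubleBuffer<cub::NullType> d_values;    // keys-only sort
 *
 * // Phase 1: query temporary storage size
 * void   *d_temp_storage     = NULL;
 * size_t  temp_storage_bytes = 0;
 * cub::DispatchRadixSort<false, unsigned int, cub::NullType, int>::Dispatch(
 *     d_temp_storage, temp_storage_bytes, d_keys, d_values,
 *     num_items, 0, sizeof(unsigned int) * 8,
 *     true,        // is_overwrite_okay: both key buffers may be overwritten
 *     0, false);   // stream 0, no debug synchronization
 *
 * // Phase 2: allocate temporary storage and sort
 * cudaMalloc(&d_temp_storage, temp_storage_bytes);
 * cub::DispatchRadixSort<false, unsigned int, cub::NullType, int>::Dispatch(
 *     d_temp_storage, temp_storage_bytes, d_keys, d_values,
 *     num_items, 0, sizeof(unsigned int) * 8,
 *     true, 0, false);
 *
 * // Sorted keys are now referenced by d_keys.Current()
 * \endcode
 */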
0
rapidsai_public_repos/nvgraph/external/cub_semiring/device
rapidsai_public_repos/nvgraph/external/cub_semiring/device/dispatch/dispatch_histogram.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::DeviceHistogram provides device-wide parallel operations for constructing histogram(s) from a sequence of samples data residing within device-accessible memory. 
*/ #pragma once #include <stdio.h> #include <iterator> #include <limits> #include "../../agent/agent_histogram.cuh" #include "../../util_debug.cuh" #include "../../util_device.cuh" #include "../../thread/thread_search.cuh" #include "../../grid/grid_queue.cuh" #include "../../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /****************************************************************************** * Histogram kernel entry points *****************************************************************************/ /** * Histogram initialization kernel entry point */ template < int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed typename CounterT, ///< Integer type for counting sample occurrences per histogram bin typename OffsetT> ///< Signed integer type for global offsets __global__ void DeviceHistogramInitKernel( ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_output_bins_wrapper, ///< Number of output histogram bins per channel ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_output_histograms_wrapper, ///< Histogram counter data having logical dimensions <tt>CounterT[NUM_ACTIVE_CHANNELS][num_bins.array[CHANNEL]]</tt> GridQueue<int> tile_queue) ///< Drain queue descriptor for dynamically mapping tile data onto thread blocks { if ((threadIdx.x == 0) && (blockIdx.x == 0)) tile_queue.ResetDrain(); int output_bin = (blockIdx.x * blockDim.x) + threadIdx.x; #pragma unroll for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) { if (output_bin < num_output_bins_wrapper.array[CHANNEL]) d_output_histograms_wrapper.array[CHANNEL][output_bin] = 0; } } /** * Histogram privatized sweep kernel entry point (multi-block). Computes privatized histograms, one per thread block. */ template < typename AgentHistogramPolicyT, ///< Parameterized AgentHistogramPolicy tuning policy type int PRIVATIZED_SMEM_BINS, ///< Maximum number of histogram bins per channel (e.g., up to 256) int NUM_CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed typename SampleIteratorT, ///< The input iterator type. \iterator. 
typename CounterT, ///< Integer type for counting sample occurrences per histogram bin typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel typename OffsetT> ///< Signed integer type for global offsets __launch_bounds__ (int(AgentHistogramPolicyT::BLOCK_THREADS)) __global__ void DeviceHistogramSweepKernel( SampleIteratorT d_samples, ///< Input data to reduce ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_output_bins_wrapper, ///< The number bins per final output histogram ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_privatized_bins_wrapper, ///< The number bins per privatized histogram ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_output_histograms_wrapper, ///< Reference to final output histograms ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_privatized_histograms_wrapper, ///< Reference to privatized histograms ArrayWrapper<OutputDecodeOpT, NUM_ACTIVE_CHANNELS> output_decode_op_wrapper, ///< The transform operator for determining output bin-ids from privatized counter indices, one for each channel ArrayWrapper<PrivatizedDecodeOpT, NUM_ACTIVE_CHANNELS> privatized_decode_op_wrapper, ///< The transform operator for determining privatized counter indices from samples, one for each channel OffsetT num_row_pixels, ///< The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< The number of rows in the region of interest OffsetT row_stride_samples, ///< The number of samples between starts of consecutive rows in the region of interest int tiles_per_row, ///< Number of image tiles per row GridQueue<int> tile_queue) ///< Drain queue descriptor for dynamically mapping tile data onto thread blocks { // Thread block type for compositing input tiles typedef AgentHistogram< AgentHistogramPolicyT, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT> AgentHistogramT; // Shared memory for AgentHistogram __shared__ typename AgentHistogramT::TempStorage temp_storage; AgentHistogramT agent( temp_storage, d_samples, num_output_bins_wrapper.array, num_privatized_bins_wrapper.array, d_output_histograms_wrapper.array, d_privatized_histograms_wrapper.array, output_decode_op_wrapper.array, privatized_decode_op_wrapper.array); // Initialize counters agent.InitBinCounters(); // Consume input tiles agent.ConsumeTiles( num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue); // Store output to global (if necessary) agent.StoreOutput(); } /****************************************************************************** * Dispatch ******************************************************************************/ /** * Utility class for dispatching the appropriately-tuned kernels for DeviceHistogram */ template < int NUM_CHANNELS, ///< Number of channels interleaved in the input data (may be greater than the number of channels being actively histogrammed) int NUM_ACTIVE_CHANNELS, ///< Number of channels actively being histogrammed typename SampleIteratorT, ///< Random-access input iterator type for reading input items \iterator typename CounterT, ///< Integer type for counting sample occurrences per histogram bin typename LevelT, ///< Type for specifying bin level boundaries typename OffsetT> ///< Signed integer type for global offsets struct DipatchHistogram { 
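    // Overview: samples are converted to bin indices by one of the transform
    // functors defined below -- SearchTransform (binary-searches an explicit list
    // of bin-boundary levels via UpperBound), ScaleTransform (linearly scales
    // samples into evenly-spaced bins), or PassThruTransform (uses the sample
    // value directly as the bin index).  Channels with no more than
    // MAX_PRIVATIZED_SMEM_BINS bins can be accumulated in privatized (per-block)
    // histograms before being folded into the final output histograms.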
//--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- /// The sample value type of the input iterator typedef typename std::iterator_traits<SampleIteratorT>::value_type SampleT; enum { // Maximum number of bins per channel for which we will use a privatized smem strategy MAX_PRIVATIZED_SMEM_BINS = 256 }; //--------------------------------------------------------------------- // Transform functors for converting samples to bin-ids //--------------------------------------------------------------------- // Searches for bin given a list of bin-boundary levels template <typename LevelIteratorT> struct SearchTransform { LevelIteratorT d_levels; // Pointer to levels array int num_output_levels; // Number of levels in array // Initializer __host__ __device__ __forceinline__ void Init( LevelIteratorT d_levels, // Pointer to levels array int num_output_levels) // Number of levels in array { this->d_levels = d_levels; this->num_output_levels = num_output_levels; } // Method for converting samples to bin-ids template <CacheLoadModifier LOAD_MODIFIER, typename _SampleT> __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) { /// Level iterator wrapper type typedef typename If<IsPointer<LevelIteratorT>::VALUE, CacheModifiedInputIterator<LOAD_MODIFIER, LevelT, OffsetT>, // Wrap the native input pointer with CacheModifiedInputIterator LevelIteratorT>::Type // Directly use the supplied input iterator type WrappedLevelIteratorT; WrappedLevelIteratorT wrapped_levels(d_levels); int num_bins = num_output_levels - 1; if (valid) { bin = UpperBound(wrapped_levels, num_output_levels, (LevelT) sample) - 1; if (bin >= num_bins) bin = -1; } } }; // Scales samples to evenly-spaced bins struct ScaleTransform { int num_bins; // Number of levels in array LevelT max; // Max sample level (exclusive) LevelT min; // Min sample level (inclusive) LevelT scale; // Bin scaling factor // Initializer template <typename _LevelT> __host__ __device__ __forceinline__ void Init( int num_output_levels, // Number of levels in array _LevelT max, // Max sample level (exclusive) _LevelT min, // Min sample level (inclusive) _LevelT scale) // Bin scaling factor { this->num_bins = num_output_levels - 1; this->max = max; this->min = min; this->scale = scale; } // Initializer (float specialization) __host__ __device__ __forceinline__ void Init( int num_output_levels, // Number of levels in array float max, // Max sample level (exclusive) float min, // Min sample level (inclusive) float scale) // Bin scaling factor { this->num_bins = num_output_levels - 1; this->max = max; this->min = min; this->scale = float(1.0) / scale; } // Initializer (double specialization) __host__ __device__ __forceinline__ void Init( int num_output_levels, // Number of levels in array double max, // Max sample level (exclusive) double min, // Min sample level (inclusive) double scale) // Bin scaling factor { this->num_bins = num_output_levels - 1; this->max = max; this->min = min; this->scale = double(1.0) / scale; } // Method for converting samples to bin-ids template <CacheLoadModifier LOAD_MODIFIER, typename _SampleT> __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) { LevelT level_sample = (LevelT) sample; if (valid && (level_sample >= min) && (level_sample < max)) bin = (int) ((level_sample - min) / scale); } // Method for converting samples to bin-ids (float specialization) 
template <CacheLoadModifier LOAD_MODIFIER> __host__ __device__ __forceinline__ void BinSelect(float sample, int &bin, bool valid) { LevelT level_sample = (LevelT) sample; if (valid && (level_sample >= min) && (level_sample < max)) bin = (int) ((level_sample - min) * scale); } // Method for converting samples to bin-ids (double specialization) template <CacheLoadModifier LOAD_MODIFIER> __host__ __device__ __forceinline__ void BinSelect(double sample, int &bin, bool valid) { LevelT level_sample = (LevelT) sample; if (valid && (level_sample >= min) && (level_sample < max)) bin = (int) ((level_sample - min) * scale); } }; // Pass-through bin transform operator struct PassThruTransform { // Method for converting samples to bin-ids template <CacheLoadModifier LOAD_MODIFIER, typename _SampleT> __host__ __device__ __forceinline__ void BinSelect(_SampleT sample, int &bin, bool valid) { if (valid) bin = (int) sample; } }; //--------------------------------------------------------------------- // Tuning policies //--------------------------------------------------------------------- template <int NOMINAL_ITEMS_PER_THREAD> struct TScale { enum { V_SCALE = (sizeof(SampleT) + sizeof(int) - 1) / sizeof(int), VALUE = CUB_MAX((NOMINAL_ITEMS_PER_THREAD / NUM_ACTIVE_CHANNELS / V_SCALE), 1) }; }; /// SM11 struct Policy110 { // HistogramSweepPolicy typedef AgentHistogramPolicy< 512, (NUM_CHANNELS == 1) ? 8 : 2, BLOCK_LOAD_DIRECT, LOAD_DEFAULT, true, GMEM, false> HistogramSweepPolicy; }; /// SM20 struct Policy200 { // HistogramSweepPolicy typedef AgentHistogramPolicy< (NUM_CHANNELS == 1) ? 256 : 128, (NUM_CHANNELS == 1) ? 8 : 3, (NUM_CHANNELS == 1) ? BLOCK_LOAD_DIRECT : BLOCK_LOAD_WARP_TRANSPOSE, LOAD_DEFAULT, true, SMEM, false> HistogramSweepPolicy; }; /// SM30 struct Policy300 { // HistogramSweepPolicy typedef AgentHistogramPolicy< 512, (NUM_CHANNELS == 1) ? 
8 : 2, BLOCK_LOAD_DIRECT, LOAD_DEFAULT, true, GMEM, false> HistogramSweepPolicy; }; /// SM35 struct Policy350 { // HistogramSweepPolicy typedef AgentHistogramPolicy< 128, TScale<8>::VALUE, BLOCK_LOAD_DIRECT, LOAD_LDG, true, BLEND, true> HistogramSweepPolicy; }; /// SM50 struct Policy500 { // HistogramSweepPolicy typedef AgentHistogramPolicy< 384, TScale<16>::VALUE, BLOCK_LOAD_DIRECT, LOAD_LDG, true, SMEM, false> HistogramSweepPolicy; }; //--------------------------------------------------------------------- // Tuning policies of current PTX compiler pass //--------------------------------------------------------------------- #if (CUB_PTX_ARCH >= 500) typedef Policy500 PtxPolicy; #elif (CUB_PTX_ARCH >= 350) typedef Policy350 PtxPolicy; #elif (CUB_PTX_ARCH >= 300) typedef Policy300 PtxPolicy; #elif (CUB_PTX_ARCH >= 200) typedef Policy200 PtxPolicy; #else typedef Policy110 PtxPolicy; #endif // "Opaque" policies (whose parameterizations aren't reflected in the type signature) struct PtxHistogramSweepPolicy : PtxPolicy::HistogramSweepPolicy {}; //--------------------------------------------------------------------- // Utilities //--------------------------------------------------------------------- /** * Initialize kernel dispatch configurations with the policies corresponding to the PTX assembly we will use */ template <typename KernelConfig> CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t InitConfigs( int ptx_version, KernelConfig &histogram_sweep_config) { #if (CUB_PTX_ARCH > 0) // We're on the device, so initialize the kernel dispatch configurations with the current PTX policy return histogram_sweep_config.template Init<PtxHistogramSweepPolicy>(); #else // We're on the host, so lookup and initialize the kernel dispatch configurations with the policies that match the device's PTX version if (ptx_version >= 500) { return histogram_sweep_config.template Init<typename Policy500::HistogramSweepPolicy>(); } else if (ptx_version >= 350) { return histogram_sweep_config.template Init<typename Policy350::HistogramSweepPolicy>(); } else if (ptx_version >= 300) { return histogram_sweep_config.template Init<typename Policy300::HistogramSweepPolicy>(); } else if (ptx_version >= 200) { return histogram_sweep_config.template Init<typename Policy200::HistogramSweepPolicy>(); } else if (ptx_version >= 110) { return histogram_sweep_config.template Init<typename Policy110::HistogramSweepPolicy>(); } else { // No global atomic support return cudaErrorNotSupported; } #endif } /** * Kernel kernel dispatch configuration */ struct KernelConfig { int block_threads; int pixels_per_thread; template <typename BlockPolicy> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Init() { block_threads = BlockPolicy::BLOCK_THREADS; pixels_per_thread = BlockPolicy::PIXELS_PER_THREAD; return cudaSuccess; } }; //--------------------------------------------------------------------- // Dispatch entrypoints //--------------------------------------------------------------------- /** * Privatization-based dispatch routine */ template < typename PrivatizedDecodeOpT, ///< The transform operator type for determining privatized counter indices from samples, one for each channel typename OutputDecodeOpT, ///< The transform operator type for determining output bin-ids from privatized counter indices, one for each channel typename DeviceHistogramInitKernelT, ///< Function type of cub::DeviceHistogramInitKernel typename DeviceHistogramSweepKernelT> ///< Function type of cub::DeviceHistogramSweepKernel CUB_RUNTIME_FUNCTION 
__forceinline__ static cudaError_t PrivatizedDispatch( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_output_levels[i]</tt> - 1. int num_privatized_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS], ///< [in] Transform operators for determining bin-ids from samples, one for each channel int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS], ///< [in] Transform operators for determining bin-ids from samples, one for each channel int max_num_output_bins, ///< [in] Maximum number of output bins in any channel OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest DeviceHistogramInitKernelT histogram_init_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceHistogramInitKernel DeviceHistogramSweepKernelT histogram_sweep_kernel, ///< [in] Kernel function pointer to parameterization of cub::DeviceHistogramSweepKernel KernelConfig histogram_sweep_config, ///< [in] Dispatch parameters that match the policy that \p histogram_sweep_kernel was compiled for cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous) ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
{ #ifndef CUB_RUNTIME_ENABLED // Kernel launch not supported from this device return CubDebug(cudaErrorNotSupported); #else cudaError error = cudaSuccess; do { // Get device ordinal int device_ordinal; if (CubDebug(error = cudaGetDevice(&device_ordinal))) break; // Get SM count int sm_count; if (CubDebug(error = cudaDeviceGetAttribute (&sm_count, cudaDevAttrMultiProcessorCount, device_ordinal))) break; // Get SM occupancy for histogram_sweep_kernel int histogram_sweep_sm_occupancy; if (CubDebug(error = MaxSmOccupancy( histogram_sweep_sm_occupancy, histogram_sweep_kernel, histogram_sweep_config.block_threads))) break; // Get device occupancy for histogram_sweep_kernel int histogram_sweep_occupancy = histogram_sweep_sm_occupancy * sm_count; if (num_row_pixels * NUM_CHANNELS == row_stride_samples) { // Treat as a single linear array of samples num_row_pixels *= num_rows; num_rows = 1; row_stride_samples = num_row_pixels * NUM_CHANNELS; } // Get grid dimensions, trying to keep total blocks ~histogram_sweep_occupancy int pixels_per_tile = histogram_sweep_config.block_threads * histogram_sweep_config.pixels_per_thread; int tiles_per_row = int(num_row_pixels + pixels_per_tile - 1) / pixels_per_tile; int blocks_per_row = CUB_MIN(histogram_sweep_occupancy, tiles_per_row); int blocks_per_col = (blocks_per_row > 0) ? int(CUB_MIN(histogram_sweep_occupancy / blocks_per_row, num_rows)) : 0; int num_thread_blocks = blocks_per_row * blocks_per_col; dim3 sweep_grid_dims; sweep_grid_dims.x = (unsigned int) blocks_per_row; sweep_grid_dims.y = (unsigned int) blocks_per_col; sweep_grid_dims.z = 1; // Temporary storage allocation requirements const int NUM_ALLOCATIONS = NUM_ACTIVE_CHANNELS + 1; void* allocations[NUM_ALLOCATIONS]; size_t allocation_sizes[NUM_ALLOCATIONS]; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) allocation_sizes[CHANNEL] = size_t(num_thread_blocks) * (num_privatized_levels[CHANNEL] - 1) * sizeof(CounterT); allocation_sizes[NUM_ALLOCATIONS - 1] = GridQueue<int>::AllocationSize(); // Alias the temporary allocations from the single storage blob (or compute the necessary size of the blob) if (CubDebug(error = AliasTemporaries(d_temp_storage, temp_storage_bytes, allocations, allocation_sizes))) break; if (d_temp_storage == NULL) { // Return if the caller is simply requesting the size of the storage allocation break; } // Construct the grid queue descriptor GridQueue<int> tile_queue(allocations[NUM_ALLOCATIONS - 1]); // Setup array wrapper for histogram channel output (because we can't pass static arrays as kernel parameters) ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_output_histograms_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) d_output_histograms_wrapper.array[CHANNEL] = d_output_histograms[CHANNEL]; // Setup array wrapper for privatized per-block histogram channel output (because we can't pass static arrays as kernel parameters) ArrayWrapper<CounterT*, NUM_ACTIVE_CHANNELS> d_privatized_histograms_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) d_privatized_histograms_wrapper.array[CHANNEL] = (CounterT*) allocations[CHANNEL]; // Setup array wrapper for sweep bin transforms (because we can't pass static arrays as kernel parameters) ArrayWrapper<PrivatizedDecodeOpT, NUM_ACTIVE_CHANNELS> privatized_decode_op_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) privatized_decode_op_wrapper.array[CHANNEL] = privatized_decode_op[CHANNEL]; // Setup array wrapper for aggregation bin transforms (because we 
can't pass static arrays as kernel parameters) ArrayWrapper<OutputDecodeOpT, NUM_ACTIVE_CHANNELS> output_decode_op_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) output_decode_op_wrapper.array[CHANNEL] = output_decode_op[CHANNEL]; // Setup array wrapper for num privatized bins (because we can't pass static arrays as kernel parameters) ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_privatized_bins_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) num_privatized_bins_wrapper.array[CHANNEL] = num_privatized_levels[CHANNEL] - 1; // Setup array wrapper for num output bins (because we can't pass static arrays as kernel parameters) ArrayWrapper<int, NUM_ACTIVE_CHANNELS> num_output_bins_wrapper; for (int CHANNEL = 0; CHANNEL < NUM_ACTIVE_CHANNELS; ++CHANNEL) num_output_bins_wrapper.array[CHANNEL] = num_output_levels[CHANNEL] - 1; int histogram_init_block_threads = 256; int histogram_init_grid_dims = (max_num_output_bins + histogram_init_block_threads - 1) / histogram_init_block_threads; // Log DeviceHistogramInitKernel configuration if (debug_synchronous) _CubLog("Invoking DeviceHistogramInitKernel<<<%d, %d, 0, %lld>>>()\n", histogram_init_grid_dims, histogram_init_block_threads, (long long) stream); // Invoke histogram_init_kernel histogram_init_kernel<<<histogram_init_grid_dims, histogram_init_block_threads, 0, stream>>>( num_output_bins_wrapper, d_output_histograms_wrapper, tile_queue); // Return if empty problem if ((blocks_per_row == 0) || (blocks_per_col == 0)) break; // Log histogram_sweep_kernel configuration if (debug_synchronous) _CubLog("Invoking histogram_sweep_kernel<<<{%d, %d, %d}, %d, 0, %lld>>>(), %d pixels per thread, %d SM occupancy\n", sweep_grid_dims.x, sweep_grid_dims.y, sweep_grid_dims.z, histogram_sweep_config.block_threads, (long long) stream, histogram_sweep_config.pixels_per_thread, histogram_sweep_sm_occupancy); // Invoke histogram_sweep_kernel histogram_sweep_kernel<<<sweep_grid_dims, histogram_sweep_config.block_threads, 0, stream>>>( d_samples, num_output_bins_wrapper, num_privatized_bins_wrapper, d_output_histograms_wrapper, d_privatized_histograms_wrapper, output_decode_op_wrapper, privatized_decode_op_wrapper, num_row_pixels, num_rows, row_stride_samples, tiles_per_row, tile_queue); // Check for failure to launch if (CubDebug(error = cudaPeekAtLastError())) break; // Sync the stream if specified to flush runtime errors if (debug_synchronous && (CubDebug(error = SyncStream(stream)))) break; } while (0); return error; #endif // CUB_RUNTIME_ENABLED } /** * Dispatch routine for HistogramRange, specialized for sample types larger than 8bit */ CUB_RUNTIME_FUNCTION static cudaError_t DispatchRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_output_levels[i]</tt> - 1. 
int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. LevelT *d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. Int2Type<false> is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel dispatch configurations KernelConfig histogram_sweep_config; if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) break; // Use the search transform op for converting samples to privatized bins typedef SearchTransform<LevelT*> PrivatizedDecodeOpT; // Use the pass-thru transform op for converting privatized bins to output bins typedef PassThruTransform OutputDecodeOpT; PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; int max_levels = num_output_levels[0]; for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) { privatized_decode_op[channel].Init(d_levels[channel], num_output_levels[channel]); if (num_output_levels[channel] > max_levels) max_levels = num_output_levels[channel]; } int max_num_output_bins = max_levels - 1; // Dispatch if (max_num_output_bins > MAX_PRIVATIZED_SMEM_BINS) { // Too many bins to keep in shared memory. 
const int PRIVATIZED_SMEM_BINS = 0; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_output_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } else { // Dispatch shared-privatized approach const int PRIVATIZED_SMEM_BINS = MAX_PRIVATIZED_SMEM_BINS; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_output_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } } while (0); return error; } /** * Dispatch routine for HistogramRange, specialized for 8-bit sample types (computes 256-bin privatized histograms and then reduces to user-specified levels) */ CUB_RUNTIME_FUNCTION static cudaError_t DispatchRange( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the multi-channel input sequence of data samples. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_output_levels[i]</tt> - 1. int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of boundaries (levels) for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. LevelT *d_levels[NUM_ACTIVE_CHANNELS], ///< [in] The pointers to the arrays of boundaries (levels), one for each active channel. Bin ranges are defined by consecutive boundary pairings: lower sample value boundaries are inclusive and upper sample value boundaries are exclusive. OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
Int2Type<true> is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel dispatch configurations KernelConfig histogram_sweep_config; if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) break; // Use the pass-thru transform op for converting samples to privatized bins typedef PassThruTransform PrivatizedDecodeOpT; // Use the search transform op for converting privatized bins to output bins typedef SearchTransform<LevelT*> OutputDecodeOpT; int num_privatized_levels[NUM_ACTIVE_CHANNELS]; PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; int max_levels = num_output_levels[0]; // Maximum number of levels in any channel for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) { num_privatized_levels[channel] = 257; output_decode_op[channel].Init(d_levels[channel], num_output_levels[channel]); if (num_output_levels[channel] > max_levels) max_levels = num_output_levels[channel]; } int max_num_output_bins = max_levels - 1; const int PRIVATIZED_SMEM_BINS = 256; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_privatized_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } while (0); return error; } /** * Dispatch routine for HistogramEven, specialized for sample types larger than 8-bit */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t DispatchEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_output_levels[i]</tt> - 1. int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. 
OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. Int2Type<false> is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel dispatch configurations KernelConfig histogram_sweep_config; if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) break; // Use the scale transform op for converting samples to privatized bins typedef ScaleTransform PrivatizedDecodeOpT; // Use the pass-thru transform op for converting privatized bins to output bins typedef PassThruTransform OutputDecodeOpT; PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; int max_levels = num_output_levels[0]; for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) { int bins = num_output_levels[channel] - 1; LevelT scale = (upper_level[channel] - lower_level[channel]) / bins; privatized_decode_op[channel].Init(num_output_levels[channel], upper_level[channel], lower_level[channel], scale); if (num_output_levels[channel] > max_levels) max_levels = num_output_levels[channel]; } int max_num_output_bins = max_levels - 1; if (max_num_output_bins > MAX_PRIVATIZED_SMEM_BINS) { // Dispatch shared-privatized approach const int PRIVATIZED_SMEM_BINS = 0; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_output_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } else { // Dispatch shared-privatized approach const int PRIVATIZED_SMEM_BINS = MAX_PRIVATIZED_SMEM_BINS; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_output_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } } while (0); return error; } /** * Dispatch routine for HistogramEven, specialized for 8-bit sample types (computes 256-bin privatized histograms and then reduces to user-specified levels) */ CUB_RUNTIME_FUNCTION __forceinline__ static cudaError_t DispatchEven( void* d_temp_storage, ///< [in] %Device-accessible allocation of temporary storage. 
When NULL, the required allocation size is written to \p temp_storage_bytes and no work is done. size_t& temp_storage_bytes, ///< [in,out] Reference to size in bytes of \p d_temp_storage allocation SampleIteratorT d_samples, ///< [in] The pointer to the input sequence of sample items. The samples from different channels are assumed to be interleaved (e.g., an array of 32-bit pixels where each pixel consists of four RGBA 8-bit samples). CounterT* d_output_histograms[NUM_ACTIVE_CHANNELS], ///< [out] The pointers to the histogram counter output arrays, one for each active channel. For channel<sub><em>i</em></sub>, the allocation length of <tt>d_histograms[i]</tt> should be <tt>num_output_levels[i]</tt> - 1. int num_output_levels[NUM_ACTIVE_CHANNELS], ///< [in] The number of bin level boundaries for delineating histogram samples in each active channel. Implies that the number of bins for channel<sub><em>i</em></sub> is <tt>num_output_levels[i]</tt> - 1. LevelT lower_level[NUM_ACTIVE_CHANNELS], ///< [in] The lower sample value bound (inclusive) for the lowest histogram bin in each active channel. LevelT upper_level[NUM_ACTIVE_CHANNELS], ///< [in] The upper sample value bound (exclusive) for the highest histogram bin in each active channel. OffsetT num_row_pixels, ///< [in] The number of multi-channel pixels per row in the region of interest OffsetT num_rows, ///< [in] The number of rows in the region of interest OffsetT row_stride_samples, ///< [in] The number of samples between starts of consecutive rows in the region of interest cudaStream_t stream, ///< [in] CUDA stream to launch kernels within. Default is stream<sub>0</sub>. bool debug_synchronous, ///< [in] Whether or not to synchronize the stream after every kernel launch to check for errors. May cause significant slowdown. Default is \p false. 
Int2Type<true> is_byte_sample) ///< [in] Marker type indicating whether or not SampleT is a 8b type { cudaError error = cudaSuccess; do { // Get PTX version int ptx_version; #if (CUB_PTX_ARCH == 0) if (CubDebug(error = PtxVersion(ptx_version))) break; #else ptx_version = CUB_PTX_ARCH; #endif // Get kernel dispatch configurations KernelConfig histogram_sweep_config; if (CubDebug(error = InitConfigs(ptx_version, histogram_sweep_config))) break; // Use the pass-thru transform op for converting samples to privatized bins typedef PassThruTransform PrivatizedDecodeOpT; // Use the scale transform op for converting privatized bins to output bins typedef ScaleTransform OutputDecodeOpT; int num_privatized_levels[NUM_ACTIVE_CHANNELS]; PrivatizedDecodeOpT privatized_decode_op[NUM_ACTIVE_CHANNELS]; OutputDecodeOpT output_decode_op[NUM_ACTIVE_CHANNELS]; int max_levels = num_output_levels[0]; for (int channel = 0; channel < NUM_ACTIVE_CHANNELS; ++channel) { num_privatized_levels[channel] = 257; int bins = num_output_levels[channel] - 1; LevelT scale = (upper_level[channel] - lower_level[channel]) / bins; output_decode_op[channel].Init(num_output_levels[channel], upper_level[channel], lower_level[channel], scale); if (num_output_levels[channel] > max_levels) max_levels = num_output_levels[channel]; } int max_num_output_bins = max_levels - 1; const int PRIVATIZED_SMEM_BINS = 256; if (CubDebug(error = PrivatizedDispatch( d_temp_storage, temp_storage_bytes, d_samples, d_output_histograms, num_privatized_levels, privatized_decode_op, num_output_levels, output_decode_op, max_num_output_bins, num_row_pixels, num_rows, row_stride_samples, DeviceHistogramInitKernel<NUM_ACTIVE_CHANNELS, CounterT, OffsetT>, DeviceHistogramSweepKernel<PtxHistogramSweepPolicy, PRIVATIZED_SMEM_BINS, NUM_CHANNELS, NUM_ACTIVE_CHANNELS, SampleIteratorT, CounterT, PrivatizedDecodeOpT, OutputDecodeOpT, OffsetT>, histogram_sweep_config, stream, debug_synchronous))) break; } while (0); return error; } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
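The dispatch machinery above is normally reached through the DeviceHistogram front-end rather than invoked directly. Below is a minimal usage sketch, assuming the standard device_histogram.cuh front-end is available in this vendored tree and that d_samples / d_histogram are device buffers supplied by the caller (names and sizes are illustrative).

#include <cub/device/device_histogram.cuh>   // include path assumed; this vendored copy may differ

// Bin float samples into 256 evenly spaced bins over [0.0, 1.0).
void BuildHistogram(const float *d_samples, int *d_histogram, int num_samples)
{
    int   num_levels  = 257;     // 257 boundaries delineate 256 bins
    float lower_level = 0.0f;
    float upper_level = 1.0f;

    void  *d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;

    // First call (d_temp_storage == NULL) only reports the required allocation size
    cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
        d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Second call runs the init and sweep kernels dispatched by the code above
    cub::DeviceHistogram::HistogramEven(d_temp_storage, temp_storage_bytes,
        d_samples, d_histogram, num_levels, lower_level, upper_level, num_samples);
}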
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/grid/grid_barrier.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::GridBarrier implements a software global barrier among thread blocks within a CUDA grid */ #pragma once #include "../util_debug.cuh" #include "../util_namespace.cuh" #include "../thread/thread_load.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup GridModule * @{ */ /** * \brief GridBarrier implements a software global barrier among thread blocks within a CUDA grid */ class GridBarrier { protected : typedef unsigned int SyncFlag; // Counters in global device memory SyncFlag* d_sync; public: /** * Constructor */ GridBarrier() : d_sync(NULL) {} /** * Synchronize */ __device__ __forceinline__ void Sync() const { volatile SyncFlag *d_vol_sync = d_sync; // Threadfence and syncthreads to make sure global writes are visible before // thread-0 reports in with its sync counter __threadfence(); CTA_SYNC(); if (blockIdx.x == 0) { // Report in ourselves if (threadIdx.x == 0) { d_vol_sync[blockIdx.x] = 1; } CTA_SYNC(); // Wait for everyone else to report in for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += blockDim.x) { while (ThreadLoad<LOAD_CG>(d_sync + peer_block) == 0) { __threadfence_block(); } } CTA_SYNC(); // Let everyone know it's safe to proceed for (int peer_block = threadIdx.x; peer_block < gridDim.x; peer_block += blockDim.x) { d_vol_sync[peer_block] = 0; } } else { if (threadIdx.x == 0) { // Report in d_vol_sync[blockIdx.x] = 1; // Wait for acknowledgment while (ThreadLoad<LOAD_CG>(d_sync + blockIdx.x) == 1) { __threadfence_block(); } } CTA_SYNC(); } } }; /** * \brief GridBarrierLifetime extends GridBarrier to provide lifetime management of the temporary device storage needed for cooperation. * * Uses RAII for lifetime, i.e., device resources are reclaimed when * the destructor is called. 
*/ class GridBarrierLifetime : public GridBarrier { protected: // Number of bytes backed by d_sync size_t sync_bytes; public: /** * Constructor */ GridBarrierLifetime() : GridBarrier(), sync_bytes(0) {} /** * DeviceFrees and resets the progress counters */ cudaError_t HostReset() { cudaError_t retval = cudaSuccess; if (d_sync) { CubDebug(retval = cudaFree(d_sync)); d_sync = NULL; } sync_bytes = 0; return retval; } /** * Destructor */ virtual ~GridBarrierLifetime() { HostReset(); } /** * Sets up the progress counters for the next kernel launch (lazily * allocating and initializing them if necessary) */ cudaError_t Setup(int sweep_grid_size) { cudaError_t retval = cudaSuccess; do { size_t new_sync_bytes = sweep_grid_size * sizeof(SyncFlag); if (new_sync_bytes > sync_bytes) { if (d_sync) { if (CubDebug(retval = cudaFree(d_sync))) break; } sync_bytes = new_sync_bytes; // Allocate and initialize to zero if (CubDebug(retval = cudaMalloc((void**) &d_sync, sync_bytes))) break; if (CubDebug(retval = cudaMemset(d_sync, 0, new_sync_bytes))) break; } } while (0); return retval; } }; /** @} */ // end group GridModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
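A brief usage sketch for the two classes above. The kernel name (TwoPhaseKernel) and the device buffer d_data are assumptions, not part of this header, and a software grid barrier is only safe when every thread block of the launch is simultaneously resident on the device.

// Kernel whose blocks rendezvous at a grid-wide software barrier between two phases.
__global__ void TwoPhaseKernel(cub::GridBarrier global_barrier, int *d_data)
{
    // ... phase 1: each block writes its portion of d_data ...
    global_barrier.Sync();      // wait until every block has finished phase 1
    // ... phase 2: blocks may now read what other blocks produced ...
}

// Host side: GridBarrierLifetime owns the sync-flag storage and frees it on destruction.
void LaunchTwoPhase(int *d_data)
{
    cub::GridBarrierLifetime global_barrier;
    int grid_size  = 64;        // must not exceed the number of co-resident thread blocks
    int block_size = 256;
    global_barrier.Setup(grid_size);                 // lazily allocates and zeroes one SyncFlag per block
    TwoPhaseKernel<<<grid_size, block_size>>>(global_barrier, d_data);
    cudaDeviceSynchronize();                         // finish before the barrier storage goes out of scope
}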
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/grid/grid_even_share.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::GridEvenShare is a descriptor utility for distributing input among CUDA thread blocks in an "even-share" fashion. Each thread block gets roughly the same number of fixed-size work units (grains). */ #pragma once #include "../util_namespace.cuh" #include "../util_macro.cuh" #include "grid_mapping.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup GridModule * @{ */ /** * \brief GridEvenShare is a descriptor utility for distributing input among * CUDA thread blocks in an "even-share" fashion. Each thread block gets roughly * the same number of input tiles. * * \par Overview * Each thread block is assigned a consecutive sequence of input tiles. To help * preserve alignment and eliminate the overhead of guarded loads for all but the * last thread block, to GridEvenShare assigns one of three different amounts of * work to a given thread block: "big", "normal", or "last". The "big" workloads * are one scheduling grain larger than "normal". The "last" work unit for the * last thread block may be partially-full if the input is not an even multiple of * the scheduling grain size. * * \par * Before invoking a child grid, a parent thread will typically construct an * instance of GridEvenShare. The instance can be passed to child thread blocks * which can initialize their per-thread block offsets using \p BlockInit(). 
*/ template <typename OffsetT> struct GridEvenShare { private: OffsetT total_tiles; int big_shares; OffsetT big_share_items; OffsetT normal_share_items; OffsetT normal_base_offset; public: /// Total number of input items OffsetT num_items; /// Grid size in thread blocks int grid_size; /// OffsetT into input marking the beginning of the owning thread block's segment of input tiles OffsetT block_offset; /// OffsetT into input of marking the end (one-past) of the owning thread block's segment of input tiles OffsetT block_end; /// Stride between input tiles OffsetT block_stride; /** * \brief Constructor. */ __host__ __device__ __forceinline__ GridEvenShare() : total_tiles(0), big_shares(0), big_share_items(0), normal_share_items(0), normal_base_offset(0), num_items(0), grid_size(0), block_offset(0), block_end(0), block_stride(0) {} /** * \brief Dispatch initializer. To be called prior prior to kernel launch. */ __host__ __device__ __forceinline__ void DispatchInit( OffsetT num_items, ///< Total number of input items int max_grid_size, ///< Maximum grid size allowable (actual grid size may be less if not warranted by the the number of input items) int tile_items) ///< Number of data items per input tile { this->block_offset = num_items; // Initialize past-the-end this->block_end = num_items; // Initialize past-the-end this->num_items = num_items; this->total_tiles = (num_items + tile_items - 1) / tile_items; this->grid_size = CUB_MIN(total_tiles, max_grid_size); OffsetT avg_tiles_per_block = total_tiles / grid_size; this->big_shares = total_tiles - (avg_tiles_per_block * grid_size); // leftover grains go to big blocks this->normal_share_items = avg_tiles_per_block * tile_items; this->normal_base_offset = big_shares * tile_items; this->big_share_items = normal_share_items + tile_items; } /** * \brief Initializes ranges for the specified thread block index. Specialized * for a "raking" access pattern in which each thread block is assigned a * consecutive sequence of input tiles. */ template <int TILE_ITEMS> __device__ __forceinline__ void BlockInit( int block_id, Int2Type<GRID_MAPPING_RAKE> /*strategy_tag*/) { block_stride = TILE_ITEMS; if (block_id < big_shares) { // This thread block gets a big share of grains (avg_tiles_per_block + 1) block_offset = (block_id * big_share_items); block_end = block_offset + big_share_items; } else if (block_id < total_tiles) { // This thread block gets a normal share of grains (avg_tiles_per_block) block_offset = normal_base_offset + (block_id * normal_share_items); block_end = CUB_MIN(num_items, block_offset + normal_share_items); } // Else default past-the-end } /** * \brief Block-initialization, specialized for a "raking" access * pattern in which each thread block is assigned a consecutive sequence * of input tiles. */ template <int TILE_ITEMS> __device__ __forceinline__ void BlockInit( int block_id, Int2Type<GRID_MAPPING_STRIP_MINE> /*strategy_tag*/) { block_stride = grid_size * TILE_ITEMS; block_offset = (block_id * TILE_ITEMS); block_end = num_items; } /** * \brief Block-initialization, specialized for "strip mining" access * pattern in which the input tiles assigned to each thread block are * separated by a stride equal to the the extent of the grid. 
*/ template < int TILE_ITEMS, GridMappingStrategy STRATEGY> __device__ __forceinline__ void BlockInit() { BlockInit<TILE_ITEMS>(blockIdx.x, Int2Type<STRATEGY>()); } /** * \brief Block-initialization, specialized for a "raking" access * pattern in which each thread block is assigned a consecutive sequence * of input tiles. */ template <int TILE_ITEMS> __device__ __forceinline__ void BlockInit( OffsetT block_offset, ///< [in] Threadblock begin offset (inclusive) OffsetT block_end) ///< [in] Threadblock end offset (exclusive) { this->block_offset = block_offset; this->block_end = block_end; this->block_stride = TILE_ITEMS; } }; /** @} */ // end group GridModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
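A hedged sketch of the host/device pattern described in the overview above. The inputs num_items, max_grid_size, d_in, and d_out are assumed to come from the caller, and the tile shape is an arbitrary example.

// Device side: each block consumes a consecutive run of tiles ("raking").
template <int TILE_ITEMS>
__global__ void RakeKernel(cub::GridEvenShare<int> even_share, const int *d_in, int *d_out)
{
    even_share.BlockInit<TILE_ITEMS, cub::GRID_MAPPING_RAKE>();
    int thread_sum = 0;
    // Visit this block's consecutive tiles; block_stride == TILE_ITEMS under RAKE
    for (int tile_base = even_share.block_offset;
         tile_base < even_share.block_end;
         tile_base += even_share.block_stride)
    {
        // Each thread strides through the tile, guarding against the ragged final tile
        for (int i = tile_base + threadIdx.x; i < tile_base + TILE_ITEMS; i += blockDim.x)
        {
            if (i < even_share.num_items)
                thread_sum += d_in[i];
        }
    }
    if (threadIdx.x == 0)
        d_out[blockIdx.x] = thread_sum;   // placeholder output (a real kernel would reduce across the block)
}

// Host side: carve num_items into tiles and launch at most max_grid_size blocks.
void LaunchRake(const int *d_in, int *d_out, int num_items, int max_grid_size)
{
    const int BLOCK_THREADS    = 128;
    const int ITEMS_PER_THREAD = 4;
    const int TILE_ITEMS       = BLOCK_THREADS * ITEMS_PER_THREAD;

    cub::GridEvenShare<int> even_share;
    even_share.DispatchInit(num_items, max_grid_size, TILE_ITEMS);
    RakeKernel<TILE_ITEMS><<<even_share.grid_size, BLOCK_THREADS>>>(even_share, d_in, d_out);
}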
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/grid/grid_queue.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::GridQueue is a descriptor utility for dynamic queue management. */ #pragma once #include "../util_namespace.cuh" #include "../util_debug.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup GridModule * @{ */ /** * \brief GridQueue is a descriptor utility for dynamic queue management. * * \par Overview * GridQueue descriptors provides abstractions for "filling" or * "draining" globally-shared vectors. * * \par * A "filling" GridQueue works by atomically-adding to a zero-initialized counter, * returning a unique offset for the calling thread to write its items. * The GridQueue maintains the total "fill-size". The fill counter must be reset * using GridQueue::ResetFill by the host or kernel instance prior to the kernel instance that * will be filling. * * \par * Similarly, a "draining" GridQueue works by works by atomically-incrementing a * zero-initialized counter, returning a unique offset for the calling thread to * read its items. Threads can safely drain until the array's logical fill-size is * exceeded. The drain counter must be reset using GridQueue::ResetDrain or * GridQueue::FillAndResetDrain by the host or kernel instance prior to the kernel instance that * will be filling. (For dynamic work distribution of existing data, the corresponding fill-size * is simply the number of elements in the array.) * * \par * Iterative work management can be implemented simply with a pair of flip-flopping * work buffers, each with an associated set of fill and drain GridQueue descriptors. 
* * \tparam OffsetT Signed integer type for global offsets */ template <typename OffsetT> class GridQueue { private: /// Counter indices enum { FILL = 0, DRAIN = 1, }; /// Pair of counters OffsetT *d_counters; public: /// Returns the device allocation size in bytes needed to construct a GridQueue instance __host__ __device__ __forceinline__ static size_t AllocationSize() { return sizeof(OffsetT) * 2; } /// Constructs an invalid GridQueue descriptor __host__ __device__ __forceinline__ GridQueue() : d_counters(NULL) {} /// Constructs a GridQueue descriptor around the device storage allocation __host__ __device__ __forceinline__ GridQueue( void *d_storage) ///< Device allocation to back the GridQueue. Must be at least as big as <tt>AllocationSize()</tt>. : d_counters((OffsetT*) d_storage) {} /// This operation sets the fill-size and resets the drain counter, preparing the GridQueue for draining in the next kernel instance. To be called by the host or by a kernel prior to that which will be draining. __host__ __device__ __forceinline__ cudaError_t FillAndResetDrain( OffsetT fill_size, cudaStream_t stream = 0) { #if (CUB_PTX_ARCH > 0) (void)stream; d_counters[FILL] = fill_size; d_counters[DRAIN] = 0; return cudaSuccess; #else OffsetT counters[2]; counters[FILL] = fill_size; counters[DRAIN] = 0; return CubDebug(cudaMemcpyAsync(d_counters, counters, sizeof(OffsetT) * 2, cudaMemcpyHostToDevice, stream)); #endif } /// This operation resets the drain so that it may advance to meet the existing fill-size. To be called by the host or by a kernel prior to that which will be draining. __host__ __device__ __forceinline__ cudaError_t ResetDrain(cudaStream_t stream = 0) { #if (CUB_PTX_ARCH > 0) (void)stream; d_counters[DRAIN] = 0; return cudaSuccess; #else return CubDebug(cudaMemsetAsync(d_counters + DRAIN, 0, sizeof(OffsetT), stream)); #endif } /// This operation resets the fill counter. To be called by the host or by a kernel prior to that which will be filling. __host__ __device__ __forceinline__ cudaError_t ResetFill(cudaStream_t stream = 0) { #if (CUB_PTX_ARCH > 0) (void)stream; d_counters[FILL] = 0; return cudaSuccess; #else return CubDebug(cudaMemsetAsync(d_counters + FILL, 0, sizeof(OffsetT), stream)); #endif } /// Returns the fill-size established by the parent or by the previous kernel. __host__ __device__ __forceinline__ cudaError_t FillSize( OffsetT &fill_size, cudaStream_t stream = 0) { #if (CUB_PTX_ARCH > 0) (void)stream; fill_size = d_counters[FILL]; return cudaSuccess; #else return CubDebug(cudaMemcpyAsync(&fill_size, d_counters + FILL, sizeof(OffsetT), cudaMemcpyDeviceToHost, stream)); #endif } /// Drain \p num_items from the queue. Returns offset from which to read items. To be called from CUDA kernel. __device__ __forceinline__ OffsetT Drain(OffsetT num_items) { return atomicAdd(d_counters + DRAIN, num_items); } /// Fill \p num_items into the queue. Returns offset from which to write items. To be called from CUDA kernel. __device__ __forceinline__ OffsetT Fill(OffsetT num_items) { return atomicAdd(d_counters + FILL, num_items); } }; #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document /** * Reset grid queue (call with 1 block of 1 thread) */ template <typename OffsetT> __global__ void FillAndResetDrainKernel( GridQueue<OffsetT> grid_queue, OffsetT num_items) { grid_queue.FillAndResetDrain(num_items); } #endif // DOXYGEN_SHOULD_SKIP_THIS /** @} */ // end group GridModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
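A minimal sketch of the dynamic ("draining") pattern described above. The input d_in, the item count, and the launch configuration are assumed, and OffsetT is taken to be int for brevity.

// Device: thread blocks dynamically reserve tiles until the fill-size is exhausted.
__global__ void DrainKernel(cub::GridQueue<int> work_queue, const int *d_in, int num_items, int tile_items)
{
    __shared__ int tile_offset;
    while (true)
    {
        if (threadIdx.x == 0)
            tile_offset = work_queue.Drain(tile_items);   // atomically grab the next tile
        __syncthreads();
        if (tile_offset >= num_items)
            break;                                        // nothing left to drain
        // ... consume d_in[tile_offset .. min(tile_offset + tile_items, num_items)) ...
        __syncthreads();                                  // keep tile_offset stable until all threads are done
    }
}

// Host: allocate the two counters, set the fill-size, reset the drain counter, then launch.
void LaunchDrain(const int *d_in, int num_items, int grid_size, int block_size, int tile_items)
{
    void *d_queue_storage = NULL;
    cudaMalloc(&d_queue_storage, cub::GridQueue<int>::AllocationSize());   // room for fill + drain counters
    cub::GridQueue<int> work_queue(d_queue_storage);
    work_queue.FillAndResetDrain(num_items);              // fill-size = num_items, drain counter = 0
    DrainKernel<<<grid_size, block_size>>>(work_queue, d_in, num_items, tile_items);
}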
0
rapidsai_public_repos/nvgraph/external/cub_semiring
rapidsai_public_repos/nvgraph/external/cub_semiring/grid/grid_mapping.cuh
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /** * \file * cub::GridMappingStrategy enumerates alternative strategies for mapping constant-sized tiles of device-wide data onto a grid of CUDA thread blocks. */ #pragma once #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { /** * \addtogroup GridModule * @{ */ /****************************************************************************** * Mapping policies *****************************************************************************/ /** * \brief cub::GridMappingStrategy enumerates alternative strategies for mapping constant-sized tiles of device-wide data onto a grid of CUDA thread blocks. */ enum GridMappingStrategy { /** * \brief An a "raking" access pattern in which each thread block is * assigned a consecutive sequence of input tiles * * \par Overview * The input is evenly partitioned into \p p segments, where \p p is * constant and corresponds loosely to the number of thread blocks that may * actively reside on the target device. Each segment is comprised of * consecutive tiles, where a tile is a small, constant-sized unit of input * to be processed to completion before the thread block terminates or * obtains more work. The kernel invokes \p p thread blocks, each * of which iteratively consumes a segment of <em>n</em>/<em>p</em> elements * in tile-size increments. */ GRID_MAPPING_RAKE, /** * \brief An a "strip mining" access pattern in which the input tiles assigned * to each thread block are separated by a stride equal to the the extent of * the grid. * * \par Overview * The input is evenly partitioned into \p p sets, where \p p is * constant and corresponds loosely to the number of thread blocks that may * actively reside on the target device. 
Each set is comprised of * data tiles separated by stride \p tiles, where a tile is a small, * constant-sized unit of input to be processed to completion before the * thread block terminates or obtains more work. The kernel invokes \p p * thread blocks, each of which iteratively consumes a segment of * <em>n</em>/<em>p</em> elements in tile-size increments. */ GRID_MAPPING_STRIP_MINE, /** * \brief A dynamic "queue-based" strategy for assigning input tiles to thread blocks. * * \par Overview * The input is treated as a queue to be dynamically consumed by a grid of * thread blocks. Work is atomically dequeued in tiles, where a tile is a * unit of input to be processed to completion before the thread block * terminates or obtains more work. The grid size \p p is constant, * loosely corresponding to the number of thread blocks that may actively * reside on the target device. */ GRID_MAPPING_DYNAMIC, }; /** @} */ // end group GridModule } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
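As a counterpart to the consecutive-tile ("raking") mapping, here is a hedged sketch of the strip-mining mapping using GridEvenShare (declared in grid_even_share.cuh) with the GRID_MAPPING_STRIP_MINE tag. It assumes TILE_ITEMS equals the block size so each thread handles one item per tile; d_in and d_block_sums are illustrative names.

template <int TILE_ITEMS>
__global__ void StripMineSum(cub::GridEvenShare<int> even_share, const int *d_in, int *d_block_sums)
{
    even_share.BlockInit<TILE_ITEMS, cub::GRID_MAPPING_STRIP_MINE>();
    int thread_sum = 0;
    for (int tile_base = even_share.block_offset;
         tile_base < even_share.block_end;                // block_end == num_items under STRIP_MINE
         tile_base += even_share.block_stride)            // stride == grid_size * TILE_ITEMS
    {
        int idx = tile_base + threadIdx.x;
        if (idx < even_share.block_end)                   // guard the ragged final tile
            thread_sum += d_in[idx];
    }
    if (threadIdx.x == 0)
        d_block_sums[blockIdx.x] = thread_sum;            // placeholder output (a block-wide reduction would follow)
}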
0
rapidsai_public_repos/nvgraph
rapidsai_public_repos/nvgraph/cpp/CMakeLists.txt
#============================================================================= # Copyright (c) 2019, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #============================================================================= cmake_minimum_required(VERSION 3.12 FATAL_ERROR) project(NV_GRAPH VERSION 0.4.0 LANGUAGES C CXX CUDA) find_package(CUDA) ################################################################################################### # - compiler options ------------------------------------------------------------------------------ set(CMAKE_CXX_STANDARD 11) set(CMAKE_C_COMPILER $ENV{CC}) set(CMAKE_CXX_COMPILER $ENV{CXX}) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CUDA_STANDARD 11) set(CMAKE_CUDA_STANDARD_REQUIRED ON) if(CMAKE_COMPILER_IS_GNUCXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") endif(CMAKE_COMPILER_IS_GNUCXX) set(GPU_ARCHS "" CACHE STRING "List of GPU architectures (semicolon-separated) to be compiled for. Default is to compile for a pre-determined st of gpu-architectures based on CTK version.") if("${GPU_ARCHS}" STREQUAL "") set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode=arch=compute_60,code=sm_60") if((CUDA_VERSION_MAJOR EQUAL 9) OR (CUDA_VERSION_MAJOR GREATER 9)) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode=arch=compute_70,code=sm_70") endif() if((CUDA_VERSION_MAJOR EQUAL 10) OR (CUDA_VERSION_MAJOR GREATER 10)) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode=arch=compute_75,code=sm_75 -gencode=arch=compute_75,code=compute_75") endif() else() foreach(arch ${GPU_ARCHS}) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode arch=compute_${arch},code=sm_${arch}") endforeach() list(GET GPU_ARCHS -1 ptx) set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -gencode arch=compute_${ptx},code=compute_${ptx}") endif() # set warnings as errors set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Werror cross-execution-space-call -Xcompiler -Wall,-Werror") # set default build type set(CMAKE_BUILD_TYPE "Release") option(BUILD_TESTS "Configure CMake to build tests" ON) if(CMAKE_COMPILER_IS_GNUCXX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") option(CMAKE_CXX11_ABI "Enable the GLIBCXX11 ABI" ON) if(CMAKE_CXX11_ABI) message(STATUS "nvGraph: Enabling the GLIBCXX11 ABI") else() message(STATUS "nvGraph: Disabling the GLIBCXX11 ABI") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0") set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler -D_GLIBCXX_USE_CXX11_ABI=0") endif(CMAKE_CXX11_ABI) endif(CMAKE_COMPILER_IS_GNUCXX) if(NOT DEFINED NVGRAPH_LIGHT) set(NVGRAPH_LIGHT True) endif(NOT DEFINED NVGRAPH_LIGHT) ################################################################################################### # - cmake modules --------------------------------------------------------------------------------- set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/" ${CMAKE_MODULE_PATH}) include(FeatureSummary) include(CheckIncludeFiles) include(CheckLibraryExists) 
################################################################################################### # - add gtest ------------------------------------------------------------------------------------- if(BUILD_TESTS) include(CTest) include(ConfigureGoogleTest) if(GTEST_FOUND) message(STATUS "Google C++ Testing Framework (Google Test) found in ${GTEST_ROOT}") include_directories(${GTEST_INCLUDE_DIR}) add_subdirectory(${CMAKE_SOURCE_DIR}/tests) else() message(AUTHOR_WARNING "Google C++ Testing Framework (Google Test) not found: automated tests are disabled.") endif(GTEST_FOUND) endif(BUILD_TESTS) ################################################################################################### # - include paths --------------------------------------------------------------------------------- include_directories( "${CMAKE_BINARY_DIR}/include" "${CMAKE_SOURCE_DIR}/include" "${CMAKE_SOURCE_DIR}/thirdparty/cub" "${CMAKE_SOURCE_DIR}/thirdparty/cnmem/include" "${CMAKE_SOURCE_DIR}/../external" "${CMAKE_SOURCE_DIR}/../external/cusp" "${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}" ) ################################################################################################### # - library paths --------------------------------------------------------------------------------- link_directories("${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}" # CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES is an undocumented/unsupported variable containing the link directories for nvcc "${CMAKE_BINARY_DIR}/lib" "${GTEST_LIBRARY_DIR}") ################################################################################################### # - library targets ------------------------------------------------------------------------------- if(NVGRAPH_LIGHT MATCHES True) add_library(nvgraph SHARED thirdparty/cnmem/src/cnmem.cpp src/arnoldi.cu src/bfs.cu src/bfs2d.cu src/bfs_kernels.cu src/convert.cu src/csrmv.cu src/csrmv_cub.cu src/csr_graph.cpp src/graph_extractor.cu src/jaccard_gpu.cu src/kmeans.cu src/lanczos.cu src/lobpcg.cu src/matrix.cu src/modularity_maximization.cu src/nvgraph.cu src/nvgraph_cusparse.cpp src/nvgraph_cublas.cpp src/nvgraph_error.cu src/nvgraph_lapack.cu src/nvgraph_vector_kernels.cu src/pagerank.cu src/pagerank_kernels.cu src/partition.cu src/size2_selector.cu src/sssp.cu src/triangles_counting.cpp src/triangles_counting_kernels.cu src/valued_csr_graph.cpp src/widest_path.cu ) else(NVGRAPH_LIGHT MATCHES True) add_library(nvgraph SHARED thirdparty/cnmem/src/cnmem.cpp src/arnoldi.cu src/bfs.cu src/bfs2d.cu src/bfs_kernels.cu src/convert.cu src/csrmv.cu src/csrmv_cub.cu src/csr_graph.cpp src/graph_extractor.cu src/jaccard_gpu.cu src/kmeans.cu src/lanczos.cu src/lobpcg.cu src/matrix.cu src/modularity_maximization.cu src/nvgraph.cu src/nvgraph_cusparse.cpp src/nvgraph_cublas.cpp src/nvgraph_error.cu src/nvgraph_lapack.cu src/nvgraph_vector_kernels.cu src/pagerank.cu src/pagerank_kernels.cu src/partition.cu src/size2_selector.cu src/sssp.cu src/triangles_counting.cpp src/triangles_counting_kernels.cu src/valued_csr_graph.cpp src/widest_path.cu src/graph_contraction/contraction_csr_max.cu src/graph_contraction/contraction_csr_sum.cu src/graph_contraction/contraction_mv_double_mul.cu src/graph_contraction/contraction_mv_float_min.cu src/graph_contraction/contraction_csr_min.cu src/graph_contraction/contraction_mv_double_max.cu src/graph_contraction/contraction_mv_double_sum.cu src/graph_contraction/contraction_mv_float_mul.cu src/graph_contraction/contraction_csr_mul.cu src/graph_contraction/contraction_mv_double_min.cu 
src/graph_contraction/contraction_mv_float_max.cu src/graph_contraction/contraction_mv_float_sum.cu ) endif(NVGRAPH_LIGHT MATCHES True) ################################################################################################### # - build options --------------------------------------------------------------------------------- if(CMAKE_BUILD_TYPE MATCHES Debug) message(STATUS "Building with debugging flags") set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -G") endif(CMAKE_BUILD_TYPE MATCHES Debug) if(NVGRAPH_LIGHT MATCHES True) add_definitions( -DNVGRAPH_LIGHT=${NVGRAPH_LIGHT} ) endif(NVGRAPH_LIGHT MATCHES True) ################################################################################################### # - link libraries -------------------------------------------------------------------------------- target_link_libraries(nvgraph cublas cusparse curand cusolver cudart ) ################################################################################################### # - install targets ------------------------------------------------------------------------------- install(TARGETS nvgraph DESTINATION lib64) install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/include/nvgraph.h ${CMAKE_CURRENT_SOURCE_DIR}/include/test_opt_utils.cuh DESTINATION include/nvgraph)
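The install rules above ship libnvgraph and its public header. As a quick sanity check of an installed build, a minimal program only needs to create and destroy an nvGRAPH handle. The sketch below is illustrative only: the file name, install prefix, and compile line are assumptions and not part of the repository; it uses the public nvgraphCreate/nvgraphDestroy entry points from nvgraph.h.

// smoke_test.cu -- hypothetical example; compile with e.g.:
//   nvcc smoke_test.cu -I<prefix>/include/nvgraph -L<prefix>/lib64 -lnvgraph -o smoke_test
#include <cstdio>
#include "nvgraph.h"

int main()
{
    nvgraphHandle_t handle = NULL;
    nvgraphStatus_t status = nvgraphCreate(&handle);    // allocates the library context
    if (status != NVGRAPH_STATUS_SUCCESS)
    {
        std::printf("nvgraphCreate failed with status %d\n", static_cast<int>(status));
        return 1;
    }
    std::printf("nvGraph handle created\n");
    nvgraphDestroy(handle);                             // releases the context
    return 0;
}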
rapidsai_public_repos/nvgraph/cpp/include/pagerank_kernels.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace nvgraph { template <typename ValueType_> void update_dangling_nodes(int n, ValueType_* dangling_nodes, ValueType_ damping_factor, cudaStream_t stream = 0); } // end namespace nvgraph
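The declaration above is implemented in src/pagerank_kernels.cu, which is not part of this header. As a rough illustration of how such a helper is typically structured, the hedged sketch below assumes that dangling_nodes holds a non-zero marker for vertices without outgoing edges and rewrites those entries to the damping value with a grid-stride kernel; the kernel and wrapper names are hypothetical and this is not the library's actual implementation.

// Hedged sketch only -- the real update_dangling_nodes lives in src/pagerank_kernels.cu.
template <typename ValueType_>
__global__ void update_dangling_nodes_kernel_sketch(int n, ValueType_* dangling_nodes, ValueType_ damping_factor)
{
    // Grid-stride loop; assumes a non-zero entry flags a dangling vertex (an assumption).
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += gridDim.x * blockDim.x)
    {
        if (dangling_nodes[i] != ValueType_(0))
            dangling_nodes[i] = damping_factor;
    }
}

template <typename ValueType_>
void update_dangling_nodes_sketch(int n, ValueType_* dangling_nodes, ValueType_ damping_factor, cudaStream_t stream)
{
    if (n <= 0) return;
    const int block = 256;
    int grid = (n + block - 1) / block;
    if (grid > 65535) grid = 65535;   // the grid-stride loop covers the remainder
    update_dangling_nodes_kernel_sketch<<<grid, block, 0, stream>>>(n, dangling_nodes, damping_factor);
}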
rapidsai_public_repos/nvgraph/cpp/include/async_event.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once class AsyncEvent { public: AsyncEvent() : async_event(NULL) { } AsyncEvent(int size) : async_event(NULL) { cudaEventCreate(&async_event); } ~AsyncEvent() { if (async_event != NULL) cudaEventDestroy(async_event); } void create() { cudaEventCreate(&async_event); } void record(cudaStream_t s = 0) { if (async_event == NULL) { cudaEventCreate(&async_event); // check if we haven't created the event yet } cudaEventRecord(async_event, s); } void sync() { cudaEventSynchronize(async_event); } private: cudaEvent_t async_event; };
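A short usage sketch for the wrapper above (illustration only; the kernel and stream below are hypothetical): record() lazily creates the cudaEvent_t on first use, so an AsyncEvent can be declared up front and only pays for the event once work is actually enqueued.

#include <cuda_runtime.h>

__global__ void fill_kernel(int* out, int value) { out[threadIdx.x] = value; }   // expects >= 32 ints

void async_event_usage_sketch(int* d_out)
{
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    AsyncEvent done;                          // no CUDA event allocated yet
    fill_kernel<<<1, 32, 0, stream>>>(d_out, 42);
    done.record(stream);                      // event created here on first use, then recorded
    done.sync();                              // host blocks until the kernel has completed

    cudaStreamDestroy(stream);
}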
rapidsai_public_repos/nvgraph/cpp/include/common_selector.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ template <typename T_ELEM> __inline__ __device__ T_ELEM __cachingLoad(const T_ELEM *addr) { #if __CUDA_ARCH__ < 350 return *addr; #else return __ldg(addr); #endif } __device__ float random_weight(int i, int j, int n) { #define RAND_MULTIPLIER 1145637293 int i_min = (min(i, j) * RAND_MULTIPLIER) % n; int i_max = (max(i, j) * RAND_MULTIPLIER) % n; return ((float)i_max / n) * i_min; } /* WARNING: notice that based on the hexadecimal number in the last line in the hash function the resulting floating point value is very likely on the order of 0.5. */ __host__ __device__ unsigned int hash_val(unsigned int a, unsigned int seed) { a ^= seed; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } /* return 1e-5 for float [sizeof(float)=4] and 1e-12 for double [sizeof(double)=8] types */ template<typename WeightType> __host__ __device__ WeightType scaling_factor(){ return (sizeof(WeightType) == 4) ? 1e-5f : 1e-12; } // Kernel to compute the weight of the edges // original version from AmgX. template <typename IndexType, typename ValueType, typename WeightType> __global__ void computeEdgeWeightsBlockDiaCsr_V2( const IndexType* row_offsets, const IndexType *row_indices, const IndexType *column_indices, const IndexType *dia_values, const ValueType* nonzero_values, const IndexType num_nonzero_blocks, WeightType *str_edge_weights, WeightType *rand_edge_weights, int num_owned, int bsize, int component, int weight_formula) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int i,j,kmin,kmax; int bsize_sq = bsize*bsize; WeightType den; int matrix_weight_entry = component*bsize+component; while (tid < num_nonzero_blocks) { i = row_indices[tid]; j = column_indices[tid]; if ((i != j) && (j < num_owned)) // skip diagonal and across-boundary edges { den = (WeightType) max(fabs(__cachingLoad(&nonzero_values[dia_values[i]*bsize_sq+matrix_weight_entry])),fabs(__cachingLoad(&nonzero_values[dia_values[j]*bsize_sq+matrix_weight_entry]))); kmin = __cachingLoad(&row_offsets[j]); //kmin = row_offsets[j]; kmax = __cachingLoad(&row_offsets[j+1]); //kmax = row_offsets[j+1]; WeightType kvalue = 0.0; bool foundk = false; for (int k=kmin;k<kmax;k++) { if ((column_indices[k] == i) /* && (column_indices[k] < num_owned) */) { kvalue = __cachingLoad(&nonzero_values[k*bsize_sq+matrix_weight_entry]); //kvalue = nonzero_values[k*bsize_sq+matrix_weight_entry]; foundk = true; break; } } // handles both symmetric & non-symmetric matrices WeightType ed_weight=0; if( foundk ) { if( weight_formula == 0 ) ed_weight = 0.5*(fabs(__cachingLoad(&nonzero_values[tid*bsize_sq+matrix_weight_entry])) + fabs(kvalue)) / den; // 0.5*(aij+aji)/max(a_ii,a_jj) else ed_weight = -0.5 * ( __cachingLoad(&nonzero_values[tid*bsize_sq+matrix_weight_entry]) / 
__cachingLoad(&nonzero_values[dia_values[i]*bsize_sq+matrix_weight_entry]) + // -0.5 * ( a_ij/a_ii + kvalue / __cachingLoad(&nonzero_values[dia_values[j]*bsize_sq+matrix_weight_entry]) ); // a_ji/a_jj ) } // 05/09/13: Perturb the edge weights slightly to handle cases where edge weights are uniform WeightType small_fraction = scaling_factor<WeightType>()*hash_val(min(i,j),max(i,j))/UINT_MAX; ed_weight += small_fraction*ed_weight; str_edge_weights[tid] = ed_weight; // fill up random unique weights if( rand_edge_weights != NULL ) rand_edge_weights[tid] = random_weight(i, j, num_owned); } tid += gridDim.x*blockDim.x; } } // Kernel to compute the weight of the edges // simple version modified for nvgraph template <typename IndexType, typename ValueType, typename WeightType> __global__ void computeEdgeWeights_simple( const IndexType* row_offsets, const IndexType *row_indices, const IndexType *column_indices, const ValueType *row_sum, const ValueType* nonzero_values, const IndexType num_nonzero_blocks, WeightType *str_edge_weights, WeightType *rand_edge_weights, int n, int weight_formula) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int i,j,kmin,kmax; WeightType den; while (tid < num_nonzero_blocks) { i = row_indices[tid]; j = column_indices[tid]; if ((i != j) && (j < n)) // skip diagonal and across-boundary edges { den = (WeightType) max(fabs(__cachingLoad(&row_sum[i])),fabs(__cachingLoad(&row_sum[j]))); kmin = __cachingLoad(&row_offsets[j]); //kmin = row_offsets[j]; kmax = __cachingLoad(&row_offsets[j+1]); //kmax = row_offsets[j+1]; WeightType kvalue = 0.0; bool foundk = false; for (int k=kmin;k<kmax;k++) { if ((column_indices[k] == i) /* && (column_indices[k] < n) */) { kvalue = __cachingLoad(&nonzero_values[k]); //kvalue = nonzero_values[k]; foundk = true; break; } } // handles both symmetric & non-symmetric matrices WeightType ed_weight=0; if( foundk ) { if( weight_formula == 0 ) ed_weight = 0.5*(fabs(__cachingLoad(&nonzero_values[tid])) + fabs(kvalue)) / den; // 0.5*(aij+aji)/max(a_ii,a_jj) else ed_weight = -0.5 * ( __cachingLoad(&nonzero_values[tid]) / __cachingLoad(&row_sum[i]) + // -0.5 * ( a_ij/a_ii + kvalue / __cachingLoad(&row_sum[j]) ); // a_ji/a_jj ) } // 05/09/13: Perturb the edge weights slightly to handle cases where edge weights are uniform WeightType small_fraction = scaling_factor<WeightType>()*hash_val(min(i,j),max(i,j))/UINT_MAX; ed_weight += small_fraction*ed_weight; str_edge_weights[tid] = ed_weight; // fill up random unique weights if( rand_edge_weights != NULL ) rand_edge_weights[tid] = random_weight(i, j, n); } tid += gridDim.x*blockDim.x; } } // Kernel to compute the weight of the edges using geometry distance between edges template <typename IndexType, typename ValueType> __global__ void computeEdgeWeightsDistance3d( const int* row_offsets, const IndexType *column_indices, const ValueType* gx, const ValueType* gy, const ValueType* gz, float *str_edge_weights, int num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; float lx, ly, lz; float px, py, pz; int kmin, kmax; int col_id; while (tid < num_rows) { lx = gx[tid]; ly = gy[tid]; lz = gz[tid]; kmin = row_offsets[tid]; kmax = row_offsets[tid+1]; for (int k=kmin;k<kmax;k++) { col_id = column_indices[k]; if (col_id != tid) // skip diagonal { px = gx[col_id]; py = gy[col_id]; pz = gz[col_id]; str_edge_weights[k] = 1.0 / sqrt((px - lx)*(px - lx) + (py - ly)*(py - ly) + (pz - lz)*(pz - lz)); } } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ 
void matchEdges(const IndexType num_rows, IndexType *partner_index, IndexType *aggregates, const IndexType *strongest_neighbour) { int potential_match, potential_match_neighbour; for (int tid= threadIdx.x + blockDim.x*blockIdx.x; tid < num_rows; tid += gridDim.x*blockDim.x) { if (partner_index[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; if (potential_match!=-1) { potential_match_neighbour = strongest_neighbour[potential_match]; if ( potential_match_neighbour == tid ) // we have a match { partner_index[tid] = potential_match; aggregates[tid] = ( potential_match > tid) ? tid : potential_match; } } } } } template <typename IndexType> __global__ void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregated, const IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (aggregated[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row { aggregates[tid] = aggregates_candidate[tid]; aggregated[tid] = 1; } tid += gridDim.x*blockDim.x; } } template<typename IndexType> __global__ void aggregateSingletons( IndexType* aggregates, IndexType numRows ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; while( tid < numRows ) { if( aggregates[tid] == -1 ) //still unaggregated! aggregates[tid] = tid; //then become a singleton tid += gridDim.x*blockDim.x; } } __device__ float random_weight2(int i, int j) { #define RAND_MULTIPLIER 1145637293 unsigned long i_min = (min(i, j) * RAND_MULTIPLIER); unsigned long i_max = (max(i, j) * RAND_MULTIPLIER); return ((float)i_min / i_max); } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, IndexType n, IndexType *aggregates, IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour, const size_t bsize, int phase, bool merge_singletons) { int tid= threadIdx.x + blockDim.x*blockIdx.x; float weight; int jcol; while (tid < n) { int strongest_unaggregated = -1; int strongest_aggregated = -1; float max_weight_unaggregated = 0.; float max_weight_aggregated = 0.; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { //TODO: check if aggregated before computing the weight jcol = column_indices[j]; if (phase == 1) weight = edge_weights[j]; else weight = random_weight2(tid, jcol); if (tid == jcol || jcol >= n) continue; // skip diagonal and halo if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) continue; // if 2nd phase only accept those who gave a hand on the 1st phase // Identify strongest aggregated and unaggregated neighbours if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated { if( merge_singletons ) // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; else aggregates[tid] = tid; } else if (strongest_unaggregated != -1) { if (phase == 
2) { float rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]); strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? strongest_unaggregated : strongest_neighbour_1phase[tid]; } else strongest_neighbour_1phase[tid] = strongest_unaggregated; } else { if (phase == 2) strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; else strongest_neighbour_1phase[tid] = tid; } } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchEdges(const IndexType num_rows, IndexType *aggregates, const int *strongest_neighbour) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int potential_match, potential_match_neighbour; while (tid < num_rows) { if (aggregates[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; potential_match_neighbour = strongest_neighbour[potential_match]; if (potential_match != -1 && potential_match_neighbour == tid) // we have a match aggregates[tid] = ( potential_match > tid ) ? tid : potential_match; /* if (potential_match != -1){ potential_match_neighbour = strongest_neighbour[potential_match]; if (potential_match_neighbour == tid) // we have a match aggregates[tid] = ( potential_match > tid ) ? tid : potential_match; } */ } tid += gridDim.x*blockDim.x; } } template <typename IndexType, int block_size> __global__ void countAggregates(const IndexType num_rows, const IndexType *aggregates, int *num_unaggregated) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int c = 0; int i = tid; while( i < num_rows ) { c += ( aggregates[i] == -1 ); i += gridDim.x * blockDim.x; } __shared__ volatile int smem[block_size]; smem[threadIdx.x] = c; __syncthreads(); for( int off = blockDim.x / 2; off >= 32; off = off / 2 ) { if( threadIdx.x < off ) smem[threadIdx.x] += smem[threadIdx.x + off]; __syncthreads(); } // warp reduce if( threadIdx.x < 32 ) { smem[threadIdx.x] += smem[threadIdx.x+16]; smem[threadIdx.x] += smem[threadIdx.x+8]; smem[threadIdx.x] += smem[threadIdx.x+4]; smem[threadIdx.x] += smem[threadIdx.x+2]; smem[threadIdx.x] += smem[threadIdx.x+1]; } if( threadIdx.x == 0 ) atomicAdd(num_unaggregated, smem[0]); } template <typename IndexType> __global__ void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, const IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (aggregates[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row aggregates[tid] = aggregates_candidate[tid]; tid+=gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Weights are read from edge_weights array // For block_dia_csr_matrix_format template <typename IndexType> __global__ void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const int n, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; float weight; while (tid < n) { float max_weight_aggregated = 0.; int strongest_aggregated = -1; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { // Compute edge weight weight = edge_weights[j]; jcol = column_indices[j]; if (jcol == tid || jcol >= n) continue; // skip diagonal // Identify strongest aggregated neighbour if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > 
strongest_aggregated))) // { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_aggregated != -1) // Found a neighbour to aggregate to { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } } else // All neighbours are unaggregated, leave alone { if (deterministic) aggregates_candidate[tid] = tid; else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } template <typename INDEX_TYPE> __global__ void computeDiagonalKernelCSR(INDEX_TYPE num_rows, const INDEX_TYPE *row_offsets, const INDEX_TYPE *col_indices, INDEX_TYPE *diag) { INDEX_TYPE row=(blockIdx.x*blockDim.x+threadIdx.x); while(row<num_rows) { int nz=row_offsets[row]; int last_nz=row_offsets[row+1]; //diag[row] = null_index; while(nz<last_nz) { int col=col_indices[nz]; if(row==col) { diag[row]=nz; //diag_end_offsets[row]=nz+1; break; } nz++; } row+=blockDim.x*gridDim.x; } } template <typename T1, typename T2> __global__ void convert_type(int n, const T1 *src, T2 *dest) { int tid=(blockIdx.x*blockDim.x+threadIdx.x); while(tid<n) { dest[tid] = static_cast<T2>(src[tid]); tid += gridDim.x*blockDim.x; } } /* // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void agreeOnProposal(const IndexType *row_offsets, const IndexType *column_indices, IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int partner; while(tid < num_block_rows) { if (aggregated[tid] == -1) { partner = partner_index[tid]; float my_weight = weight_strongest_neighbour[tid]; float partners_weight = -1; if (partner != -1) partners_weight = weight_strongest_neighbour[partner]; if (my_weight < 0. && partners_weight < 0.) { // All neighbours are aggregated, leave in current aggregate //if (deterministic!=1) //{ aggregated[tid] = 1; strongest_neighbour[tid] = -1; partner_index[tid+num_block_rows] = tid; partner_index[tid+2*num_block_rows] = tid; //} } // if my weight is smaller than my partner's weight, change my strongest neighbour else if (my_weight < partners_weight) strongest_neighbour[tid] = strongest_neighbour[partner]; } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchAggregates(IndexType *aggregates, IndexType *aggregated, IndexType *strongest_neighbour, const IndexType num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int potential_match, potential_match_neighbour, my_aggregate; while (tid < num_rows) { if (aggregated[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; if (potential_match!=-1) { potential_match_neighbour = strongest_neighbour[potential_match]; my_aggregate = aggregates[tid]; if (potential_match_neighbour == my_aggregate) // we have a match { aggregated[tid] = 1; aggregates[tid] = ( potential_match > my_aggregate) ? 
my_aggregate: potential_match; } } } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void assignUnassignedVertices(IndexType *partner_index, const IndexType num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (partner_index[tid] == -1) // Unaggregated row { partner_index[tid] = tid; } tid += gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Edge weights are computed on the fly // For block_dia_csr_matrix_format template <typename IndexType, typename ValueType> __global__ void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values, const int n, IndexType *aggregates, int bsize, int deterministic, IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; ValueType weight; int bsize_sq = bsize*bsize; while (tid < n) { int strongest_aggregated = -1; ValueType max_weight_aggregated = 0.; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (jcol >= n) continue; // Compute edge weight weight = fabs(nonzero_values[j*bsize_sq])/max( fabs(dia_values[tid*bsize_sq]),fabs(dia_values[jcol*bsize_sq])); // Identify strongest aggregated neighbour if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_aggregated != -1) // Found a neighbour to aggregate to { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } } else // All neighbours are unaggregated, leave alone { if (deterministic) aggregates_candidate[tid] = tid; else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_NoMerge(const IndexType *row_offsets, const IndexType *column_indices, float *edge_weights, const IndexType num_block_rows, IndexType* partner_index, int *strongest_neighbour, int deterministic) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jmin,jmax; float weight; int jcol; while (tid < num_block_rows) { float max_weight_unaggregated = 0.; int strongest_unaggregated = -1; if (partner_index[tid] == -1) // Unaggregated row { jmin = row_offsets[tid]; jmax = row_offsets[tid+1]; for (int j=jmin; j<jmax; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. 
weight = edge_weights[j]; // Identify strongest unaggregated neighbours if (partner_index[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } } if (strongest_unaggregated == -1) // All neighbours are aggregated { // Put in its own aggregate if (!deterministic) partner_index[tid] = tid; } else { strongest_neighbour[tid] = strongest_unaggregated; } //if (strongest_unaggregated != -1) // All neighbours are aggregated // strongest_neighbour[tid] = strongest_unaggregated; // Put in its own aggregate // partner_index[tid] = tid; //else } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_StoreWeight(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const IndexType num_block_rows, IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour, IndexType *partner_index, float *weight_strongest_neighbour, int deterministic) { int tid= threadIdx.x + blockDim.x*blockIdx.x; float weight; int jcol,jmin,jmax; int agg_jcol; while (tid < num_block_rows) { float max_weight_unaggregated = 0.; float max_weight_aggregated = 0.; int strongest_unaggregated = -1; int strongest_aggregated = -1; int partner = -1; if (aggregated[tid] == -1) // Unaggregated row { partner = partner_index[tid]; jmin = row_offsets[tid]; jmax = row_offsets[tid+1]; for (int j=jmin; j<jmax; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. weight = edge_weights[j]; agg_jcol = aggregated[jcol]; if (agg_jcol == -1 && jcol != partner && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } else if (agg_jcol != -1 && jcol != partner && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // unaggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated== -1) // All neighbours are aggregated { if (!deterministic) { if (strongest_aggregated != -1) { aggregates[tid] = aggregates[strongest_aggregated]; aggregated[tid] = 1; if (partner != -1) { aggregates[partner] = aggregates[strongest_aggregated]; aggregated[partner] = 1; } } else {// leave in its own aggregate if (partner != -1) aggregated[partner] = 1; aggregated[tid] = 1; } } } else // Found an unaggregated aggregate { weight_strongest_neighbour[tid] = max_weight_unaggregated; strongest_neighbour[tid] = aggregates[strongest_unaggregated]; } } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // computes weight on the fly template <typename IndexType, typename ValueType> __global__ void findStrongestNeighbourBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values, const IndexType n, IndexType *aggregates, int *strongest_neighbour, int bsize) { int tid= threadIdx.x + blockDim.x*blockIdx.x; ValueType weight; int jcol; int bsize_sq = bsize*bsize; while (tid < n) { ValueType max_weight_unaggregated = 0.; ValueType max_weight_aggregated = 0.; int strongest_unaggregated = -1; int strongest_aggregated = -1; if 
(aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (jcol >= n) continue; // Compute edge weight for (int k=row_offsets[jcol];k<row_offsets[jcol+1];k++) { if (column_indices[k] == tid) { weight = 0.5*(fabs(nonzero_values[j*bsize_sq]) + fabs(nonzero_values[k*bsize_sq])) / max( fabs(dia_values[tid*bsize_sq]),fabs(dia_values[jcol*bsize_sq])); break; } } // Identify strongest aggregated and unaggregated neighbours if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; else if (strongest_unaggregated != -1) strongest_neighbour[tid] = strongest_unaggregated; else strongest_neighbour[tid] = tid; } tid += gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Weights are read from edge_weights array // For block_dia_csr_matrix_format template <typename IndexType> __global__ void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const int num_block_rows, IndexType *aggregates, IndexType *aggregated, int deterministic, IndexType *aggregates_candidate, bool allow_singletons = true) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; float weight; while (tid < num_block_rows) { float max_weight_aggregated = 0.; int strongest_aggregated = -1; if (aggregated[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. 
// Identify strongest aggregated neighbour if (aggregated[jcol] != -1) { weight = edge_weights[j]; if (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated)) { max_weight_aggregated = weight; strongest_aggregated = jcol; } } } if (strongest_aggregated != -1) { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; aggregated[tid] = 1; } } else // All neighbours are unaggregated, leave alone { if (deterministic) { if (allow_singletons) aggregates_candidate[tid] = tid; } else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename ValueType> __global__ void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x*blockIdx.x; while (tIdx < numRows) { const int offset = offsets[tIdx]; const int numj = offsets[tIdx+1]-offset; for (int j=offset; j < offset+numj; j++) { int jcol = column_indices[j]; if (tIdx == jcol) { diagonal[tIdx] = values[j]; } } tIdx += gridDim.x*blockDim.x; } } template <typename INDEX_TYPE> __global__ void computeDiagonalKernelCOO(INDEX_TYPE num_nz, INDEX_TYPE *row_indices, INDEX_TYPE *col_indices, INDEX_TYPE *diag) { //BLOCKY*BLOCKX threads per nz INDEX_TYPE nz=(blockIdx.x*blockDim.x+threadIdx.x); while(nz<num_nz) { INDEX_TYPE row=row_indices[nz]; INDEX_TYPE col=col_indices[nz]; if(row==col) { //copy block to diag diag[row]=nz; //diag_end_offsets[row]=nz+1; } nz+=blockDim.x*gridDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename ValueType> __global__ void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x*blockIdx.x; while (tIdx < numRows) { diagonal[tIdx] = values[dia_idx[tIdx]]; tIdx += gridDim.x*blockDim.x; } } */
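A hedged launch sketch for computeEdgeWeights_simple above (illustration only): the kernel is a grid-stride loop over the nonzeros of a CSR matrix, so any 1D launch that covers the nonzeros is valid. The wrapper name, block size, and the assumption that COO-style row indices are available alongside the CSR row offsets are all hypothetical.

template <typename IndexType, typename ValueType>
void compute_edge_weights_sketch(const IndexType* d_row_offsets,  // CSR row offsets, size n+1
                                 const IndexType* d_row_indices,  // row index per nonzero (COO expansion), size nnz
                                 const IndexType* d_col_indices,  // CSR column indices, size nnz
                                 const ValueType* d_row_sum,      // per-row normalization terms, size n
                                 const ValueType* d_values,       // nonzero values, size nnz
                                 IndexType        nnz,
                                 int              n,
                                 float*           d_edge_weights, // output weights, size nnz
                                 cudaStream_t     stream)
{
    const int block = 256;
    int grid = (int)((nnz + block - 1) / block);
    if (grid > 65535) grid = 65535;  // the kernel's grid-stride loop covers any remainder

    // weight_formula == 0 selects the symmetrized 0.5*(|a_ij|+|a_ji|)/max(...) formula;
    // passing NULL for rand_edge_weights skips the optional random tie-breaking weights.
    computeEdgeWeights_simple<IndexType, ValueType, float>
        <<<grid, block, 0, stream>>>(d_row_offsets, d_row_indices, d_col_indices,
                                     d_row_sum, d_values, nnz,
                                     d_edge_weights, (float*)NULL, n, /*weight_formula=*/0);
}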
rapidsai_public_repos/nvgraph/cpp/include/util.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <iostream> #include <fstream> #include <ctime> #include <chrono> #include <string> #include <time.h> namespace nvlouvain{ #define BLOCK_SIZE_1D 64 #define BLOCK_SIZE_2D 16 #define CUDA_MAX_KERNEL_THREADS 256 #define CUDA_MAX_BLOCKS_1D 65535 #define CUDA_MAX_BLOCKS_2D 256 #define LOCAL_MEM_MAX 512 #define GRID_MAX_SIZE 65535 #define WARP_SIZE 32 #define CUDA_CALL( call ) \ { \ cudaError_t cudaStatus = call; \ if ( cudaSuccess != cudaStatus ) \ fprintf(stderr, "ERROR: CUDA call \"%s\" in line %d of file %s failed with %s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } #define THRUST_SAFE_CALL( call ) \ { \ try{ \ call; \ } \ catch(std::bad_alloc &e){ \ fprintf(stderr, "ERROR: THRUST call \"%s\".\n" \ #call); \ exit(-1); \ } \ } #define COLOR_GRN "\033[0;32m" #define COLOR_MGT "\033[0;35m" #define COLOR_WHT "\033[0;0m" inline std::string time_now(){ struct timespec ts; timespec_get(&ts, TIME_UTC); char buff[100]; strftime(buff, sizeof buff, "%T", gmtime(&ts.tv_sec)); std::string s = buff; s +="."+std::to_string(ts.tv_nsec).substr(0, 6); return s; } typedef enum{ NVLOUVAIN_OK = 0, NVLOUVAIN_ERR_BAD_PARAMETERS = 1, }NVLOUVAIN_STATUS; using nvlouvainStatus_t = NVLOUVAIN_STATUS; const char* nvlouvainStatusGetString(nvlouvainStatus_t status){ std::string s; switch(status){ case 0: s = "NVLOUVAIN_OK"; break; case 1: s = "NVLOUVAIN_ERR_BAD_PARAMETERS"; break; default: break; } return s.c_str(); } template<typename VecType> void display_vec(VecType vec, std::ostream& ouf=std::cout){ auto it = vec.begin(); ouf<<vec.front(); for(it = vec.begin() + 1; it!= vec.end(); ++it) { ouf<<", "<<(*it); } ouf<<"\n"; } template<typename VecType> void display_intvec_size(VecType vec, unsigned size){ printf("%d", (int)vec[0]); for(unsigned i = 1; i < size; ++i) { printf(", %d",(int)vec[i]); } printf("\n"); } template<typename VecType> void display_vec_size(VecType vec, unsigned size){ for(unsigned i = 0; i < size; ++i) { printf("%f ",vec[i]); } printf("\n"); } template<typename VecIter> __host__ __device__ void display_vec(VecIter vec, int size){ for(unsigned i = 0; i < size; ++i) { printf("%f ", (*(vec+i))); } printf("\n"); } template<typename VecType> __host__ __device__ void display_vec_with_idx(VecType vec, int size, int offset=0){ for(unsigned i = 0; i < size; ++i) { printf("idx:%d %f\n", i+offset, (*(vec+i))); } printf("\n"); } template<typename VecType> void display_cluster(std::vector<VecType>& vec, std::ostream& ouf=std::cout){ for(const auto& it: vec){ for(unsigned idx = 0; idx <it.size(); ++idx ){ ouf<<idx<<" "<<it[idx]<<std::endl; } } } template<typename VecType> int folded_print_float(VecType s){ return printf("%f\n", s); } template<typename VecType1, typename ... VecType2> int folded_print_float(VecType1 s, VecType2 ... 
vec){ return printf("%f ", s) + folded_print_float(vec...); } template<typename VecType> int folded_print_int(VecType s){ return printf("%d\n", (int)s); } template<typename VecType1, typename ... VecType2> int folded_print_int(VecType1 s, VecType2 ... vec){ return printf("%d ", (int)s) + folded_print_int(vec...); } }//nvlouvain
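A short usage sketch for the helpers above (illustration only; the buffer size and cluster values are made up): CUDA_CALL reports a failing runtime call without aborting, time_now() returns a UTC HH:MM:SS.microseconds timestamp for lightweight tracing, and display_intvec_size prints a host vector as a comma-separated list.

#include <cuda_runtime.h>
#include <vector>

inline void nvlouvain_util_usage_sketch()
{
    std::cout << nvlouvain::time_now() << " allocating device buffer" << std::endl;

    float* d_buf = NULL;
    CUDA_CALL(cudaMalloc((void**)&d_buf, 1024 * sizeof(float)));  // errors are printed, not fatal
    CUDA_CALL(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
    CUDA_CALL(cudaFree(d_buf));

    std::vector<int> clusters = {0, 0, 1, 2, 2};
    nvlouvain::display_intvec_size(clusters, clusters.size());    // prints "0, 0, 1, 2, 2"
}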
rapidsai_public_repos/nvgraph/cpp/include/common_selector.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //#pragma once namespace nvlouvain{ template <typename T_ELEM> __inline__ __device__ T_ELEM __cachingLoad(const T_ELEM *addr) { #if __CUDA_ARCH__ < 350 return *addr; #else return __ldg(addr); #endif } __device__ inline float random_weight(int i, int j, int n) { #define RAND_MULTIPLIER 1145637293 int i_min = (min(i, j) * RAND_MULTIPLIER) % n; int i_max = (max(i, j) * RAND_MULTIPLIER) % n; return ((float)i_max / n) * i_min; } /* WARNING: notice that based on the hexadecimal number in the last line in the hash function the resulting floating point value is very likely on the order of 0.5. */ __host__ __device__ inline unsigned int hash_val(unsigned int a, unsigned int seed) { a ^= seed; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } /* return 1e-5 for float [sizeof(float)=4] and 1e-12 for double [sizeof(double)=8] types */ template<typename WeightType> __host__ __device__ WeightType scaling_factor(){ return (sizeof(WeightType) == 4) ? 1e-5f : 1e-12; } // Kernel to compute the weight of the edges // original version from AmgX. 
template <typename IndexType, typename ValueType, typename WeightType> __global__ void computeEdgeWeightsBlockDiaCsr_V2( const IndexType* row_offsets, const IndexType *row_indices, const IndexType *column_indices, const IndexType *dia_values, const ValueType* nonzero_values, const IndexType num_nonzero_blocks, WeightType *str_edge_weights, WeightType *rand_edge_weights, int num_owned, int bsize, int component, int weight_formula) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int i,j,kmin,kmax; int bsize_sq = bsize*bsize; WeightType den; int matrix_weight_entry = component*bsize+component; while (tid < num_nonzero_blocks) { i = row_indices[tid]; j = column_indices[tid]; if ((i != j) && (j < num_owned)) // skip diagonal and across-boundary edges { den = (WeightType) max(fabs(__cachingLoad(&nonzero_values[dia_values[i]*bsize_sq+matrix_weight_entry])),fabs(__cachingLoad(&nonzero_values[dia_values[j]*bsize_sq+matrix_weight_entry]))); kmin = __cachingLoad(&row_offsets[j]); //kmin = row_offsets[j]; kmax = __cachingLoad(&row_offsets[j+1]); //kmax = row_offsets[j+1]; WeightType kvalue = 0.0; bool foundk = false; for (int k=kmin;k<kmax;k++) { if ((column_indices[k] == i) /* && (column_indices[k] < num_owned) */) { kvalue = __cachingLoad(&nonzero_values[k*bsize_sq+matrix_weight_entry]); //kvalue = nonzero_values[k*bsize_sq+matrix_weight_entry]; foundk = true; break; } } // handles both symmetric & non-symmetric matrices WeightType ed_weight=0; if( foundk ) { if( weight_formula == 0 ) ed_weight = 0.5*(fabs(__cachingLoad(&nonzero_values[tid*bsize_sq+matrix_weight_entry])) + fabs(kvalue)) / den; // 0.5*(aij+aji)/max(a_ii,a_jj) else ed_weight = -0.5 * ( __cachingLoad(&nonzero_values[tid*bsize_sq+matrix_weight_entry]) / __cachingLoad(&nonzero_values[dia_values[i]*bsize_sq+matrix_weight_entry]) + // -0.5 * ( a_ij/a_ii + kvalue / __cachingLoad(&nonzero_values[dia_values[j]*bsize_sq+matrix_weight_entry]) ); // a_ji/a_jj ) } // 05/09/13: Perturb the edge weights slightly to handle cases where edge weights are uniform WeightType small_fraction = scaling_factor<WeightType>()*hash_val(min(i,j),max(i,j))/UINT_MAX; ed_weight += small_fraction*ed_weight; str_edge_weights[tid] = ed_weight; // fill up random unique weights if( rand_edge_weights != NULL ) rand_edge_weights[tid] = random_weight(i, j, num_owned); } tid += gridDim.x*blockDim.x; } } // Kernel to compute the weight of the edges // simple version modified for nvgraph template <typename IndexType, typename ValueType, typename WeightType> __global__ void computeEdgeWeights_simple( const IndexType* row_offsets, const IndexType *row_indices, const IndexType *column_indices, const ValueType *row_sum, const ValueType* nonzero_values, const IndexType num_nonzero_blocks, WeightType *str_edge_weights, WeightType *rand_edge_weights, int n, int weight_formula) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int i,j,kmin,kmax; WeightType den; while (tid < num_nonzero_blocks) { i = row_indices[tid]; j = column_indices[tid]; if ((i != j) && (j < n)) // skip diagonal and across-boundary edges { den = (WeightType) max(fabs(__cachingLoad(&row_sum[i])),fabs(__cachingLoad(&row_sum[j]))); kmin = __cachingLoad(&row_offsets[j]); //kmin = row_offsets[j]; kmax = __cachingLoad(&row_offsets[j+1]); //kmax = row_offsets[j+1]; WeightType kvalue = 0.0; bool foundk = false; for (int k=kmin;k<kmax;k++) { if ((column_indices[k] == i) /* && (column_indices[k] < n) */) { kvalue = __cachingLoad(&nonzero_values[k]); //kvalue = nonzero_values[k]; foundk = true; break; } } // handles both 
symmetric & non-symmetric matrices WeightType ed_weight=0; if( foundk ) { if( weight_formula == 0 ) ed_weight = 0.5*(fabs(__cachingLoad(&nonzero_values[tid])) + fabs(kvalue)) / den; // 0.5*(aij+aji)/max(a_ii,a_jj) else ed_weight = -0.5 * ( __cachingLoad(&nonzero_values[tid]) / __cachingLoad(&row_sum[i]) + // -0.5 * ( a_ij/a_ii + kvalue / __cachingLoad(&row_sum[j]) ); // a_ji/a_jj ) } // 05/09/13: Perturb the edge weights slightly to handle cases where edge weights are uniform WeightType small_fraction = scaling_factor<WeightType>()*hash_val(min(i,j),max(i,j))/UINT_MAX; ed_weight += small_fraction*ed_weight; str_edge_weights[tid] = ed_weight; // fill up random unique weights if( rand_edge_weights != NULL ) rand_edge_weights[tid] = random_weight(i, j, n); } tid += gridDim.x*blockDim.x; } } // Kernel to compute the weight of the edges using geometry distance between edges template <typename IndexType, typename ValueType> __global__ void computeEdgeWeightsDistance3d( const int* row_offsets, const IndexType *column_indices, const ValueType* gx, const ValueType* gy, const ValueType* gz, float *str_edge_weights, int num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; float lx, ly, lz; float px, py, pz; int kmin, kmax; int col_id; while (tid < num_rows) { lx = gx[tid]; ly = gy[tid]; lz = gz[tid]; kmin = row_offsets[tid]; kmax = row_offsets[tid+1]; for (int k=kmin;k<kmax;k++) { col_id = column_indices[k]; if (col_id != tid) // skip diagonal { px = gx[col_id]; py = gy[col_id]; pz = gz[col_id]; str_edge_weights[k] = 1.0 / sqrt((px - lx)*(px - lx) + (py - ly)*(py - ly) + (pz - lz)*(pz - lz)); } } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchEdges(const IndexType num_rows, IndexType *partner_index, IndexType *aggregates, const IndexType *strongest_neighbour) { int potential_match, potential_match_neighbour; for (int tid= threadIdx.x + blockDim.x*blockIdx.x; tid < num_rows; tid += gridDim.x*blockDim.x) { if (partner_index[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; if (potential_match!=-1) { potential_match_neighbour = strongest_neighbour[potential_match]; if ( potential_match_neighbour == tid ) // we have a match { partner_index[tid] = potential_match; aggregates[tid] = ( potential_match > tid) ? tid : potential_match; } } } } } template <typename IndexType> __global__ void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, IndexType *aggregated, const IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (aggregated[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row { aggregates[tid] = aggregates_candidate[tid]; aggregated[tid] = 1; } tid += gridDim.x*blockDim.x; } } template<typename IndexType> __global__ void aggregateSingletons( IndexType* aggregates, IndexType numRows ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; while( tid < numRows ) { if( aggregates[tid] == -1 ) //still unaggregated! 
aggregates[tid] = tid; //then become a singleton tid += gridDim.x*blockDim.x; } } __device__ inline float random_weight2(int i, int j) { #define RAND_MULTIPLIER 1145637293 unsigned long i_min = (min(i, j) * RAND_MULTIPLIER); unsigned long i_max = (max(i, j) * RAND_MULTIPLIER); return ((float)i_min / i_max); } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, IndexType n, IndexType *aggregates, IndexType *strongest_neighbour_1phase, IndexType *strongest_neighbour, const size_t bsize, int phase, bool merge_singletons) { int tid = threadIdx.x + blockDim.x*blockIdx.x; float weight; int jcol; while (tid < n) { int strongest_unaggregated = -1; int strongest_aggregated = -1; float max_weight_unaggregated = 0.; float max_weight_aggregated = 0.; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { //TODO: check if aggregated before computing the weight jcol = column_indices[j]; // if (phase == 1) weight = edge_weights[j]; // else weight = random_weight2(tid, jcol); weight = edge_weights[j]; // printf("j: %d weight %f\n", j, weight); if (tid == jcol || jcol >= n) continue; // skip diagonal and halo if (phase == 2 && strongest_neighbour_1phase[jcol] != tid) continue; // if 2nd phase only accept those who gave a hand on the 1st phase // Identify strongest aggregated and unaggregated neighbours if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; // find the smallestt index with weight = max_weight } else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } // printf("-- phase: %d tid: %d strongest_neighbour: %d %f\n", phase, tid, strongest_neighbour[tid], max_weight_unaggregated); if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated { if( merge_singletons ){ // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } else{ aggregates[tid] = tid; } } else if (strongest_unaggregated != -1) { if (phase == 2) { float rand_w1 = random_weight2(tid, strongest_neighbour_1phase[tid]); strongest_neighbour[tid] = max_weight_unaggregated > rand_w1 ? 
strongest_unaggregated : strongest_neighbour_1phase[tid]; } else strongest_neighbour_1phase[tid] = strongest_unaggregated; //strongest_neighbour_1phase[tid] = strongest_unaggregated; } else { if (phase == 2) strongest_neighbour[tid] = strongest_neighbour_1phase[tid]; else strongest_neighbour_1phase[tid] = tid; } } /* if(tid<16) printf("++ phase: %d tid: %d strongest_neighbour: %d %f\n", phase, tid, strongest_neighbour[tid], max_weight_unaggregated); */ tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchEdges(const IndexType num_rows, IndexType *aggregates, const int *strongest_neighbour) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int potential_match, potential_match_neighbour; while (tid < num_rows) { if (aggregates[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; potential_match_neighbour = strongest_neighbour[potential_match]; if (potential_match != -1 && potential_match_neighbour == tid) // we have a match aggregates[tid] = ( potential_match > tid ) ? tid : potential_match; /* if (potential_match != -1){ potential_match_neighbour = strongest_neighbour[potential_match]; if (potential_match_neighbour == tid) // we have a match aggregates[tid] = ( potential_match > tid ) ? tid : potential_match; } */ } tid += gridDim.x*blockDim.x; } } template <typename IndexType, int block_size> __global__ void countAggregates(const IndexType num_rows, const IndexType *aggregates, int *num_unaggregated) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int c = 0; int i = tid; while( i < num_rows ) { c += ( aggregates[i] == -1 ); i += gridDim.x * blockDim.x; } __shared__ volatile int smem[block_size]; smem[threadIdx.x] = c; __syncthreads(); for( int off = blockDim.x / 2; off >= 32; off = off / 2 ) { if( threadIdx.x < off ) smem[threadIdx.x] += smem[threadIdx.x + off]; __syncthreads(); } // warp reduce if( threadIdx.x < 32 ) { smem[threadIdx.x] += smem[threadIdx.x+16]; smem[threadIdx.x] += smem[threadIdx.x+8]; smem[threadIdx.x] += smem[threadIdx.x+4]; smem[threadIdx.x] += smem[threadIdx.x+2]; smem[threadIdx.x] += smem[threadIdx.x+1]; } if( threadIdx.x == 0 ) atomicAdd(num_unaggregated, smem[0]); } template <typename IndexType> __global__ void joinExistingAggregates(IndexType num_rows, IndexType *aggregates, const IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (aggregates[tid] == -1 && aggregates_candidate[tid] != -1) // Unaggregated row aggregates[tid] = aggregates_candidate[tid]; tid+=gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Weights are read from edge_weights array // For block_dia_csr_matrix_format template <typename IndexType> __global__ void mergeWithExistingAggregatesBlockDiaCsr_V2(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const int n, IndexType *aggregates, int bsize, const int deterministic, IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; float weight; while (tid < n) { float max_weight_aggregated = 0.; int strongest_aggregated = -1; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { // Compute edge weight weight = edge_weights[j]; jcol = column_indices[j]; if (jcol == tid || jcol >= n) continue; // skip diagonal // Identify strongest aggregated neighbour if (aggregates[jcol] != -1 && (weight > max_weight_aggregated 
|| (weight==max_weight_aggregated && jcol > strongest_aggregated))) // { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_aggregated != -1) // Found a neighbour to aggregate to { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } } else // All neighbours are unaggregated, leave alone { if (deterministic) aggregates_candidate[tid] = tid; else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } template <typename INDEX_TYPE> __global__ void computeDiagonalKernelCSR(INDEX_TYPE num_rows, const INDEX_TYPE *row_offsets, const INDEX_TYPE *col_indices, INDEX_TYPE *diag) { INDEX_TYPE row=(blockIdx.x*blockDim.x+threadIdx.x); while(row<num_rows) { int nz=row_offsets[row]; int last_nz=row_offsets[row+1]; //diag[row] = null_index; while(nz<last_nz) { int col=col_indices[nz]; if(row==col) { diag[row]=nz; //diag_end_offsets[row]=nz+1; break; } nz++; } row+=blockDim.x*gridDim.x; } } template <typename T1, typename T2> __global__ void convert_type(int n, const T1 *src, T2 *dest) { int tid=(blockIdx.x*blockDim.x+threadIdx.x); while(tid<n) { dest[tid] = static_cast<T2>(src[tid]); tid += gridDim.x*blockDim.x; } } }//nvlouvain /* // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void agreeOnProposal(const IndexType *row_offsets, const IndexType *column_indices, IndexType num_block_rows, IndexType *aggregated, int *strongest_neighbour, float *weight_strongest_neighbour, IndexType *partner_index, int *aggregates) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int partner; while(tid < num_block_rows) { if (aggregated[tid] == -1) { partner = partner_index[tid]; float my_weight = weight_strongest_neighbour[tid]; float partners_weight = -1; if (partner != -1) partners_weight = weight_strongest_neighbour[partner]; if (my_weight < 0. && partners_weight < 0.) { // All neighbours are aggregated, leave in current aggregate //if (deterministic!=1) //{ aggregated[tid] = 1; strongest_neighbour[tid] = -1; partner_index[tid+num_block_rows] = tid; partner_index[tid+2*num_block_rows] = tid; //} } // if my weight is smaller than my partner's weight, change my strongest neighbour else if (my_weight < partners_weight) strongest_neighbour[tid] = strongest_neighbour[partner]; } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void matchAggregates(IndexType *aggregates, IndexType *aggregated, IndexType *strongest_neighbour, const IndexType num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int potential_match, potential_match_neighbour, my_aggregate; while (tid < num_rows) { if (aggregated[tid] == -1) // Unaggregated row { potential_match = strongest_neighbour[tid]; if (potential_match!=-1) { potential_match_neighbour = strongest_neighbour[potential_match]; my_aggregate = aggregates[tid]; if (potential_match_neighbour == my_aggregate) // we have a match { aggregated[tid] = 1; aggregates[tid] = ( potential_match > my_aggregate) ? 
my_aggregate: potential_match; } } } tid += gridDim.x*blockDim.x; } } // Kernel that checks if perfect matchs exist template <typename IndexType> __global__ void assignUnassignedVertices(IndexType *partner_index, const IndexType num_rows) { int tid= threadIdx.x + blockDim.x*blockIdx.x; while (tid < num_rows) { if (partner_index[tid] == -1) // Unaggregated row { partner_index[tid] = tid; } tid += gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Edge weights are computed on the fly // For block_dia_csr_matrix_format template <typename IndexType, typename ValueType> __global__ void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values, const int n, IndexType *aggregates, int bsize, int deterministic, IndexType *aggregates_candidate) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; ValueType weight; int bsize_sq = bsize*bsize; while (tid < n) { int strongest_aggregated = -1; ValueType max_weight_aggregated = 0.; if (aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (jcol >= n) continue; // Compute edge weight weight = fabs(nonzero_values[j*bsize_sq])/max( fabs(dia_values[tid*bsize_sq]),fabs(dia_values[jcol*bsize_sq])); // Identify strongest aggregated neighbour if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_aggregated != -1) // Found a neighbour to aggregate to { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; } } else // All neighbours are unaggregated, leave alone { if (deterministic) aggregates_candidate[tid] = tid; else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_NoMerge(const IndexType *row_offsets, const IndexType *column_indices, float *edge_weights, const IndexType num_block_rows, IndexType* partner_index, int *strongest_neighbour, int deterministic) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jmin,jmax; float weight; int jcol; while (tid < num_block_rows) { float max_weight_unaggregated = 0.; int strongest_unaggregated = -1; if (partner_index[tid] == -1) // Unaggregated row { jmin = row_offsets[tid]; jmax = row_offsets[tid+1]; for (int j=jmin; j<jmax; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. 
weight = edge_weights[j]; // Identify strongest unaggregated neighbours if (partner_index[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } } if (strongest_unaggregated == -1) // All neighbours are aggregated { // Put in its own aggregate if (!deterministic) partner_index[tid] = tid; } else { strongest_neighbour[tid] = strongest_unaggregated; } //if (strongest_unaggregated != -1) // All neighbours are aggregated // strongest_neighbour[tid] = strongest_unaggregated; // Put in its own aggregate // partner_index[tid] = tid; //else } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // Reads the weight from edge_weights array template <typename IndexType> __global__ void findStrongestNeighbourBlockDiaCsr_StoreWeight(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const IndexType num_block_rows, IndexType *aggregated, IndexType *aggregates, int *strongest_neighbour, IndexType *partner_index, float *weight_strongest_neighbour, int deterministic) { int tid= threadIdx.x + blockDim.x*blockIdx.x; float weight; int jcol,jmin,jmax; int agg_jcol; while (tid < num_block_rows) { float max_weight_unaggregated = 0.; float max_weight_aggregated = 0.; int strongest_unaggregated = -1; int strongest_aggregated = -1; int partner = -1; if (aggregated[tid] == -1) // Unaggregated row { partner = partner_index[tid]; jmin = row_offsets[tid]; jmax = row_offsets[tid+1]; for (int j=jmin; j<jmax; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. weight = edge_weights[j]; agg_jcol = aggregated[jcol]; if (agg_jcol == -1 && jcol != partner && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } else if (agg_jcol != -1 && jcol != partner && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // unaggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated== -1) // All neighbours are aggregated { if (!deterministic) { if (strongest_aggregated != -1) { aggregates[tid] = aggregates[strongest_aggregated]; aggregated[tid] = 1; if (partner != -1) { aggregates[partner] = aggregates[strongest_aggregated]; aggregated[partner] = 1; } } else {// leave in its own aggregate if (partner != -1) aggregated[partner] = 1; aggregated[tid] = 1; } } } else // Found an unaggregated aggregate { weight_strongest_neighbour[tid] = max_weight_unaggregated; strongest_neighbour[tid] = aggregates[strongest_unaggregated]; } } tid += gridDim.x*blockDim.x; } } // findStrongestNeighbour kernel for block_dia_csr_matrix format // computes weight on the fly template <typename IndexType, typename ValueType> __global__ void findStrongestNeighbourBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *dia_values, const ValueType *nonzero_values, const IndexType n, IndexType *aggregates, int *strongest_neighbour, int bsize) { int tid= threadIdx.x + blockDim.x*blockIdx.x; ValueType weight; int jcol; int bsize_sq = bsize*bsize; while (tid < n) { ValueType max_weight_unaggregated = 0.; ValueType max_weight_aggregated = 0.; int strongest_unaggregated = -1; int strongest_aggregated = -1; if 
(aggregates[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (jcol >= n) continue; // Compute edge weight for (int k=row_offsets[jcol];k<row_offsets[jcol+1];k++) { if (column_indices[k] == tid) { weight = 0.5*(fabs(nonzero_values[j*bsize_sq]) + fabs(nonzero_values[k*bsize_sq])) / max( fabs(dia_values[tid*bsize_sq]),fabs(dia_values[jcol*bsize_sq])); break; } } // Identify strongest aggregated and unaggregated neighbours if (aggregates[jcol] == -1 && (weight > max_weight_unaggregated || (weight==max_weight_unaggregated && jcol > strongest_unaggregated))) // unaggregated { max_weight_unaggregated= weight; strongest_unaggregated= jcol; } else if (aggregates[jcol] != -1 && (weight > max_weight_aggregated || (weight==max_weight_aggregated && jcol > strongest_aggregated))) // aggregated { max_weight_aggregated = weight; strongest_aggregated = jcol; } } if (strongest_unaggregated == -1 && strongest_aggregated != -1) // All neighbours are aggregated // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; else if (strongest_unaggregated != -1) strongest_neighbour[tid] = strongest_unaggregated; else strongest_neighbour[tid] = tid; } tid += gridDim.x*blockDim.x; } } // Kernel that merges unaggregated vertices its strongest aggregated neighbour // Weights are read from edge_weights array // For block_dia_csr_matrix_format template <typename IndexType> __global__ void mergeWithExistingAggregatesBlockDiaCsr(const IndexType *row_offsets, const IndexType *column_indices, const float *edge_weights, const int num_block_rows, IndexType *aggregates, IndexType *aggregated, int deterministic, IndexType *aggregates_candidate, bool allow_singletons = true) { int tid= threadIdx.x + blockDim.x*blockIdx.x; int jcol; float weight; while (tid < num_block_rows) { float max_weight_aggregated = 0.; int strongest_aggregated = -1; if (aggregated[tid] == -1) // Unaggregated row { for (int j=row_offsets[tid]; j<row_offsets[tid+1]; j++) { jcol = column_indices[j]; if (tid == jcol || jcol >= num_block_rows) continue; // Skip diagonal and boundary edges. 
// Identify strongest aggregated neighbour if (aggregated[jcol] != -1) { weight = edge_weights[j]; if (weight > max_weight_aggregated || (weight == max_weight_aggregated && jcol > strongest_aggregated)) { max_weight_aggregated = weight; strongest_aggregated = jcol; } } } if (strongest_aggregated != -1) { if (deterministic) { aggregates_candidate[tid] = aggregates[strongest_aggregated]; } else { // Put in same aggregate as strongest neighbour aggregates[tid] = aggregates[strongest_aggregated]; aggregated[tid] = 1; } } else // All neighbours are unaggregated, leave alone { if (deterministic) { if (allow_singletons) aggregates_candidate[tid] = tid; } else aggregates[tid] = tid; } } tid += gridDim.x*blockDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename ValueType> __global__ void getDiagonalKernel(const IndexType *offsets, const IndexType *column_indices, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x*blockIdx.x; while (tIdx < numRows) { const int offset = offsets[tIdx]; const int numj = offsets[tIdx+1]-offset; for (int j=offset; j < offset+numj; j++) { int jcol = column_indices[j]; if (tIdx == jcol) { diagonal[tIdx] = values[j]; } } tIdx += gridDim.x*blockDim.x; } } template <typename INDEX_TYPE> __global__ void computeDiagonalKernelCOO(INDEX_TYPE num_nz, INDEX_TYPE *row_indices, INDEX_TYPE *col_indices, INDEX_TYPE *diag) { //BLOCKY*BLOCKX threads per nz INDEX_TYPE nz=(blockIdx.x*blockDim.x+threadIdx.x); while(nz<num_nz) { INDEX_TYPE row=row_indices[nz]; INDEX_TYPE col=col_indices[nz]; if(row==col) { //copy block to diag diag[row]=nz; //diag_end_offsets[row]=nz+1; } nz+=blockDim.x*gridDim.x; } } // Kernel to extract diagonal for csr_matrix format template <typename IndexType, typename ValueType> __global__ void getDiagonalKernelNoDiaProp(const IndexType *dia_idx, const ValueType *values, const IndexType numRows, ValueType *diagonal) { int tIdx = threadIdx.x + blockDim.x*blockIdx.x; while (tIdx < numRows) { diagonal[tIdx] = values[dia_idx[tIdx]]; tIdx += gridDim.x*blockDim.x; } } */
0
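The aggregation kernels above (currently fenced off behind a block comment) all use the same grid-stride loop: each thread starts at threadIdx.x + blockDim.x*blockIdx.x and strides by gridDim.x*blockDim.x until it passes the row count. A minimal, self-contained sketch of that launch pattern follows; the kernel and buffer names are illustrative, not part of nvgraph.

#include <cuda_runtime.h>

// Illustrative grid-stride kernel: give every unaggregated vertex its own
// aggregate, mirroring the loop structure of the kernels above.
__global__ void assign_self_if_unaggregated(int* partner_index, int num_rows)
{
    for (int tid = threadIdx.x + blockDim.x * blockIdx.x;
         tid < num_rows;
         tid += gridDim.x * blockDim.x)
    {
        if (partner_index[tid] == -1)
            partner_index[tid] = tid;
    }
}

int main()
{
    const int num_rows = 1 << 20;
    int* partner_index = nullptr;
    cudaMalloc(&partner_index, num_rows * sizeof(int));
    cudaMemset(partner_index, 0xFF, num_rows * sizeof(int));   // every int becomes -1

    const int threads = 256;
    const int blocks  = (num_rows + threads - 1) / threads;    // the stride loop also tolerates smaller grids
    assign_self_if_unaggregated<<<blocks, threads>>>(partner_index, num_rows);
    cudaDeviceSynchronize();

    cudaFree(partner_index);
    return 0;
}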
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/exclusive_kv_scan.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "shfl.hxx" #include "sm_utils.h" namespace nvgraph { //This file is to do a blockwide reduction by key as specialized for Key-Value Pairs. //Each thread will call this function. There will be two outputs. One will be the calling thread's //own output key value pair and the other will be the block-wide aggegrate reduction of the input items //This is based on Duane Merrills's Exclusive Scan function in Cub //Implementing key value pair to be called in device functions template<typename IndexType_, typename ValueType_> //allow for different datatypes struct KeyValuePair { IndexType_ key; ValueType_ value; }; //binary reduction operator to be applied to the values- we can template on the type on //the operator for the general case but only using sum () in our case so can simplify template<typename SemiRingType_> struct ReduceByKeySum { SemiRingType_ SR; __host__ __device__ __forceinline__ ReduceByKeySum(SemiRingType_ SR) : SR(SR) //pass in semiring { } template<typename IndexType_, typename ValueType_> __host__ __device__ __forceinline__ KeyValuePair<IndexType_, ValueType_> operator() (const KeyValuePair<IndexType_, ValueType_> &first, const KeyValuePair<IndexType_, ValueType_> &second) { KeyValuePair<IndexType_, ValueType_> result = second; //check if they have matching keys and if so sum them if (first.key == second.key) result.value = SR.plus(first.value, result.value); return result; } }; //Statically determien log2(N), rounded up template <int N, int CURRENT_VAL = N, int COUNT = 0> struct Log2 { /// Static logarithm value enum { VALUE = Log2<N, (CURRENT_VAL >> 1), COUNT + 1>::VALUE }; // Inductive case }; template <int N, int COUNT> struct Log2<N, 0, COUNT> { enum {VALUE = (1 << (COUNT - 1) < N) ? // Base case COUNT : COUNT - 1 }; }; template<typename IndexType_, typename ValueType_, typename SemiRingType_, int BLOCK_DIM_X> struct PrefixSum { int laneId, warpId, linearTid; SemiRingType_ SR; //list constants enum { //number of threads per warp WARP_THREADS = 32, // The number of warp scan steps log2 STEPS = Log2<WARP_THREADS>::VALUE, // The 5-bit SHFL mask for logically splitting warps into sub-segments starts 8-bits up SHFL_C = ((-1 << STEPS) & 31) << 8, //add in more enums for the warps! //calculate the thread block size in threads BLOCK_DIM_Y = 1, BLOCK_DIM_Z = 1, BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, //calculate the number of active warps WARPS = (BLOCK_THREADS + WARP_THREADS - 1) / WARP_THREADS, }; //constructor __device__ __forceinline__ PrefixSum(SemiRingType_ SR) : SR(SR) { laneId = utils::lane_id(); //set lane id linearTid = threadIdx.x; //simple for linear 1D block warpId = (WARPS == 1) ? 
0 : linearTid / WARP_THREADS; } //Final function with the exclusive scan outputs one partial sum for the calling thread and the blockwide reduction __device__ __forceinline__ void ExclusiveKeyValueScan( KeyValuePair<IndexType_, ValueType_> &output, //input/output key value pair from the calling thread KeyValuePair<IndexType_,ValueType_> &blockAggegrate) //blockwide reduction output { KeyValuePair<IndexType_, ValueType_> inclusiveOutput; KeyValueScan(inclusiveOutput, output); //to get individual thread res CalcBlockAggregate(output, inclusiveOutput, blockAggegrate, (laneId > 0)); //to get blockwide res } //This function uses the inclusive scan below to calculate the exclusive scan __device__ __forceinline__ void KeyValueScan( KeyValuePair<IndexType_,ValueType_> &inclusiveOutput, //calling thread's inclusive-scan output item KeyValuePair<IndexType_,ValueType_> &exclusiveOutput) //calling thread's exclusive-scan output item { //exclusiveOutput is the initial input as well InclusiveKeyValueScan(exclusiveOutput, inclusiveOutput); //inclusive starts at first number and last element is total reduction //to get exclusive output shuffle the keys and values both up by 1 exclusiveOutput.key = utils::shfl_up(inclusiveOutput.key, 1); exclusiveOutput.value = utils::shfl_up(inclusiveOutput.value, 1); } //This function computes an inclusive scan odf key value pairs __device__ __forceinline__ void InclusiveKeyValueScan( KeyValuePair<IndexType_, ValueType_> input, //calling thread's input item KeyValuePair<IndexType_, ValueType_> &output //calling thread's input item ) { //__shfl_up and __ballot are intrinsic functions require SM30 or greater-send error message for lower hardwares output = input; IndexType_ predKey = utils::shfl_up(output.key, 1); //shuffle key to next neighbor unsigned int ballot = utils::ballot((predKey != output.key));//intrinsic evaluates a condition for all threads in the warp and returns a 32-bit value //where each bit gives the condition for the corresponding thread in the warp. //Mask away all lanes greater than ours ballot = ballot & utils::lane_mask_le(); //Find index of first set bit int firstLane = max(0, 31 - __clz(ballot));//Count the number of consecutive leading zero bits, //starting at the most significant bit (bit 31) of x. //Returns a value between 0 and 32 inclusive representing the number of zero bits. //Iterate scan steps for (int step = 0; step < STEPS; ++step) //only called on double not key so not specific to key value pairs { output.value = SR.shflPlus(output.value, firstLane | SHFL_C, 1 << step); //plus defined on class operator //if (threadIdx.x + blockDim.x *blockIdx.x < 4)printf("%.1f\n", output.value); } } //This completes the warp-prefix scan. Now we will use the Warp Aggregates to also calculate a blockwide aggregate // Update the calling thread's partial reduction with the warp-wide aggregates from preceding warps. 
//Also returns block-wide aggregate __device__ __forceinline__ void CalcBlockAggregate( //can add in scan operators later KeyValuePair<IndexType_, ValueType_> &partial, //Calling thread's partial reduction KeyValuePair<IndexType_, ValueType_> warpAggregate, //Warp-wide aggregate reduction of input items KeyValuePair<IndexType_, ValueType_> &blockAggregate, //Threadblock-wide aggregate reduction of input items bool laneValid = true) //Whether or not the partial belonging to the current thread is valid { //use shared memory in the block approach // Last lane in each warp shares its warp-aggregate //use 1D linear linear_tid def __shared__ KeyValuePair<IndexType_, ValueType_> warpAggregates[WARPS]; if (laneId == WARP_THREADS - 1) //number of threads per warp warpAggregates[warpId] = warpAggregate; //load into shared memory and wait until all threads are done __syncthreads(); blockAggregate = warpAggregates[0]; ReduceByKeySum<SemiRingType_> keyValAdd(SR); //call scn operator only add together if keys match for (int warp = 1; warp < WARPS; ++warp) { KeyValuePair<IndexType_, ValueType_> inclusive = keyValAdd(blockAggregate, partial); if (warpId == warp) partial = (laneValid) ? inclusive : blockAggregate; KeyValuePair<IndexType_, ValueType_> addend = warpAggregates[warp]; blockAggregate = keyValAdd(blockAggregate, addend); //only add if matching keys } } }; } //end namespace nvgraph
0
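ExclusiveKeyValueScan above leans on the semiring's shflPlus and on lane-mask/ballot helpers from sm_utils.h that are not shown in this file. As a self-contained illustration of the underlying warp-level idea only (not nvgraph's exact code), the sketch below performs a key-conditional inclusive scan with __shfl_up_sync; it assumes equal keys occupy contiguous lanes, as they do in the reduce-by-key setting this scan targets.

#include <cstdio>
#include <cuda_runtime.h>

// Warp-wide inclusive "scan by key": values are summed only while the key stays
// the same. Keys are assumed to be sorted (contiguous) across the warp.
__device__ float warp_inclusive_scan_by_key(int key, float val)
{
    const unsigned mask = 0xffffffffu;
    const int lane = threadIdx.x & 31;

    for (int offset = 1; offset < 32; offset <<= 1)
    {
        int   prev_key = __shfl_up_sync(mask, key, offset);
        float prev_val = __shfl_up_sync(mask, val, offset);
        if (lane >= offset && prev_key == key)
            val += prev_val;   // only accumulate inside the same key segment
    }
    return val;
}

__global__ void scan_by_key_kernel(const int* keys, const float* vals, float* out)
{
    const int i = threadIdx.x;   // a single warp, for illustration
    out[i] = warp_inclusive_scan_by_key(keys[i], vals[i]);
}

int main()
{
    int   h_keys[32];
    float h_vals[32];
    for (int i = 0; i < 32; ++i) { h_keys[i] = i / 8; h_vals[i] = 1.0f; }   // four segments of 8

    int *d_keys; float *d_vals, *d_out;
    cudaMalloc(&d_keys, sizeof(h_keys));
    cudaMalloc(&d_vals, sizeof(h_vals));
    cudaMalloc(&d_out,  sizeof(h_vals));
    cudaMemcpy(d_keys, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);
    cudaMemcpy(d_vals, h_vals, sizeof(h_vals), cudaMemcpyHostToDevice);

    scan_by_key_kernel<<<1, 32>>>(d_keys, d_vals, d_out);

    float h_out[32];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    std::printf("last of first segment: %f\n", h_out[7]);   // expected 8.0

    cudaFree(d_keys); cudaFree(d_vals); cudaFree(d_out);
    return 0;
}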
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/async_event.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cuda_runtime.h>

namespace nvgraph
{

// Thin wrapper around a cudaEvent_t used for host/device synchronization.
class AsyncEvent
{
public:
    AsyncEvent() : async_event(NULL) {}
    AsyncEvent(int size) : async_event(NULL) { cudaEventCreate(&async_event); }
    ~AsyncEvent()
    {
        if (async_event != NULL)
            cudaEventDestroy(async_event);
    }

    void create() { cudaEventCreate(&async_event); }

    void record(cudaStream_t s = 0)
    {
        if (async_event == NULL)
            cudaEventCreate(&async_event); // create the event lazily if it does not exist yet

        cudaEventRecord(async_event, s);
    }

    void sync() { cudaEventSynchronize(async_event); }

private:
    cudaEvent_t async_event;
};

} // namespace nvgraph
0
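A minimal usage sketch for the AsyncEvent wrapper above, assuming compilation with nvcc; the kernel is a stand-in for real work submitted to the stream.

#include <cuda_runtime.h>
#include "async_event.hxx"

__global__ void dummy_kernel() {}

int main()
{
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    nvgraph::AsyncEvent done;
    done.create();                       // explicit creation (record() would also create lazily)

    dummy_kernel<<<1, 32, 0, stream>>>();
    done.record(stream);                 // enqueue the event after the kernel
    done.sync();                         // block the host until the kernel has finished

    cudaStreamDestroy(stream);
    return 0;
}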
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/csrmv_cub.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "nvgraph.h"
#include "nvgraph_error.hxx"
#include "multi_valued_csr_graph.hxx"

namespace nvgraph
{

template <typename I, typename V>
class SemiringDispatch
{
public:
    template <typename SR>
    static NVGRAPH_ERROR Dispatch(
        const V*     d_values,
        const I*     d_row_offsets,
        const I*     d_column_indices,
        const V*     d_vector_x,
        V*           d_vector_y,
        V            alpha,
        V            beta,
        I            num_rows,
        I            num_cols,
        I            num_nonzeros,
        cudaStream_t stream);

    static NVGRAPH_ERROR InitAndLaunch(
        const nvgraph::MultiValuedCsrGraph<I, V> &graph,
        const size_t            weight_index,
        const void             *p_alpha,
        const size_t            x_index,
        const void             *p_beta,
        const size_t            y_index,
        const nvgraphSemiring_t SR,
        cudaStream_t            stream);
};

// API wrapper to avoid bloating main API object nvgraph.cpp
NVGRAPH_ERROR SemiringAPILauncher(nvgraphHandle_t handle,
                                  const nvgraphGraphDescr_t descrG,
                                  const size_t weight_index,
                                  const void *alpha,
                                  const size_t x,
                                  const void *beta,
                                  const size_t y,
                                  const nvgraphSemiring_t sr);

} //namespace nvgraph
0
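The call shape for SemiringAPILauncher is sketched below. It assumes a handle, graph descriptor and semiring value already obtained through the regular nvgraph C API, and that the graph's attached edge and vertex data are double precision so that double-typed alpha/beta are appropriate; the data slot indices are illustrative choices.

#include "nvgraph.h"
#include "csrmv_cub.h"

// Sketch: y = alpha * A * x + beta * y over the semiring `sr`, for a graph that
// has already been created and populated elsewhere. `handle`, `descr` and `sr`
// are assumed to come from that setup code.
void run_semiring_spmv(nvgraphHandle_t handle,
                       nvgraphGraphDescr_t descr,
                       nvgraphSemiring_t sr)
{
    const size_t weight_index = 0;   // edge-data slot holding the CSR values
    const size_t x_index      = 0;   // vertex-data slot holding x
    const size_t y_index      = 1;   // vertex-data slot receiving y
    const double alpha = 1.0;
    const double beta  = 0.0;

    const auto status = nvgraph::SemiringAPILauncher(handle, descr, weight_index,
                                                     &alpha, x_index,
                                                     &beta, y_index, sr);
    (void)status;                    // real code would check the returned status
}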
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvlouvain.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <string> #include <cstring> #include <vector> #include <cmath> #include <fstream> #include <chrono> #include <cuda.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <cusparse.h> #include "graph_utils.cuh" #include "modularity.cuh" #include "delta_modularity.cuh" #include "high_res_clock.h" #include "size2_selector.cuh" #include "thrust_coarse_generator.cuh" namespace nvlouvain{ //#define VERBOSE true #define LOG() (log<<COLOR_GRN<<"[ "<< time_now() <<" ] "<<COLOR_WHT) /* The main program of louvain */ template<typename IdxType=int, typename ValType> NVLOUVAIN_STATUS louvain(IdxType* csr_ptr, IdxType* csr_ind, ValType* csr_val, const size_t num_vertex, const size_t num_edges, bool& weighted, bool has_init_cluster, IdxType* init_cluster, // size = n_vertex ValType& final_modularity, IdxType* cluster_vec, // size = n_vertex IdxType& num_level, std::ostream& log = std::cout){ #ifndef ENABLE_LOG log.setstate(std::ios_base::failbit); #endif num_level = 0; cusparseHandle_t cusp_handle; cusparseCreate(&cusp_handle); int n_edges = num_edges; int n_vertex = num_vertex; thrust::device_vector<IdxType> csr_ptr_d(csr_ptr, csr_ptr + n_vertex + 1); thrust::device_vector<IdxType> csr_ind_d(csr_ind, csr_ind + n_edges); thrust::device_vector<ValType> csr_val_d(csr_val, csr_val + n_edges); //std::vector<IdxType> clustering(n_vertex); thrust::device_vector<IdxType> clustering(n_vertex); int upper_bound = 100; HighResClock hr_clock; double timed, diff_time; //size_t mem_tot= 0; //size_t mem_free = 0; int c_size(n_vertex); unsigned int best_c_size = (unsigned) n_vertex; unsigned current_n_vertex(n_vertex); int num_aggregates(n_edges); ValType m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.begin() + n_edges); ValType best_modularity = -1; thrust::device_vector<IdxType> new_csr_ptr(n_vertex, 0); thrust::device_vector<IdxType> new_csr_ind(n_edges, 0); thrust::device_vector<ValType> new_csr_val(n_edges, 0); thrust::device_vector<IdxType> cluster_d(n_vertex); thrust::device_vector<IdxType> aggregates_tmp_d(n_vertex, 0); thrust::device_vector<IdxType> cluster_inv_ptr(c_size + 1, 0); thrust::device_vector<IdxType> cluster_inv_ind(n_vertex, 0); thrust::device_vector<ValType> k_vec(n_vertex, 0); thrust::device_vector<ValType> Q_arr(n_vertex, 0); thrust::device_vector<ValType> delta_Q_arr(n_edges, 0); thrust::device_vector<ValType> cluster_sum_vec(c_size, 0); thrust::host_vector<IdxType> best_cluster_h(n_vertex, 0); Vector<IdxType> aggregates((int) current_n_vertex, 0); IdxType* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); IdxType* cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); IdxType* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); IdxType* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); ValType* csr_val_ptr = 
thrust::raw_pointer_cast(csr_val_d.data()); IdxType* cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); if(!has_init_cluster){ // if there is no initialized cluster // the cluster as assigned as a sequence (a cluster for each vertex) // inv_clusters will also be 2 sequence thrust::sequence(thrust::cuda::par, cluster_d.begin(), cluster_d.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.end()); } else{ // assign initialized cluster to cluster_d device vector // generate inverse cluster in CSR formate if(init_cluster == nullptr){ final_modularity = -1; return NVLOUVAIN_ERR_BAD_PARAMETERS; } thrust::copy(init_cluster, init_cluster + n_vertex , cluster_d.begin()); generate_cluster_inv(current_n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); } dim3 block_size_1d((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size_1d(BLOCK_SIZE_1D, 1, 1); dim3 block_size_2d((n_vertex + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (n_vertex + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size_2d(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); ValType* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); ValType* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); ValType* cluster_sum_vec_ptr = thrust::raw_pointer_cast(cluster_sum_vec.data()); ValType* delta_Q_arr_ptr = thrust::raw_pointer_cast(delta_Q_arr.data()); ValType new_Q, cur_Q, delta_Q, delta_Q_final; unsigned old_c_size(c_size); bool updated = true; hr_clock.start(); // Get the initialized modularity new_Q = modularity( n_vertex, n_edges, c_size, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); // delta_Q_arr_ptr is temp_i hr_clock.stop(&timed); diff_time = timed; LOG()<<"Initial modularity value: "<<COLOR_MGT<<new_Q<<COLOR_WHT<<" runtime: "<<diff_time/1000<<"\n"; bool contin(true); int bound = 0; int except = 3; do{ bound = 0; block_size_1d = dim3((current_n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); grid_size_1d = dim3(BLOCK_SIZE_1D, 1, 1); cur_Q = new_Q; old_c_size = c_size; #ifdef VERBOSE LOG()<<"Current cluster inv: \n"; nvlouvain::display_vec(cluster_inv_ptr, log); nvlouvain::display_vec(cluster_inv_ind, log); #endif hr_clock.start(); // Compute delta modularity for each edges build_delta_modularity_vector(cusp_handle, current_n_vertex, c_size, m2, updated, csr_ptr_d, csr_ind_d, csr_val_d, cluster_d, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec_ptr, cluster_sum_vec_ptr, delta_Q_arr_ptr); //display_vec(delta_Q_arr); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete build_delta_modularity_vector runtime: "<<diff_time/1000<<"\n"; //LOG()<<"Initial modularity value: "<<COLOR_MGT<<new_Q<<COLOR_WHT<<" runtime: "<<diff_time/1000<<"\n"; // Start aggregates Matching_t config = nvlouvain::USER_PROVIDED; //Size2Selector<IdxType, ValType> size2_sector(config, 0, 50, 0.6, true, false, 0); int agg_deterministic = 1; int agg_max_iterations = 25; ValType agg_numUnassigned_tol = 0.85; bool agg_two_phase = false; bool agg_merge_singletons = true; if (current_n_vertex<8) { agg_merge_singletons = false; //agg_max_iterations = 4; } Size2Selector<IdxType, ValType> size2_sector(config, agg_deterministic, agg_max_iterations, agg_numUnassigned_tol, agg_two_phase, agg_merge_singletons, 0); //hollywood-2009 0.5 #ifdef DEBUG if((unsigned)cluster_d.size()!= current_n_vertex) //LOG()<<"Error cluster_d.size()!= 
current_n_verte:qx"<< cluster_d.size() <<" != "<< current_n_vertex <<"\n"; #endif #ifdef VERBOSE //LOG()<<"n_vertex: "<< csr_ptr_d.size()<<" "<<csr_ind_d.size()<< " " << csr_val_d.size()<<" a_size: "<<aggregates.size()<<std::endl; #endif hr_clock.start(); size2_sector.setAggregates(cusp_handle, current_n_vertex, n_edges, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr , aggregates, num_aggregates); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete aggregation size: "<< num_aggregates<<" runtime: "<<diff_time/1000<<std::endl; // Done aggregates c_size = num_aggregates; thrust::copy(thrust::device, aggregates.begin(), aggregates.begin() + current_n_vertex, cluster_d.begin()); weighted = true; // start update modularty hr_clock.start(); CUDA_CALL(cudaDeviceSynchronize()); generate_cluster_inv(current_n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete generate_cluster_inv runtime: "<<diff_time/1000<<std::endl; #ifdef VERBOSE display_vec(cluster_inv_ptr, log); display_vec(cluster_inv_ind, log); #endif hr_clock.start(); new_Q = modularity(current_n_vertex, n_edges, c_size, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); //delta_Q_arr_ptr is temp_i and Q_arr is also temp store hr_clock.stop(&timed); diff_time = timed; // Done update modularity delta_Q = new_Q - cur_Q; if(best_modularity < new_Q){ best_c_size = c_size; } LOG()<<"modularity: "<<COLOR_MGT<<new_Q<<COLOR_WHT <<" delta modularity: " <<delta_Q <<" best_modularity:"<< min(best_modularity, new_Q) <<" moved: "<< (old_c_size - best_c_size) <<" runtime: "<<diff_time/1000<<std::endl; // start shinking graph if(best_modularity < new_Q ){ LOG()<< "Start Update best cluster\n"; updated = true; num_level ++; thrust::copy(thrust::device, cluster_d.begin(), cluster_d.begin() + current_n_vertex, aggregates_tmp_d.begin()); // if we would like to record the best cluster assignment for each level // we push back current cluster assignment to cluster_vec //TODO best_modularity = new_Q; best_c_size = c_size; hr_clock.start(); // generate super vertices graph generate_superverticies_graph(current_n_vertex, best_c_size, csr_ptr_d, csr_ind_d, csr_val_d, new_csr_ptr, new_csr_ind, new_csr_val, aggregates_tmp_d); CUDA_CALL(cudaDeviceSynchronize()); if(current_n_vertex == num_vertex){ // copy inital aggregates assignments as initial clustering thrust::copy(thrust::device, aggregates_tmp_d.begin(), aggregates_tmp_d.begin() + current_n_vertex, clustering.begin()); } else{ // update, clustering[i] = aggregates[clustering[i]]; update_clustering((int)num_vertex, thrust::raw_pointer_cast(clustering.data()), thrust::raw_pointer_cast(aggregates_tmp_d.data())); } hr_clock.stop(&timed); diff_time = timed; LOG() <<"Complete generate_superverticies_graph size of graph: "<<current_n_vertex<<" -> "<<best_c_size<<" runtime: "<<diff_time/1000<<std::endl; // update cluster_d as a sequence thrust::sequence(thrust::cuda::par, cluster_d.begin(), cluster_d.begin() + current_n_vertex); cudaCheckError(); // generate cluster inv in CSR form as sequence thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.begin() + best_c_size+1); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.begin() + best_c_size); cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); 
cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); //display_vec(cluster_inv_ind, log); hr_clock.start(); // get new modularity after we generate super vertices. IdxType* new_csr_ptr_ptr = thrust::raw_pointer_cast(new_csr_ptr.data()); IdxType* new_csr_ind_ptr = thrust::raw_pointer_cast(new_csr_ind.data()); ValType* new_csr_val_ptr = thrust::raw_pointer_cast(new_csr_val.data()); new_Q = modularity( best_c_size, n_edges, best_c_size, m2, new_csr_ptr_ptr, new_csr_ind_ptr, new_csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); hr_clock.stop(&timed); diff_time = timed; // modularity keeps the same after we generate super vertices // shouldn't happen if(std::fabs(new_Q - best_modularity) > 0.0001){ printf("Warning new_Q != best_Q %f != %f \n", new_Q, best_modularity); #if 0 printf("best_c_size = %d\n", best_c_size); std::ofstream ouf("./log/Error_"+time_now()+".log"); display_vec(aggregates_tmp_d, ouf); ouf<<"Error new_Q != best_Q "<< new_Q<<" != "<< best_modularity<<"\n"; ouf<<"old graph with size = "<<current_n_vertex<< "\n"; display_vec(csr_ptr_d, ouf); display_vec(csr_ind_d, ouf); display_vec(csr_val_d, ouf); ouf<<"new graph \n"; display_vec(new_csr_ptr, ouf); display_vec(new_csr_ind, ouf); display_vec(new_csr_val, ouf); generate_cluster_inv(current_n_vertex, c_size, aggregates_tmp_d.begin(), cluster_inv_ptr, cluster_inv_ind); ValType Q = modularity( current_n_vertex, n_edges, c_size, m2, csr_ptr_d, csr_ind_d, csr_val_d, cluster_d, cluster_inv_ptr, cluster_inv_ind, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); // delta_Q_arr_ptr is temp_i CUDA_CALL(cudaDeviceSynchronize()); LOG()<<Q<<std::endl; ouf<<"non block Q recompute "<< Q<<std::endl; display_vec(Q_arr, ouf); display_vec(delta_Q_arr, ouf); ouf.close(); #endif } LOG()<<"Update vectors and variables\n"; if(cur_Q - new_Q && (bound < upper_bound)){ current_n_vertex = best_c_size; n_edges = new_csr_ptr[ best_c_size ]; thrust::copy(thrust::device, new_csr_ptr.begin(), new_csr_ptr.begin() + current_n_vertex + 1, csr_ptr_d.begin()); thrust::copy(thrust::device, new_csr_ind.begin(), new_csr_ind.begin() + n_edges, csr_ind_d.begin()); thrust::copy(thrust::device, new_csr_val.begin(), new_csr_val.begin() + n_edges, csr_val_d.begin()); } //cudaMemGetInfo(&mem_free, &mem_tot); //std::cout<<"Mem usage : "<< (float)(mem_tot-mem_free)/(1<<30) <<std::endl; }else { LOG()<<"Didn't increase in modularity\n"; updated = false; except --; } // end better delta_Q_final = cur_Q - new_Q; contin = ((delta_Q_final > 0.0001 || except >0) && (bound < upper_bound)); LOG()<<"======================= modularity: "<<COLOR_MGT<<new_Q<<COLOR_WHT<<" delta modularity: "<<delta_Q_final << " runtime: "<<diff_time/1000<<" best_c_size: "<<best_c_size <<std::endl; ++bound; } while(contin); #ifdef VERBOSE display_vec(cluster_d); display_vec(csr_ptr_d); display_vec(csr_ind_d); display_vec(csr_val_d); #endif //LOG()<<"Final modularity: "<<COLOR_MGT<<best_modularity<<COLOR_WHT<<std::endl; log.clear(); final_modularity = best_modularity; cudaMemcpy ( cluster_vec, thrust::raw_pointer_cast(clustering.data()), n_vertex*sizeof(int), cudaMemcpyDefault ); return NVLOUVAIN_OK; } template<typename IdxType=int, typename ValType> NVLOUVAIN_STATUS louvain(IdxType* csr_ptr, IdxType* csr_ind, ValType* csr_val, const size_t num_vertex, const size_t num_edges, bool& weighted, bool has_init_cluster, IdxType* init_cluster, // size = n_vertex ValType& final_modularity, std::vector< std::vector<int> >& 
cluster_vec, // std::vector< IdxType* >& cluster_vec, IdxType& num_level, std::ostream& log = std::cout){ #ifndef ENABLE_LOG log.setstate(std::ios_base::failbit); #endif num_level = 0; cusparseHandle_t cusp_handle; cusparseCreate(&cusp_handle); int n_edges = num_edges; int n_vertex = num_vertex; thrust::device_vector<IdxType> csr_ptr_d(csr_ptr, csr_ptr + n_vertex + 1); thrust::device_vector<IdxType> csr_ind_d(csr_ind, csr_ind + n_edges); thrust::device_vector<ValType> csr_val_d(csr_val, csr_val + n_edges); int upper_bound = 100; HighResClock hr_clock; double timed, diff_time; int c_size(n_vertex); unsigned int best_c_size = (unsigned) n_vertex; int current_n_vertex(n_vertex); int num_aggregates(n_edges); ValType m2 = thrust::reduce(thrust::cuda::par, csr_val_d.begin(), csr_val_d.begin() + n_edges); ValType best_modularity = -1; thrust::device_vector<IdxType> new_csr_ptr(n_vertex, 0); thrust::device_vector<IdxType> new_csr_ind(n_edges, 0); thrust::device_vector<ValType> new_csr_val(n_edges, 0); thrust::device_vector<IdxType> cluster_d(n_vertex); thrust::device_vector<IdxType> aggregates_tmp_d(n_vertex, 0); thrust::device_vector<IdxType> cluster_inv_ptr(c_size + 1, 0); thrust::device_vector<IdxType> cluster_inv_ind(n_vertex, 0); thrust::device_vector<ValType> k_vec(n_vertex, 0); thrust::device_vector<ValType> Q_arr(n_vertex, 0); thrust::device_vector<ValType> delta_Q_arr(n_edges, 0); thrust::device_vector<ValType> cluster_sum_vec(c_size, 0); std::vector<IdxType> best_cluster_h(n_vertex, 0); Vector<IdxType> aggregates(current_n_vertex, 0); IdxType* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); IdxType* cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); IdxType* csr_ptr_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); IdxType* csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); ValType* csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); IdxType* cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); if(!has_init_cluster){ // if there is no initialized cluster // the cluster as assigned as a sequence (a cluster for each vertex) // inv_clusters will also be 2 sequence thrust::sequence(thrust::cuda::par, cluster_d.begin(), cluster_d.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.end()); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.end()); } else{ // assign initialized cluster to cluster_d device vector // generate inverse cluster in CSR formate if(init_cluster == nullptr){ final_modularity = -1; return NVLOUVAIN_ERR_BAD_PARAMETERS; } thrust::copy(init_cluster, init_cluster + n_vertex , cluster_d.begin()); generate_cluster_inv(current_n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); } dim3 block_size_1d((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size_1d(BLOCK_SIZE_1D, 1, 1); dim3 block_size_2d((n_vertex + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, (n_vertex + BLOCK_SIZE_2D -1)/ BLOCK_SIZE_2D, 1); dim3 grid_size_2d(BLOCK_SIZE_2D, BLOCK_SIZE_2D, 1); ValType* k_vec_ptr = thrust::raw_pointer_cast(k_vec.data()); ValType* Q_arr_ptr = thrust::raw_pointer_cast(Q_arr.data()); ValType* cluster_sum_vec_ptr = thrust::raw_pointer_cast(cluster_sum_vec.data()); ValType* delta_Q_arr_ptr = thrust::raw_pointer_cast(delta_Q_arr.data()); ValType new_Q, cur_Q, delta_Q, delta_Q_final; unsigned old_c_size(c_size); bool updated = true; hr_clock.start(); // Get the initialized modularity new_Q = modularity( n_vertex, n_edges, c_size, m2, csr_ptr_ptr, 
csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); // delta_Q_arr_ptr is temp_i hr_clock.stop(&timed); diff_time = timed; LOG()<<"Initial modularity value: "<<COLOR_MGT<<new_Q<<COLOR_WHT<<" runtime: "<<diff_time/1000<<"\n"; bool contin(true); int bound = 0; int except = 3; do{ bound = 0; block_size_1d = dim3((current_n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); grid_size_1d = dim3(BLOCK_SIZE_1D, 1, 1); cur_Q = new_Q; old_c_size = c_size; #ifdef VERBOSE LOG()<<"Current cluster inv: \n"; nvlouvain::display_vec(cluster_inv_ptr, log); nvlouvain::display_vec(cluster_inv_ind, log); #endif hr_clock.start(); // Compute delta modularity for each edges build_delta_modularity_vector(cusp_handle, current_n_vertex, c_size, m2, updated, csr_ptr_d, csr_ind_d, csr_val_d, cluster_d, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec_ptr, cluster_sum_vec_ptr, delta_Q_arr_ptr); //display_vec(delta_Q_arr); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete build_delta_modularity_vector runtime: "<<diff_time/1000<<"\n"; // Start aggregates Matching_t config = nvlouvain::USER_PROVIDED; // Size2Selector<IdxType, ValType> size2_sector(config, 0, 50, 0.6, true, false, 0); Size2Selector<IdxType, ValType> size2_sector(config, 1, 25, 0.85, false, true, 0); //hollywood-2009 0.5 #ifdef DEBUG if((unsigned)cluster_d.size()!= current_n_vertex) //LOG()<<"Error cluster_d.size()!= current_n_verte:qx"<< cluster_d.size() <<" != "<< current_n_vertex <<"\n"; #endif #ifdef VERBOSE //LOG()<<"n_vertex: "<< csr_ptr_d.size()<<" "<<csr_ind_d.size()<< " " << csr_val_d.size()<<" a_size: "<<aggregates.size()<<std::endl; #endif hr_clock.start(); size2_sector.setAggregates(cusp_handle, current_n_vertex, n_edges, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr , aggregates, num_aggregates); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete aggregation size: "<< num_aggregates<<" runtime: "<<diff_time/1000<<std::endl; // Done aggregates c_size = num_aggregates; thrust::copy(thrust::device, aggregates.begin(), aggregates.begin() + current_n_vertex, cluster_d.begin()); weighted = true; // start update modularty hr_clock.start(); CUDA_CALL(cudaDeviceSynchronize()); generate_cluster_inv(current_n_vertex, c_size, cluster_d.begin(), cluster_inv_ptr, cluster_inv_ind); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; LOG()<<"Complete generate_cluster_inv runtime: "<<diff_time/1000<<std::endl; #ifdef VERBOSE display_vec(cluster_inv_ptr, log); display_vec(cluster_inv_ind, log); #endif hr_clock.start(); new_Q = modularity(current_n_vertex, n_edges, c_size, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); //delta_Q_arr_ptr is temp_i and Q_arr is also temp store hr_clock.stop(&timed); diff_time = timed; // Done update modularity delta_Q = new_Q - cur_Q; if(best_modularity < new_Q){ best_c_size = c_size; } LOG()<<"modularity: "<<COLOR_MGT<<new_Q<<COLOR_WHT <<" delta modularity: " <<delta_Q <<" best_modularity:"<< min(best_modularity, new_Q) <<" moved: "<< (old_c_size - best_c_size) <<" runtime: "<<diff_time/1000<<std::endl; // start shinking graph if(best_modularity < new_Q ){ LOG()<< "Start Update best cluster\n"; updated = true; num_level ++; thrust::copy(thrust::device, cluster_d.begin(), cluster_d.begin() + current_n_vertex, aggregates_tmp_d.begin()); // if we would like to record the best cluster 
assignment for each level // we push back current cluster assignment to cluster_vec best_cluster_h.resize(current_n_vertex); thrust::copy( cluster_d.begin(), cluster_d.begin() + current_n_vertex, best_cluster_h.begin()); cudaCheckError(); cluster_vec.push_back(best_cluster_h); best_modularity = new_Q; best_c_size = c_size; hr_clock.start(); // generate super vertices graph generate_superverticies_graph(current_n_vertex, best_c_size, csr_ptr_d, csr_ind_d, csr_val_d, new_csr_ptr, new_csr_ind, new_csr_val, aggregates_tmp_d); CUDA_CALL(cudaDeviceSynchronize()); hr_clock.stop(&timed); diff_time = timed; LOG() <<"Complete generate_superverticies_graph size of graph: "<<current_n_vertex<<" -> "<<best_c_size<<" runtime: "<<diff_time/1000<<std::endl; // update cluster_d as a sequence thrust::sequence(thrust::cuda::par, cluster_d.begin(), cluster_d.begin() + current_n_vertex); cudaCheckError(); // generate cluster inv in CSR form as sequence thrust::sequence(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.begin() + best_c_size+1); thrust::sequence(thrust::cuda::par, cluster_inv_ind.begin(), cluster_inv_ind.begin() + best_c_size); cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); cluster_inv_ind_ptr = thrust::raw_pointer_cast(cluster_inv_ind.data()); hr_clock.start(); // get new modularity after we generate super vertices. IdxType* new_csr_ptr_ptr = thrust::raw_pointer_cast(new_csr_ptr.data()); IdxType* new_csr_ind_ptr = thrust::raw_pointer_cast(new_csr_ind.data()); ValType* new_csr_val_ptr = thrust::raw_pointer_cast(new_csr_val.data()); new_Q = modularity( best_c_size, n_edges, best_c_size, m2, new_csr_ptr_ptr, new_csr_ind_ptr, new_csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); hr_clock.stop(&timed); diff_time = timed; // modularity keeps the same after we generate super vertices // shouldn't happen if(std::fabs(new_Q - best_modularity) > 0.0001){ printf("Warning new_Q != best_Q %f != %f \n", new_Q, best_modularity); #if 0 printf("best_c_size = %d\n", best_c_size); std::ofstream ouf("./log/Error_"+time_now()+".log"); display_vec(aggregates_tmp_d, ouf); ouf<<"Error new_Q != best_Q "<< new_Q<<" != "<< best_modularity<<"\n"; ouf<<"old graph with size = "<<current_n_vertex<< "\n"; display_vec(csr_ptr_d, ouf); display_vec(csr_ind_d, ouf); display_vec(csr_val_d, ouf); ouf<<"new graph \n"; display_vec(new_csr_ptr, ouf); display_vec(new_csr_ind, ouf); display_vec(new_csr_val, ouf); generate_cluster_inv(current_n_vertex, c_size, aggregates_tmp_d.begin(), cluster_inv_ptr, cluster_inv_ind); ValType Q = modularity( current_n_vertex, n_edges, c_size, m2, csr_ptr_d, csr_ind_d, csr_val_d, cluster_d, cluster_inv_ptr, cluster_inv_ind, weighted, k_vec_ptr, Q_arr_ptr, delta_Q_arr_ptr); // delta_Q_arr_ptr is temp_i CUDA_CALL(cudaDeviceSynchronize()); LOG()<<Q<<std::endl; ouf<<"non block Q recompute "<< Q<<std::endl; display_vec(Q_arr, ouf); display_vec(delta_Q_arr, ouf); ouf.close(); #endif } LOG()<<"Update vectors and variables\n"; if(cur_Q - new_Q && (bound < upper_bound)){ current_n_vertex = best_c_size; n_edges = new_csr_ptr[ best_c_size ]; thrust::copy(thrust::device, new_csr_ptr.begin(), new_csr_ptr.begin() + current_n_vertex + 1, csr_ptr_d.begin()); thrust::copy(thrust::device, new_csr_ind.begin(), new_csr_ind.begin() + n_edges, csr_ind_d.begin()); thrust::copy(thrust::device, new_csr_val.begin(), new_csr_val.begin() + n_edges, csr_val_d.begin()); } }else { LOG()<<"Didn't increase in modularity\n"; updated = 
false; except --; } // end better delta_Q_final = cur_Q - new_Q; contin = ((delta_Q_final > 0.0001 || except >0) && (bound < upper_bound)); LOG()<<"======================= modularity: "<<COLOR_MGT<<new_Q<<COLOR_WHT<<" delta modularity: "<<delta_Q_final << " runtime: "<<diff_time/1000<<" best_c_size: "<<best_c_size <<std::endl; ++bound; }while(contin); #ifdef VERBOSE display_vec(cluster_d); display_vec(csr_ptr_d); display_vec(csr_ind_d); display_vec(csr_val_d); #endif //LOG()<<"Final modularity: "<<COLOR_MGT<<best_modularity<<COLOR_WHT<<std::endl; log.clear(); final_modularity = best_modularity; return NVLOUVAIN_OK; } }
0
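A minimal host-side driver for the first louvain overload above, under a few assumptions: the nvlouvain headers and CUDA toolkit are on the include path, host pointers are acceptable inputs (the routine copies them into device vectors itself), and status checking is elided. The toy graph is two triangles joined by a single edge, stored as a symmetric weighted CSR.

#include <iostream>
#include <vector>
#include "nvlouvain.cuh"

int main()
{
    // Two triangles (0,1,2) and (3,4,5) joined by the edge 2-3, symmetric CSR.
    std::vector<int>    row_ptr = {0, 2, 4, 7, 10, 12, 14};
    std::vector<int>    col_ind = {1,2, 0,2, 0,1,3, 2,4,5, 3,5, 3,4};
    std::vector<double> weights(14, 1.0);

    const size_t n_vertex = 6, n_edges = 14;
    bool   weighted = false;             // updated by the solver
    double final_modularity = 0.0;
    int    num_level = 0;
    std::vector<int> clustering(n_vertex);

    auto status = nvlouvain::louvain(row_ptr.data(), col_ind.data(), weights.data(),
                                     n_vertex, n_edges, weighted,
                                     /*has_init_cluster=*/false,
                                     static_cast<int*>(nullptr),
                                     final_modularity, clustering.data(), num_level);
    (void)status;                        // real code would check for success here

    std::cout << "modularity " << final_modularity
              << "  levels " << num_level << std::endl;
    return 0;
}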
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/arnoldi.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <vector> namespace nvgraph { template <typename IndexType_, typename ValueType_> class ImplicitArnoldi { public: typedef IndexType_ IndexType; typedef ValueType_ ValueType; private: //Arnoldi ValuedCsrGraph <IndexType, ValueType> m_A ;//device std::vector<ValueType*> m_Vi; // Host vector of device adresses -> no it is a 2D vect Vector<ValueType> m_V; // Each colum is a vector of size n, colum major storage Vector<ValueType> m_Q_d; // Device version of Q (Qt) Vector<ValueType> m_V_tmp; // Output of V*Q <=> QtVt Vector<ValueType> m_ritz_eigenvectors_d; Vector<ValueType> m_eigenvectors; std::vector<ValueType> m_H; //host std::vector<ValueType> m_H_select; //host std::vector<ValueType> m_H_tmp; //host (lapack likes to overwrite input) std::vector<ValueType> m_ritz_eigenvalues; //host std::vector<ValueType> m_ritz_eigenvalues_i; //host std::vector<ValueType> m_shifts; //host std::vector<ValueType> m_ritz_eigenvectors;//host std::vector<ValueType> m_Q; //host std::vector<ValueType> m_Q_tmp; //host (lapack likes to overwrite input) std::vector<ValueType> m_mns_residuals; //host resuals of subspaces std::vector<ValueType> m_mns_beta; //host resuals of subspaces Vector <ValueType> m_a; // Markov Vector <ValueType> m_b; // Markov Vector <ValueType> m_D; // Laplacian ValueType m_beta; // from arnoldi projection algorithm ValueType m_residual; // is set by compute_residual() ValueType m_damping; // for Markov and Pagerank float m_tolerance; int m_nr_eigenvalues; // the number of wanted eigenvals, also called k in the litterature int m_n_eigenvalues; // the number of eigenvals we keep in the solver, this greater or equal to k, this can be m_nr_eigenvalues or m_nr_eigenvalues+1 int m_krylov_size; // the maximum size of the krylov sobspace, also called m in the litterature (m=k+p) int m_iterations; // a counter of restart, each restart cost m_krylov_size-m_n_eigenvalues arnoldi iterations (~spmv) int m_max_iter; // maximum number of iterations int m_parts; // laplacian related //miramns related ints int m_nested_subspaces; // the number of subspace to evaluate in MIRAMns int m_nested_subspaces_freq; // the frequence at which we should evaluate subspaces in MIRAMns int m_select; // best subspace size int m_select_idx; // best subspace number (0 indexed) int m_safety_lower_bound; // The smallest subspace to check is m_safety_lower_bound+m_nr_eigenvalues+1 bool m_converged; bool m_is_setup; bool m_has_guess; bool m_markov; bool m_miramns; bool m_dirty_bit; // to know if H has changed, so if we need to call geev bool m_laplacian; bool has_init_guess; // Warning : here an iteration is a restart bool solve_it(); // Input: A V[0] // Output: V, H, f(=V[m_krylov_size]) bool solve_arnoldi(int lower_bound, int upper_bound); // Input: H - a real square upper Hessenberg matrix // Output: w - eigenvalues of H sorted according to which // most wanted to least wanted order // Optionally compute the eigenvalues of H void 
select_shifts(bool dirty_bit=false); // reorder eigenpairs by largest real part void LR(int subspace_sz); // reorder eigenpairs by largest magnitude void LM(int subspace_sz); // reorder eigenpairs by smallest real part void SR(int subspace_sz); // Input: Q -- a real square orthogonal matrix // H -- a real square upper Hessenberg matrix // mu -- a real shift // Output: Q+ -- a real orthogonal matrix // H+ -- a real square upper Hessenberg matrix // This step will "refine" the subspace by "pushing" the information // into the top left corner void qr_step(); // Update V and f using Q+ and H+ void refine_basis(); // Approximate residual of the largest Ritz pair of H // Optionally compute the eigenvalues of H void compute_residual(int subspace_size, bool dirty_bit=false); void compute_eigenvectors(); void select_subspace(); // extract H_select from H void extract_subspace(int m); // clean everything outside of the new_sz*new_sz hessenberg matrix (in colum major) void cleanup_subspace(std::vector<ValueType_>& v, int ld, int new_sz); // clean everything outside of the new_sz*new_sz hessenberg matrix (in colum major) void shift(std::vector<ValueType_>& H, int ld, int m, ValueType mu); public: // Simple constructor ImplicitArnoldi(void) {}; // Simple destructor ~ImplicitArnoldi(void) {}; // Create a ImplicitArnoldi Solver ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A); // Create a ImplicitArnoldi Solver with support of graph laplacian generation ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A, int parts); // Create a ImplicitArnoldi Solver with support of damping factor and rank one updates (pagerank, markov ...) ImplicitArnoldi(const ValuedCsrGraph <IndexType, ValueType>& A, Vector<ValueType>& dangling_nodes, const float tolerance, const int max_iter, ValueType alpha=0.95); void setup( Vector<ValueType>& initial_guess, const int restart_it, const int nEigVals); // public because we want to use and test that directly and/or separately // Starting from V, H, f : // Call the QRstep, project the update, launch the arnlodi with the new base // and check the quality of the new result void implicit_restart(); // public because we want to use and test that directly and/or separately // The total number of SPMV will be : m_krylov_size + (m_krylov_size-m_n_eigenvalues)*nb_restart NVGRAPH_ERROR solve(const int restart_it, const int nEigVals, Vector<ValueType>& initial_guess, Vector<ValueType>& eigVals, Vector<ValueType>& eigVecs, const int n_sub_space=0); inline ValueType get_residual() const {return m_residual;} inline int get_iterations() const {return m_iterations;} // we use that for tests, unoptimized copies/transfers inside std::vector<ValueType> get_H_copy() {return m_H;} std::vector<ValueType> get_Hs_copy() {return m_H_select;} std::vector<ValueType> get_ritz_eval_copy(){return m_ritz_eigenvalues;} // should be called after select_shifts std::vector<ValueType> get_V_copy(); std::vector<ValueType> get_f_copy(); std::vector<ValueType> get_fp_copy(); }; } // end namespace nvgraph
0
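The comments above describe an implicitly restarted Arnoldi scheme: solve_arnoldi builds (V, H, f), select_shifts orders the Ritz values, and qr_step/refine_basis compress the factorization before it is extended again. In the notation commonly used for this family of methods, the invariant maintained after m steps and the effect of one implicit restart with p = m - k shifts can be written as:

\[
A V_m = V_m H_m + f_m e_m^{T}, \qquad V_m^{T} V_m = I_m, \qquad V_m^{T} f_m = 0 ,
\]
\[
(H_m - \mu_j I) = Q_j R_j \;\; (j = 1,\dots,p), \qquad
Q = Q_1 Q_2 \cdots Q_p, \qquad
H_m^{+} = Q^{T} H_m Q, \qquad V_m^{+} = V_m Q ,
\]

after which the leading k columns of \(V_m^{+}\) and the leading \(k \times k\) block of \(H_m^{+}\) form a length-k Arnoldi factorization that is extended back to length m on the next iteration, at a cost of m - k matrix-vector products per restart (consistent with the SPMV count quoted in the comment on solve()).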
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/triangles_counting_defines.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda_runtime.h> #include <limits.h> #ifdef _MSC_VER #include <stdint.h> #else #include <inttypes.h> #endif /* #ifdef MSVC_VER #include <intrin.h> #pragma intrinsic(_BitScanForward) #pragma intrinsic(_BitScanForward64) #pragma intrinsic(_BitScanReverse) #pragma intrinsic(_BitScanReverse64) #endif */ #define MIN(x,y) (((x)<(y))?(x):(y)) #define MAX(x,y) (((x)>(y))?(x):(y)) #define THREADS (128) #define DIV_UP(a,b) (((a)+((b)-1))/(b)) #define BITSOF(x) (sizeof(*x)*8) #define BLK_BWL0 (128) #define WRP_BWL0 (128) #define HUGE_GRAPH #define DEG_THR1 (3.5) #define DEG_THR2 (38.0) namespace nvgraph { namespace triangles_counting { template <typename T> struct type_utils; template <> struct type_utils<int> { typedef int LOCINT; static const LOCINT LOCINT_MAX = INT_MAX; #ifdef MPI_VERSION static const MPI_Datatype LOCINT_MPI = MPI_INT; #endif static __inline__ LOCINT abs(const LOCINT& x) { return abs(x); } }; template <> struct type_utils<int64_t> { typedef uint64_t LOCINT; static const LOCINT LOCINT_MAX = LLONG_MAX; #ifdef MPI_VERSION static const MPI_Datatype LOCINT_MPI = MPI_LONG_LONG; #endif static __inline__ LOCINT abs(const LOCINT& x) { return llabs(x); } }; template <typename T> struct spmat_t { T N; T nnz; T nrows; const T *roff_d; const T *rows_d; const T *cols_d; bool is_lower_triangular; }; } // namespace triangles_counting } // namespace nvgraph
0
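A small host-only sketch showing how the helpers above are meant to be used, assuming the CUDA headers are on the include path (the file includes cuda_runtime.h itself); the numbers are arbitrary.

#include <cstdint>
#include <cstdio>
#include "triangles_counting_defines.hxx"

namespace tc = nvgraph::triangles_counting;

int main()
{
    // Launch-geometry helper: number of THREADS-sized blocks for N work items.
    const int64_t N      = 1000000;
    const int64_t blocks = DIV_UP(N, (int64_t)THREADS);

    // LOCINT is the local index type: int for 32-bit ids, uint64_t for 64-bit ids.
    tc::type_utils<int64_t>::LOCINT big = tc::type_utils<int64_t>::LOCINT_MAX;

    std::printf("blocks=%lld  bits per LOCINT=%zu  LOCINT_MAX=%llu\n",
                (long long)blocks, BITSOF(&big), (unsigned long long)big);
    return 0;
}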
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/range_view.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/execution_policy.h> #include <iostream> #ifndef RANGE_VIEW_HXX #define RANGE_VIEW_HXX // This example demonstrates the use of a view: a non-owning wrapper for an // iterator range which presents a container-like interface to the user. // // For example, a view of a device_vector's data can be helpful when we wish to // access that data from a device function. Even though device_vectors are not // accessible from device functions, the range_view class allows us to access // and manipulate its data as if we were manipulating a real container. // // This example demonstrate use of range_view with for_each algorithm which is // dispatch from GPU // template<class Iterator> class range_view { public: typedef Iterator iterator; typedef typename thrust::iterator_traits<iterator>::value_type value_type; typedef typename thrust::iterator_traits<iterator>::pointer pointer; typedef typename thrust::iterator_traits<iterator>::difference_type difference_type; typedef typename thrust::iterator_traits<iterator>::reference reference; private: const iterator first; const iterator last; public: __host__ __device__ range_view(Iterator first, Iterator last) : first(first), last(last) {} __host__ __device__ ~range_view() {} __host__ __device__ difference_type size() const { return thrust::distance(first, last); } __host__ __device__ reference operator[](difference_type n) { return *(first + n); } __host__ __device__ const reference operator[](difference_type n) const { return *(first + n); } __host__ __device__ iterator begin() { return first; } __host__ __device__ const iterator cbegin() const { return first; } __host__ __device__ iterator end() { return last; } __host__ __device__ const iterator cend() const { return last; } __host__ __device__ thrust::reverse_iterator<iterator> rbegin() { return thrust::reverse_iterator<iterator>(end()); } __host__ __device__ const thrust::reverse_iterator<const iterator> crbegin() const { return thrust::reverse_iterator<const iterator>(cend()); } __host__ __device__ thrust::reverse_iterator<iterator> rend() { return thrust::reverse_iterator<iterator>(begin()); } __host__ __device__ const thrust::reverse_iterator<const iterator> crend() const { return thrust::reverse_iterator<const iterator>(cbegin()); } __host__ __device__ reference front() { return *begin(); } __host__ __device__ const reference front() const { return *cbegin(); } __host__ __device__ reference back() { return *end(); } __host__ __device__ const reference back() const { return *cend(); } __host__ __device__ bool empty() const { return size() == 0; } }; // This helper function creates a range_view from iterator and the number of // elements template <class Iterator, class Size> range_view<Iterator> __host__ __device__ make_range_view(Iterator first, Size n) { return range_view<Iterator>(first, first+n); 
} // This helper function creates a range_view from a pair of iterators template <class Iterator> range_view<Iterator> __host__ __device__ make_range_view(Iterator first, Iterator last) { return range_view<Iterator>(first, last); } // This helper function creates a range_view from a Vector template <class Vector> range_view<typename Vector::iterator> __host__ make_range_view(Vector& v) { return range_view<typename Vector::iterator>(v.begin(), v.end()); } #endif
0
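A minimal sketch of using range_view from device code, assuming compilation with nvcc. The view wraps the vector's raw device pointer rather than its device_vector iterators, so indexing inside the kernel is plain pointer arithmetic, and it is passed to the kernel by value, as the __host__ __device__ members above intend.

#include <cstdio>
#include <thrust/device_vector.h>
#include "range_view.hxx"

// Fill the view with consecutive integers; range_view is passed by value, so
// the kernel sees the same begin/end pair the host built.
__global__ void iota_kernel(range_view<int*> view)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < view.size())
        view[i] = i;
}

int main()
{
    thrust::device_vector<int> data(1024, 0);

    // Build a non-owning view over the vector's raw device storage.
    range_view<int*> view = make_range_view(thrust::raw_pointer_cast(data.data()),
                                            (int)data.size());

    iota_kernel<<<(1024 + 255) / 256, 256>>>(view);
    cudaDeviceSynchronize();

    std::printf("last element: %d\n", (int)data[1023]);   // expected 1023
    return 0;
}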
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/stacktrace.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ //adapted from https://idlebox.net/2008/0901-stacktrace-demangled/ and licensed under WTFPL v2.0 #pragma once #if defined(_WIN32) || defined (__ANDROID__) || defined(ANDROID) || defined (__QNX__) || defined (__QNXNTO__) #else #include <execinfo.h> #include <dlfcn.h> #include <cxxabi.h> #include <unistd.h> #include <stdlib.h> #endif #include <stdio.h> #include <string> #include <sstream> #include <iostream> namespace nvgraph { /** Print a demangled stack backtrace of the caller function to FILE* out. */ static inline void printStackTrace(std::ostream &eout = std::cerr, unsigned int max_frames = 63) { #if defined(_WIN32) || defined (__ANDROID__) || defined(ANDROID) || defined (__QNX__) || defined (__QNXNTO__) //TODO add code for windows stack trace and android stack trace #else std::stringstream out; // storage array for stack trace address data void* addrlist[max_frames+1]; // retrieve current stack addresses int addrlen = backtrace(addrlist, sizeof(addrlist) / sizeof(void*)); if (addrlen == 0) { out << " <empty, possibly corrupt>\n"; return; } // resolve addresses into strings containing "filename(function+address)", // this array must be free()-ed char** symbollist = backtrace_symbols(addrlist, addrlen); // allocate string which will be filled with the demangled function name size_t funcnamesize = 256; char* funcname = (char*)malloc(funcnamesize); // iterate over the returned symbol lines. skip the first, it is the // address of this function. for (int i = 1; i < addrlen; i++) { char *begin_name = 0, *begin_offset = 0, *end_offset = 0; // find parentheses and +address offset surrounding the mangled name: // ./module(function+0x15c) [0x8048a6d] for (char *p = symbollist[i]; *p; ++p) { if (*p == '(') begin_name = p; else if (*p == '+') begin_offset = p; else if (*p == ')' && begin_offset) { end_offset = p; break; } } if (begin_name && begin_offset && end_offset && begin_name < begin_offset) { *begin_name++ = '\0'; *begin_offset++ = '\0'; *end_offset = '\0'; // mangled name is now in [begin_name, begin_offset) and caller // offset in [begin_offset, end_offset). now apply // __cxa_demangle(): int status; char* ret = abi::__cxa_demangle(begin_name, funcname, &funcnamesize, &status); if (status == 0) { funcname = ret; // use possibly realloc()-ed string out << " " << symbollist[i] << " : " << funcname << "+" << begin_offset << "\n"; } else { // demangling failed. Output function name as a C function with // no arguments. out << " " << symbollist[i] << " : " << begin_name << "()+" << begin_offset << "\n"; } } else { // couldn't parse the line? print the whole line. out << " " << symbollist[i] << "\n"; } } eout << out.str(); //error_output(out.str().c_str(),out.str().size()); free(funcname); free(symbollist); //printf("PID of failing process: %d\n",getpid()); //while(1); #endif } } //end namespace nvgraph
0
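A brief usage sketch for printStackTrace, assuming a non-Windows build; linking with -rdynamic makes backtrace_symbols report useful function names. The fatal() helper is illustrative, not part of nvgraph.

#include <cstdlib>
#include <iostream>
#include "stacktrace.h"

// Hypothetical error handler: print a demangled backtrace before aborting.
void fatal(const char* msg)
{
    std::cerr << "fatal: " << msg << "\n";
    nvgraph::printStackTrace(std::cerr);
    std::abort();
}

int main()
{
    fatal("example failure");
    return 0;
}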
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph_contracting_visitor.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef GRAPH_CONTRACTING_VISITOR_HXX #define GRAPH_CONTRACTING_VISITOR_HXX // // #include <multi_valued_csr_graph.hxx> //which includes all other headers... #include <range_view.hxx> // TODO: to be changed to thrust/range_view.h, when toolkit gets in sync with Thrust #include <thrust_traits.hxx> ///#include <graph_contracting_structs.hxx> #include <cassert> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/sequence.h> #include <thrust/binary_search.h> #include <thrust/sort.h>// #include <thrust/extrema.h> #include <thrust/pair.h> #include <thrust/distance.h>// #include <thrust/unique.h>// #include <cusp/array1d.h> #include <cusp/array2d.h> #include <cusp/functional.h> #include <cusp/multiply.h> #include <cusp/print.h> #include <cusp/transpose.h>// //debugging only: #include <cstdio> #define __CUDA_ARCH_THRESHOLD__ 300 ///#define __CUDA_ARCH_THRESHOLD__ 350 // namespace nvgraph { //SpMv + SpMM + SpMM: // cntrctd_vertex_data = S*v(g_vertex_data); // cntrctd_edge_data = (S*G(g_edge_data)*St).values // //see GraphContractionFunctor::computeRestrictionOperator() for S matrix CSR data // template<typename VectorI, //vector type for indices typename VectorV, //vector type for values typename VWrapper, //wrapper type around raw pointer or other type of array wrapper typename VertexCombineFctr, //vertex "multiplication" functor type typename VertexReduceFctr, //vertex "addition" functor type typename EdgeCombineFctr, //edge "multiplication" functor type typename EdgeReduceFctr> //edge "addition" functor type struct SemiringContractionUtilities { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValT; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; SemiringContractionUtilities(const VectorI& g_row_offsets, //original graph CSR const VectorI& g_col_indices, const VectorI& S_row_offsets, const VectorI& S_col_indices, VertexCombineFctr& v_combine, VertexReduceFctr& v_reduce, EdgeCombineFctr& e_combine, EdgeReduceFctr& e_reduce): m_g_row_offsets(g_row_offsets), m_g_col_indices(g_col_indices), m_v_combine(v_combine), m_v_reduce(v_reduce), m_e_combine(e_combine), m_e_reduce(e_reduce), m_n_agg(S_row_offsets.size()-1), m_g_nr(g_row_offsets.size()-1), // == S_nc m_g_nnz(g_row_offsets.back()), m_s_nnz(S_row_offsets.back()) { VectorV S_vals(m_s_nnz, 1); PtrV p_S_vals(S_vals.data().get()); VWrapper S_vals_w(p_S_vals, p_S_vals+S_vals.size()); //NOT necessarily square! 
m_S = make_csr_matrix(m_g_nr, S_row_offsets, S_col_indices, S_vals_w); m_St = cusp::csr_matrix<IndexT, ValT, cusp::device_memory>(m_g_nr, m_n_agg, m_s_nnz); cusp::transpose(m_S, m_St); cudaCheckError(); } virtual ~SemiringContractionUtilities(void) { } const VectorI& get_row_ptr(void) const { return m_cntrctd_row_offsets; } const VectorI& get_col_ind(void) const { return m_cntrctd_col_indices; } IndexT get_subg_nnz(void) const { return m_cntrctd_row_offsets.back(); } virtual void update_vertex_data(/*In: */const VWrapper& g_vertex_data,//multivalue vertex entry of original graph, size==g_nr /*Out:*/VWrapper& cntrctd_vertex_data)//multivalue vertex entry of contracted graph, size==n_agg==S_nr (assumed allocated!) { //SpMv: // assert( m_g_nr == g_vertex_data.size() ); cusp::array1d<ValT, cusp::device_memory> x(g_vertex_data.cbegin(), g_vertex_data.cend()); cusp::array1d<ValT, cusp::device_memory> y(m_n_agg,0); cusp::constant_functor<ValT> initialize; cusp::multiply(m_S, x, y, initialize, m_v_combine, m_v_reduce); cudaCheckError(); thrust::copy(y.begin(), y.end(), cntrctd_vertex_data.begin()); cudaCheckError(); } virtual void update_topology_only(void) { cudaCheckError(); //SpMM+SpMM: S*G*St // VectorV empty(m_g_nnz, 1);//0 => empty G matrix, use 1's as values PtrV ptr_e(&empty[0]); VWrapper g_edge_data(ptr_e, ptr_e+m_g_nnz); cudaCheckError(); cusp::csr_matrix<IndexT, ValT, cusp::device_memory> G = make_square_csr_matrix(m_g_row_offsets, m_g_col_indices, g_edge_data); cudaCheckError(); cusp::constant_functor<ValT> initialize; //L=S*G cusp::csr_matrix<IndexT, ValT, cusp::device_memory> L;//no need to allocate! cusp::multiply(m_S, G, L, initialize, m_e_combine, m_e_reduce); cudaCheckError(); //R = L*St cusp::csr_matrix<IndexT, ValT, cusp::device_memory> R;//no need to allocate! cusp::multiply(L, m_St, R, initialize, m_e_combine, m_e_reduce); cudaCheckError(); //##### debug: //std::cout<<"S:\n";cusp::print(m_S); //std::cout<<"R:\n";cusp::print(R); size_t r_sz = R.row_offsets.size(); assert( r_sz > 0 ); size_t cntrctd_nnz = R.row_offsets.back(); ///size_t cntrctd_nr = r_sz-1; //allocate cntrctd_csr_data: m_cntrctd_row_offsets = VectorI(r_sz, 0); m_cntrctd_col_indices = VectorI(cntrctd_nnz, 0); thrust::copy(R.row_offsets.begin(), R.row_offsets.end(), m_cntrctd_row_offsets.begin()); cudaCheckError(); thrust::copy(R.column_indices.begin(), R.column_indices.end(), m_cntrctd_col_indices.begin()); cudaCheckError(); } virtual void update_edge_data(/*In: */const VWrapper& g_edge_data, //multivalue edge entry of original graph, size==g_nnz /*Out:*/VWrapper& cntrctd_edge_data) //multivalue edge entry of contracted graph, size==nnz(S*G*St) (assumed allocated!) { //SpMM+SpMM: S*G*St // assert( m_g_nnz == g_edge_data.size() ); cusp::csr_matrix<IndexT, ValT, cusp::device_memory> G = make_square_csr_matrix(m_g_row_offsets, m_g_col_indices, g_edge_data); cudaCheckError(); cusp::constant_functor<ValT> initialize; cudaCheckError(); //L=S*G cusp::csr_matrix<IndexT, ValT, cusp::device_memory> L;//no need to allocate! cusp::multiply(m_S, G, L, initialize, m_e_combine, m_e_reduce); cudaCheckError(); //R = L*St //##### crash here: cusp::csr_matrix<IndexT, ValT, cusp::device_memory> R;//no need to allocate! 
cusp::multiply(L, m_St, R, initialize, m_e_combine, m_e_reduce); cudaCheckError(); size_t r_sz = R.row_offsets.size(); assert( r_sz > 0 ); size_t cntrctd_nnz = R.row_offsets.back(); ///size_t cntrctd_nr = r_sz-1; //allocate cntrctd_csr_data: m_cntrctd_row_offsets = VectorI(r_sz, 0); m_cntrctd_col_indices = VectorI(cntrctd_nnz, 0); thrust::copy(R.row_offsets.begin(), R.row_offsets.end(), m_cntrctd_row_offsets.begin()); cudaCheckError(); thrust::copy(R.column_indices.begin(), R.column_indices.end(), m_cntrctd_col_indices.begin()); cudaCheckError(); thrust::copy(R.values.begin(), R.values.end(), cntrctd_edge_data.begin()); cudaCheckError(); } virtual void update_all(/*In: */const VWrapper& g_vertex_data,//multivalue vertex entry of original graph, size==g_nr /*Out:*/VWrapper& cntrctd_vertex_data,//multivalue vertex entry of contracted graph, size==n_agg==S_nr (assumed allocated!) /*In: */const VWrapper& g_edge_data, //multivalue edge entry of original graph, size==g_nnz /*Out:*/VWrapper& cntrctd_edge_data) //multivalue edge entry of contracted graph, size==nnz(S*G*St) (assumed allocated!) { update_vertex_data(g_vertex_data, cntrctd_vertex_data); update_edge_data(g_edge_data, cntrctd_edge_data); } protected: static cusp::csr_matrix<IndexT, ValT, cusp::device_memory> make_csr_matrix(size_t nc, const VectorI& row_offsets, const VectorI& col_indices, const VWrapper& vals) { size_t nr = row_offsets.size()-1; size_t nz = row_offsets.back(); cusp::csr_matrix<IndexT, ValT, cusp::device_memory> A(nr, nc, nz); //copy: // A.row_offsets = row_offsets; A.column_indices = col_indices; thrust::copy(vals.cbegin(), vals.cend(), A.values.begin()); cudaCheckError(); return A; } static cusp::csr_matrix<IndexT, ValT, cusp::device_memory> make_square_csr_matrix(const VectorI& row_offsets, const VectorI& col_indices, const VWrapper& vals) { size_t nc = row_offsets.size()-1; return make_csr_matrix(nc, row_offsets, col_indices, vals); } private: //Input: // const VectorI& m_g_row_offsets; //original graph CSR data: const VectorI& m_g_col_indices; cusp::csr_matrix<IndexT, ValT, cusp::device_memory> m_S; //aggreagate matrix cusp::csr_matrix<IndexT, ValT, cusp::device_memory> m_St; //aggreagate matrix transpose //Output: // VectorI m_cntrctd_row_offsets; //contracted graph CSR data: VectorI m_cntrctd_col_indices; //I/O: // VertexCombineFctr& m_v_combine; //vertex "multiplication" functor VertexReduceFctr& m_v_reduce; //vertex "addition" functor EdgeCombineFctr& m_e_combine; //edge "multiplication" functor EdgeReduceFctr& m_e_reduce; //edge "addition" functor const size_t m_n_agg; const size_t m_g_nr; // == S_nc const size_t m_g_nnz; const size_t m_s_nnz; }; //generic value updater // template<typename VectorV, //Vector of values typename VectorI, //Vector of indices typename VertexCombineFctr, //vertex "multiplication" functor type typename VertexReduceFctr, //vertex "addition" functor type typename EdgeCombineFctr, //edge "multiplication" functor type typename EdgeReduceFctr, //edge "addition" functor type size_t CTA_SIZE> //only used by the specialized template struct ContractionValueUpdater { typedef typename VectorI::value_type IndexT; //typedef typename VectorPtrT<typename VectorI::value_type,VectorV>::PtrT PtrI; typedef typename VectorV::value_type ValueT; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; //TODO: make template argument: typedef range_view<PtrV> VWrapper; //v_src, v_dest assumed pre-allocated! 
// ContractionValueUpdater(/*const */VectorV& v_src, VectorV& v_dest, VertexCombineFctr& v_combine, VertexReduceFctr& v_reduce, EdgeCombineFctr& e_combine, EdgeReduceFctr& e_reduce): v_s_(v_src), v_d_(v_dest), m_v_combine(v_combine), m_v_reduce(v_reduce), m_e_combine(e_combine), m_e_reduce(e_reduce) { } //TODO: more efficient solution with VWrapper, to avoid device memory traffic // void update_from(///Hash_Workspace<IndexT,ValueT>& hash_wk,//only used by the specialized template ///size_t num_aggregates,//only used by the specialized template const VectorI& R_row_offsets, const VectorI& R_column_indices, const VectorI& g_row_offsets, const VectorI& g_col_indices) ///const VectorI& aggregates,//only used by the specialized template ///const VectorI& cg_row_offsets,//only used by the specialized template ///const VectorI& cg_col_indices,//only used by the specialized template ///const VectorI& Ac_pos)//only used by the specialized template { // PtrI ptr(&seq[0]); // int* raw_ptr = ptr.get(); // PtrI ptr0(raw_ptr); // range_view<PtrI> rv0(ptr0, ptr0+n); size_t n_s = v_s_.size(); PtrV ptr_src(&v_s_[0]); //ValueT* p_s = v_s_.data().get(); VWrapper g_edge_data(ptr_src, ptr_src+n_s); ///VWrapper g_edge_data(v_s_.cbegin(), v_s_.cend());//nope... size_t n_d = v_d_.size(); PtrV ptr_dst(&v_d_[0]); //ValueT* p_d = v_d_.data().get(); VWrapper cg_edge_data(ptr_dst, ptr_dst+n_d); //R == S // SemiringContractionUtilities<VectorI, VectorV, VWrapper,VertexCombineFctr,VertexReduceFctr,EdgeCombineFctr,EdgeReduceFctr> sr(g_row_offsets, g_col_indices, R_row_offsets, R_column_indices, m_v_combine, m_v_reduce, m_e_combine, m_e_reduce); sr.update_edge_data(g_edge_data, cg_edge_data); } const VectorV& get_cg_vals(void) const { return v_d_; } private: /*const */VectorV& v_s_; VectorV& v_d_; VertexCombineFctr& m_v_combine; VertexReduceFctr& m_v_reduce; EdgeCombineFctr& m_e_combine; EdgeReduceFctr& m_e_reduce; }; //partial specialization for (Combine, Reduce) == (*,+) // // template<typename VectorV, // typename VectorI, // size_t CTA_SIZE> // struct ContractionValueUpdater<VectorV, // VectorI, // thrust::multiplies<typename VectorV::value_type>, // thrust::plus<typename VectorV::value_type>, // thrust::multiplies<typename VectorV::value_type>, // thrust::plus<typename VectorV::value_type>, // CTA_SIZE> // { // typedef typename VectorI::value_type IndexT; // //typedef typename VectorPtrT<typename VectorI::value_type,VectorV>::PtrT PtrI; // typedef typename VectorV::value_type ValueT; // typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; // //v_src, v_dest assumed pre-allocated! 
// // // ContractionValueUpdater(/*const */VectorV& v_src, // VectorV& v_dest, // thrust::multiplies<ValueT>& , // thrust::plus<ValueT>& , // thrust::multiplies<ValueT>& , // thrust::plus<ValueT>& ): // v_s_(v_src), // v_d_(v_dest) // { // } // void update_from(Hash_Workspace<IndexT,ValueT>& hash_wk, // size_t num_aggregates, // const VectorI& R_row_offsets, // const VectorI& R_column_indices, // const VectorI& g_row_offsets, // const VectorI& g_col_indices, // const VectorI& aggregates, // const VectorI& cg_row_offsets, // const VectorI& cg_col_indices, // const VectorI& Ac_pos) // { // fill_A_dispatch<CTA_SIZE>(hash_wk, // num_aggregates, // R_row_offsets.data().get(), // R_column_indices.data().get(), // g_row_offsets.data().get(), // g_col_indices.data().get(), // v_s_.data().get(), // aggregates.data().get(), // cg_row_offsets.data().get(), // cg_col_indices.data().get(), // thrust::raw_pointer_cast( &Ac_pos.front() ), // v_d_.data().get()); // cudaCheckError(); // } // const VectorV& get_cg_vals(void) const // { // return v_d_; // } // private: // /*const */VectorV& v_s_; // VectorV& v_d_; // }; template<typename VectorI, typename VectorV, typename VertexCombineFctr, //vertex "multiplication" functor type typename VertexReduceFctr, //vertex "addition" functor type typename EdgeCombineFctr, //edge "multiplication" functor type typename EdgeReduceFctr, //edge "addition" functor type typename VectorB = VectorI, size_t CTA_SIZE = 128> struct GraphContractionFunctor { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; typedef typename VectorB::value_type ValueB; typedef typename VectorPtrT<typename VectorB::value_type,VectorB>::PtrT PtrB; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; // num_aggregates != m_aggregates.size()!!! // Need m_num_aggregates const member // GraphContractionFunctor(size_t g_n_vertices, const VectorI& aggregates, /*const */ size_t num_aggregates, VertexCombineFctr& v_combine, VertexReduceFctr& v_reduce, EdgeCombineFctr& e_combine, EdgeReduceFctr& e_reduce): m_num_rows(g_n_vertices), m_aggregates(aggregates), m_num_aggregates(num_aggregates), m_v_combine(v_combine), m_v_reduce(v_reduce), m_e_combine(e_combine), m_e_reduce(e_reduce) { computeRestrictionOperator(); cudaCheckError(); } virtual ~GraphContractionFunctor(void) { } const VectorI& get_aggregates(void) const { return m_aggregates; } size_t get_num_aggregates(void) const { return m_num_aggregates; } const VectorI& get_R_row_offsets(void) const { return m_R_row_offsets; } const VectorI& get_R_column_indices(void) const { return m_R_column_indices; } VertexCombineFctr& get_v_combine(void) { return m_v_combine; } VertexReduceFctr& get_v_reduce(void) { return m_v_reduce; } EdgeCombineFctr& get_e_combine(void) { return m_e_combine; } EdgeReduceFctr& get_e_reduce(void) { return m_e_reduce; } protected: void computeRestrictionOperator(void) { size_t n_aggregates = m_num_aggregates;//nope: m_aggregates.size(); m_R_row_offsets.resize(n_aggregates+1);//create one more row for the pseudo aggregate (?) 
VectorI R_row_indices(m_aggregates); m_R_column_indices.resize(m_num_rows); thrust::sequence(m_R_column_indices.begin(),m_R_column_indices.end()); cudaCheckError(); thrust::sort_by_key(R_row_indices.begin(),R_row_indices.end(),m_R_column_indices.begin()); cudaCheckError(); thrust::lower_bound(R_row_indices.begin(), R_row_indices.end(), thrust::counting_iterator<ValueT>(0), thrust::counting_iterator<ValueT>(m_R_row_offsets.size()), m_R_row_offsets.begin()); cudaCheckError(); } //code "parked" for the time being; //it uses the AMGX approach which has a bug //un-debuggable due to nvcc failure with -g -G pair //(bug: https://nvbugswb.nvidia.com/NvBugs5/SWBug.aspx?bugid=1813290&cmtNo) // struct NoValueUpdater { void update_from(///Hash_Workspace<IndexT,ValueT>& hash_wk, ///size_t num_aggregates, const VectorI& R_row_offsets, const VectorI& R_column_indices, const VectorI& g_row_offsets, const VectorI& g_col_indices) ///const VectorI& aggregates, ///const VectorI& cg_row_offsets, ///const VectorI& cg_col_indices, ///const VectorI& Ac_pos) { //no-op... } }; virtual void operator() (VectorI& g_row_ptr_, VectorI& g_col_ind_) { NoValueUpdater updater;//dummy object... contract(g_row_ptr_, g_col_ind_, updater); } virtual void operator () (VectorV& g_vals_, VectorI& g_row_ptr_, VectorI& g_col_ind_) { ContractionValueUpdater<VectorV, VectorI, VertexCombineFctr, VertexReduceFctr, EdgeCombineFctr, EdgeReduceFctr, CTA_SIZE> updater(g_vals_, m_cg_values, m_v_combine, m_v_reduce, m_e_combine, m_e_reduce); contract(g_row_ptr_, g_col_ind_, updater); } const VectorI& get_row_ptr(void) const { return m_cg_row_offsets; } const VectorI& get_col_ind(void) const { return m_cg_col_indices; } IndexT get_subg_nnz(void) const { return m_cg_row_offsets.back(); } template<typename ValUpdaterFctr> void contract(VectorI& g_row_offsets, //contracted VectorI& g_col_indices, //contracted ValUpdaterFctr fctrv) { //notation mapping from AMGX->nvGRAPH: // //S (Restriction) matrix data: //R_row_offsets -> m_R_row_offsets //R_column_indices -> m_R_column_indices // //Graph matrix data: //A.row_offsets -> g_row_offsets //A.col_indices -> g_col_indices // //Contracted matrix data: //Ac.row_offsets -> m_cg_row_offsets //Ac.col_indices -> m_cg_col_indices // //num_aggregates != m_aggregates.size()!!! // ///size_t num_aggregates = m_aggregates.size(); //nope... //size_t sz_aggregates = m_aggregates.size(); // TODO: check why no size() for amgx::IVector m_cg_row_offsets.resize( m_num_aggregates+1 ); //##### update topology: //{ // Hash_Workspace<IndexT,ValueT> hash_wk; // compute_sparsity_dispatch<CTA_SIZE, false, true>(hash_wk, // m_num_aggregates,//????? // m_R_row_offsets.data().get(), // m_R_column_indices.data().get(), // g_row_offsets.data().get(), // g_col_indices.data().get(), // m_aggregates.data().get(), // m_cg_row_offsets.data().get(), // static_cast<IndexT*>(0), //ok // static_cast<IndexT*>(0));//ok // cudaCheckError(); // // Compute the number of non-zeroes. // thrust::exclusive_scan( m_cg_row_offsets.begin(), m_cg_row_offsets.end(), m_cg_row_offsets.begin() ); // cudaCheckError(); ///IndexT nonzero_blocks = m_cg_row_offsets[m_num_aggregates]; // // Vector to store the positions in the hash table. ///VectorI Ac_pos(nonzero_blocks); // compute_sparsity_dispatch<CTA_SIZE, false, false>(hash_wk, // m_num_aggregates,///????? 
// m_R_row_offsets.data().get(), // m_R_column_indices.data().get(), // g_row_offsets.data().get(), // g_col_indices.data().get(), // m_aggregates.data().get(), // m_cg_row_offsets.data().get(), // m_cg_col_indices.data().get(), // thrust::raw_pointer_cast( &Ac_pos.front() )); // cudaCheckError(); //} end update topology //##### update values: //{ //act (or not) on values: // fctrv.update_from(///hash_wk, ///m_num_aggregates,///????? m_R_row_offsets, m_R_column_indices, g_row_offsets, g_col_indices); ///m_aggregates, ///m_cg_row_offsets, ///m_cg_col_indices, ///Ac_pos); //}end update values } private: size_t m_num_rows; // number of vertices in the original graph VectorI m_aggregates; // labels of vertices to be collapsed (vertices with same label will be collapsed into one) const size_t m_num_aggregates; // != m_aggregates.size() !!! //Restrictor CSR info //Restrictor = S "matrix" in algorithm 4.5 in "Graph Algorithms in the language of Linear Algebra") VectorI m_R_row_offsets; VectorI m_R_column_indices; //Contracted graph data: VectorI m_cg_row_offsets; VectorI m_cg_col_indices; VectorV m_cg_values; //Contraction functors: // VertexCombineFctr& m_v_combine; VertexReduceFctr& m_v_reduce; EdgeCombineFctr& m_e_combine; EdgeReduceFctr& m_e_reduce; }; namespace{ //unnamed.. template<typename VectorI> size_t validate_contractor_input(const VectorI& v, size_t g_nrows) { typedef typename VectorI::value_type IndexT; typedef typename VectorI::iterator Iterator; size_t n = v.size(); if( n == 0 ) FatalError("0-sized array input in graph contraction.",NVGRAPH_ERR_BAD_PARAMETERS); if( n != g_nrows ) FatalError("Aggregate array size must match number of vertices of original graph",NVGRAPH_ERR_BAD_PARAMETERS); //find min/max values in aggregates... //and check if min==0 and max <= g_nrows-1... VectorI res(v);//copy cudaCheckError(); thrust::pair<Iterator, Iterator> result = thrust::minmax_element(res.begin(), res.end()); if( *result.first != 0 ) FatalError("Aggregate array values must start from 0.",NVGRAPH_ERR_BAD_PARAMETERS); cudaCheckError(); if( static_cast<size_t>(*result.second) > g_nrows-1 ) FatalError("Aggregate array values must be less than number of vertices of original graph.",NVGRAPH_ERR_BAD_PARAMETERS); //then make sure all values in between are covered... //use count_distinct() and see if there are max-min+1 size_t n_expected = *result.second - *result.first + 1; thrust::sort(res.begin(), res.end()); cudaCheckError(); size_t counts = thrust::distance(res.begin(), thrust::unique(res.begin(), res.end())); cudaCheckError(); if( counts != n_expected ) FatalError("Aggregate array intermediate values (between 0 and max(aggregates)) are missing.",NVGRAPH_ERR_BAD_PARAMETERS); //return # aggregates (not to be confused with aggregates.size()!) return n_expected; } }//end unnamed namespace //(the C header will have something similar) //add more enums for additional Functor Types; // //CAVEAT: NrFctrTypes MUST be last in enum! //additions can be made anywhere between enum...=0 and NrFctrTypes! // typedef enum{Multiply=0, Sum, Min, Max, NrFctrTypes} SemiRingFunctorTypes; //Partial specialization to select proper //functor through an integer, at compile time (?) 
// template<SemiRingFunctorTypes, typename ValueT> struct SemiRingFctrSelector; template<typename ValueT> struct SemiRingFctrSelector<Multiply, ValueT> { typedef typename thrust::multiplies<ValueT> FctrType; }; template<typename ValueT> struct SemiRingFctrSelector<Sum, ValueT> { typedef typename thrust::plus<ValueT> FctrType; }; template<typename ValueT> struct SemiRingFctrSelector<Min, ValueT> { typedef typename thrust::minimum<ValueT> FctrType; }; template<typename ValueT> struct SemiRingFctrSelector<Max, ValueT> { typedef typename thrust::maximum<ValueT> FctrType; }; //...add more specializations for additional Functor Types //Acyclic Visitor // (A. Alexandrescu, "Modern C++ Design", Section 10.4), // where *concrete* Visitors must be parameterized by all // the possibile template args of the Visited classes (visitees); // //Visitor for SubGraph extraction: // template<typename VectorI, typename VectorV, typename VertexCombineFctr, //vertex "multiplication" functor type typename VertexReduceFctr, //vertex "addition" functor type typename EdgeCombineFctr, //edge "multiplication" functor type typename EdgeReduceFctr> //edge "addition" functor type> struct GraphContractionVisitor: VisitorBase, Visitor<Graph<typename VectorI::value_type> >, Visitor<CsrGraph<typename VectorI::value_type> >, Visitor<ValuedCsrGraph<typename VectorI::value_type, typename VectorV::value_type> >, Visitor<MultiValuedCsrGraph<typename VectorI::value_type, typename VectorV::value_type> > { typedef typename VectorI::value_type IndexType_; typedef typename VectorV::value_type ValueType_; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; typedef range_view<PtrV> VWrapper; typedef GraphContractionFunctor<VectorI, VectorV, VertexCombineFctr, VertexReduceFctr, EdgeCombineFctr, EdgeReduceFctr > CFunctor; //TODO: avoid copy from raw pointer // GraphContractionVisitor(CsrGraph<IndexType_>& graph, const VectorI& aggregates, /*const */ cudaStream_t stream, VertexCombineFctr& v_combine, VertexReduceFctr& v_reduce, EdgeCombineFctr& e_combine, EdgeReduceFctr& e_reduce): m_g_row_ptr_(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+graph.get_num_vertices()+1), m_g_col_ind_(graph.get_raw_column_indices(), graph.get_raw_column_indices()+graph.get_num_edges()), // num_aggregates != m_aggregates.size()!!! // need to calculate num_aggregates (validate_..() does it) // and pass it to contractor: // contractor_(graph.get_num_vertices(), aggregates, validate_contractor_input(aggregates, graph.get_num_vertices()), v_combine, v_reduce, e_combine, e_reduce), stream_(stream), contracted_graph_(0) { cudaCheckError(); //empty... } void Visit(Graph<IndexType_>& graph) { //no-op... 
} void Visit(CsrGraph<IndexType_>& graph_src) { //(non-AMGX version): //SemiRing::update_topology(contractor_.get_row_ptr(), contractor_.get_col_ind()); typedef typename SemiRingFctrSelector<Multiply, ValueType_>::FctrType MultiplyFctr; typedef typename SemiRingFctrSelector<Sum, ValueType_>::FctrType SumFctr; MultiplyFctr mult; SumFctr sum; SemiringContractionUtilities<VectorI, VectorV, VWrapper,MultiplyFctr,SumFctr,MultiplyFctr,SumFctr> sr(m_g_row_ptr_, m_g_col_ind_, contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), mult, sum, mult, sum); sr.update_topology_only(); ///contractor_(m_g_row_ptr_, m_g_col_ind_);//just drop it, no-op, here, all work done by sr size_t rowptr_sz = sr.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t contrctd_nrows = rowptr_sz-1; size_t contrctd_nnz = sr.get_subg_nnz(); if( contracted_graph_ ) delete contracted_graph_; contracted_graph_ = new CsrGraph<IndexType_>(contrctd_nrows, contrctd_nnz, stream_); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(sr.get_row_ptr().begin(), sr.get_row_ptr().end(), contracted_graph_->get_raw_row_offsets()); cudaCheckError(); thrust::copy(sr.get_col_ind().begin(), sr.get_col_ind().end(), contracted_graph_->get_raw_column_indices()); cudaCheckError(); } void Visit(ValuedCsrGraph<IndexType_,ValueType_>& graph_src) { size_t g_nrows = graph_src.get_num_vertices(); size_t g_nnz = graph_src.get_num_edges(); VectorV vals(graph_src.get_raw_values(), graph_src.get_raw_values()+g_nnz); //(non-AMGX version): //SemiRing::update_topology(contractor_.get_row_ptr(), contractor_.get_col_ind()); typedef typename SemiRingFctrSelector<Multiply, ValueType_>::FctrType MultiplyFctr; typedef typename SemiRingFctrSelector<Sum, ValueType_>::FctrType SumFctr; MultiplyFctr mult; SumFctr sum; SemiringContractionUtilities<VectorI, VectorV, VWrapper,MultiplyFctr,SumFctr,MultiplyFctr,SumFctr> sr(m_g_row_ptr_, m_g_col_ind_, contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), mult, sum, mult, sum); sr.update_topology_only(); ///contractor_(vals, m_g_row_ptr_, m_g_col_ind_);//just drop it, no-op, here, all work done by sr and updater, below size_t rowptr_sz = sr.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t contrctd_nrows = rowptr_sz-1; size_t contrctd_nnz = sr.get_subg_nnz(); ValuedCsrGraph<IndexType_,ValueType_>* subg = new ValuedCsrGraph<IndexType_,ValueType_>(contrctd_nrows, contrctd_nnz, stream_); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(sr.get_row_ptr().begin(), sr.get_row_ptr().end(), subg->get_raw_row_offsets()); cudaCheckError(); thrust::copy(sr.get_col_ind().begin(), sr.get_col_ind().end(), subg->get_raw_column_indices()); cudaCheckError(); //handling the values: // VertexCombineFctr v_combine; VertexReduceFctr v_reduce; EdgeCombineFctr e_combine; EdgeReduceFctr e_reduce; //TODO: more efficient solution with VWrapper, to avoid device memory traffic // VectorV cg_values(subg->get_raw_values(), subg->get_raw_values()+contrctd_nnz); ContractionValueUpdater<VectorV,//VWrapper? 
VectorI, VertexCombineFctr, VertexReduceFctr, EdgeCombineFctr, EdgeReduceFctr, 128>//useless...; only used with AMGX version updater(vals, cg_values, v_combine, v_reduce, e_combine, e_reduce); updater.update_from(contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), m_g_row_ptr_, m_g_col_ind_); //TODO: more efficient solution with VWrapper, to avoid device memory traffic // thrust::copy(cg_values.begin(), cg_values.end(), subg->get_raw_values()); cudaCheckError(); if( contracted_graph_ ) delete contracted_graph_; contracted_graph_ = subg; } void Visit(MultiValuedCsrGraph<IndexType_,ValueType_>& graph_src) { //(non-AMGX version): //SemiRing::update_topology(contractor_.get_row_ptr(), contractor_.get_col_ind()); typedef typename SemiRingFctrSelector<Multiply, ValueType_>::FctrType MultiplyFctr; typedef typename SemiRingFctrSelector<Sum, ValueType_>::FctrType SumFctr; MultiplyFctr mult; SumFctr sum; SemiringContractionUtilities<VectorI, VectorV, VWrapper,MultiplyFctr,SumFctr,MultiplyFctr,SumFctr> sr(m_g_row_ptr_, m_g_col_ind_, contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), mult, sum, mult, sum); cudaCheckError(); sr.update_topology_only(); cudaCheckError(); ///contractor_(m_g_row_ptr_, m_g_col_ind_);//just drop it, no-op, here, all work done by sr and reduce_*_data(), below //construct the contracted graph out of contractor_ newly acquired data size_t rowptr_sz = sr.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t contrctd_nrows = rowptr_sz-1; size_t contrctd_nnz = sr.get_subg_nnz(); cudaCheckError(); if( contracted_graph_ ) delete contracted_graph_; cudaCheckError(); MultiValuedCsrGraph<IndexType_,ValueType_>* mv_cntrctd_graph = new MultiValuedCsrGraph<IndexType_,ValueType_>(contrctd_nrows, contrctd_nnz, stream_); cudaCheckError(); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(sr.get_row_ptr().begin(), sr.get_row_ptr().end(), mv_cntrctd_graph->get_raw_row_offsets()); cudaCheckError(); thrust::copy(sr.get_col_ind().begin(), sr.get_col_ind().end(), mv_cntrctd_graph->get_raw_column_indices()); cudaCheckError(); //reduce vertex and edge data for the contracted graph reduce_vertex_data(graph_src, *mv_cntrctd_graph); reduce_edge_data(graph_src, *mv_cntrctd_graph); contracted_graph_ = mv_cntrctd_graph; } const CFunctor& get_contractor(void) const { return contractor_; } CsrGraph<IndexType_>* get_contracted_graph(void) // TODO: change to unique_ptr, when moving to C++1* { return contracted_graph_; } const VectorI& get_aggregates(void) const { return contractor_.get_aggregates(); } protected: //virtual reductors for contracted vertices and edges: // virtual void reduce_vertex_data(MultiValuedCsrGraph<IndexType_,ValueType_>& graph_src, MultiValuedCsrGraph<IndexType_,ValueType_>& graph_dest) { SemiringContractionUtilities<VectorI, VectorV, VWrapper,VertexCombineFctr,VertexReduceFctr,EdgeCombineFctr,EdgeReduceFctr> sr(m_g_row_ptr_, m_g_col_ind_, contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), contractor_.get_v_combine(), contractor_.get_v_reduce(), contractor_.get_e_combine(), contractor_.get_e_reduce()); cudaCheckError(); if ( graph_dest.get_num_vertices() == 0 ) FatalError("Empty contracted graph (no vertices).",NVGRAPH_ERR_BAD_PARAMETERS); //allocate graph_dest vertex data and fill it: // size_t ng = graph_src.get_num_vertex_dim(); graph_dest.allocateVertexData(ng, stream_); cudaCheckError(); for(unsigned int i=0;i<ng;++i) { Vector<ValueType_>& v_src = graph_src.get_vertex_dim(i); 
Vector<ValueType_>& v_dest = graph_dest.get_vertex_dim(i); size_t n_src = v_src.get_size(); PtrV ptr_src(v_src.raw()); VWrapper rv_src(ptr_src, ptr_src+n_src); size_t n_dest = v_dest.get_size(); assert( graph_dest.get_num_vertices() == n_dest ); PtrV ptr_dest(v_dest.raw()); VWrapper rv_dest(ptr_dest, ptr_dest+n_dest); sr.update_vertex_data(rv_src, rv_dest); cudaCheckError(); } } virtual void reduce_edge_data(MultiValuedCsrGraph<IndexType_,ValueType_>& graph_src, MultiValuedCsrGraph<IndexType_,ValueType_>& graph_dest) { SemiringContractionUtilities<VectorI, VectorV, VWrapper,VertexCombineFctr,VertexReduceFctr,EdgeCombineFctr,EdgeReduceFctr> sr(m_g_row_ptr_, m_g_col_ind_, contractor_.get_R_row_offsets(), contractor_.get_R_column_indices(), contractor_.get_v_combine(), contractor_.get_v_reduce(), contractor_.get_e_combine(), contractor_.get_e_reduce()); cudaCheckError(); //There can be a contracted graph with no edges, //but such a case warrants a warning: // if ( graph_dest.get_num_edges() == 0 ) WARNING("Contracted graph is disjointed (no edges)"); //allocate graph_dest edge data and fill it: // size_t ng = graph_src.get_num_edge_dim(); graph_dest.allocateEdgeData(ng, stream_); cudaCheckError(); for(unsigned int i=0;i<ng;++i) { Vector<ValueType_>& v_src = graph_src.get_edge_dim(i); Vector<ValueType_>& v_dest = graph_dest.get_edge_dim(i); size_t n_src = v_src.get_size(); PtrV ptr_src(v_src.raw()); VWrapper rv_src(ptr_src, ptr_src+n_src); size_t n_dest = v_dest.get_size(); assert( graph_dest.get_num_edges() == n_dest ); PtrV ptr_dest(v_dest.raw()); VWrapper rv_dest(ptr_dest, ptr_dest+n_dest); sr.update_edge_data(rv_src, rv_dest); cudaCheckError(); } } private: VectorI m_g_row_ptr_; VectorI m_g_col_ind_; CFunctor contractor_; cudaStream_t stream_; CsrGraph<IndexType_>* contracted_graph_; // to be constructed }; //###################################################### Nested-if-then-else solution: // //easier on number of recursive template instantiations //i.e., less-likely to run into compilation problems like: //'error: excessive recursion at instantiation of function ...'; //or the newly(as of cuda8.0) available flag: -ftemplate-depth <depth> // //generic empty template: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t Level, size_t n, size_t N> struct NestedTypedIfThenElser; //Level 3 (ceiling of recursion): // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t n, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 3, n, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i4 == n )//reached both ceiling of Level recursion and bottom of n value recursion { ///std::cout<<"OK: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") hit!\n";//stop, everything hit... 
typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)n, ValueT>::FctrType T4; typedef T1 VertexCombineFctr; typedef T2 VertexReduceFctr; typedef T3 EdgeCombineFctr; typedef T4 EdgeReduceFctr; VertexCombineFctr v_combine; VertexReduceFctr v_reduce; EdgeCombineFctr e_combine; EdgeReduceFctr e_reduce; GraphContractionVisitor<VectorI, VectorV, VertexCombineFctr, VertexReduceFctr, EdgeCombineFctr, EdgeReduceFctr> visitor(graph, aggregates, stream, v_combine, v_reduce, e_combine, e_reduce); cudaCheckError(); graph.Accept(visitor); cudaCheckError(); return visitor.get_contracted_graph(); } else //continue with same level (3), but next decreasing n value return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 3, n-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream); } }; //Level 3 bottom: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 3, 0, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i4 == 0 ) { ///std::cout<<"OK: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") hit!\n";//stop, everything hit... typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)0, ValueT>::FctrType T4; typedef T1 VertexCombineFctr; typedef T2 VertexReduceFctr; typedef T3 EdgeCombineFctr; typedef T4 EdgeReduceFctr; VertexCombineFctr v_combine; VertexReduceFctr v_reduce; EdgeCombineFctr e_combine; EdgeReduceFctr e_reduce; GraphContractionVisitor<VectorI, VectorV, VertexCombineFctr, VertexReduceFctr, EdgeCombineFctr, EdgeReduceFctr> visitor(graph, aggregates, stream, v_combine, v_reduce, e_combine, e_reduce); graph.Accept(visitor); return visitor.get_contracted_graph(); } else { std:: stringstream ss; ss<<"ERROR: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") not hit on Level 3."; FatalError(ss.str().c_str(),NVGRAPH_ERR_BAD_PARAMETERS); //return 0; } } }; //Level 2 generic: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t n, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 2, n, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i3 == n ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)n, ValueT>::FctrType RT;//replace T3! return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, RT, 3, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (3) //with 1st possible value (N-1) } else return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 2, n-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with same level (2), but next decreasing n value } }; //Level 2 bottom: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 2, 0, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i3 == 0 ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)0, ValueT>::FctrType RT;//replace T3! 
return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, RT, 3, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (3) //with 1st possible value (N-1) } else { std:: stringstream ss; ss<<"ERROR: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") not hit on Level 2."; FatalError(ss.str().c_str(),NVGRAPH_ERR_BAD_PARAMETERS); //return 0; } } }; //Level 1 generic: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t n, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 1, n, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i2 == n ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)n, ValueT>::FctrType RT;//replace T2! return NestedTypedIfThenElser<VectorI, VectorV, T1, RT, T3, 2, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (2) //with 1st possible value (N-1) } else return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 1, n-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with same level (1), but next decreasing n value } }; //Level 1 bottom: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 1, 0, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i2 == 0 ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)0, ValueT>::FctrType RT;//replace T2! return NestedTypedIfThenElser<VectorI, VectorV, T1, RT, T3, 2, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (2) //with 1st possible value (N-1) } else { std:: stringstream ss; ss<<"ERROR: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") not hit on Level 1."; FatalError(ss.str().c_str(),NVGRAPH_ERR_BAD_PARAMETERS); //return 0; } } }; //Level 0 generic: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t n, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 0, n, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i1 == n ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)n, ValueT>::FctrType RT;//replace T1! 
return NestedTypedIfThenElser<VectorI, VectorV, RT, T2, T3, 1, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (1) //with 1st possible value (N-1) } else return NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 0, n-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with same level (0), but next decreasing n value } }; //Level 0 bottom: // template<typename VectorI, typename VectorV, typename T1, typename T2, typename T3, size_t N> struct NestedTypedIfThenElser<VectorI, VectorV, T1, T2, T3, 0, 0, N> { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { if( i1 == 0 ) { typedef typename SemiRingFctrSelector<(SemiRingFunctorTypes)0, ValueT>::FctrType RT;//replace T1! return NestedTypedIfThenElser<VectorI, VectorV, RT, T2, T3, 1, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream);//continue with next increasing level (1) //with 1st possible value (N-1) } else { std:: stringstream ss; ss<<"ERROR: tuple("<<i1<<","<<i2<<","<<i3<<","<<i4<<") not hit on Level 0."; FatalError(ss.str().c_str(),NVGRAPH_ERR_BAD_PARAMETERS); //return 0; } } }; //Wrapper: // //N = # possible (consecutive 0-based) values //that each tuple element can take // template<typename VectorI, typename VectorV, size_t N> struct NestedTypedIfThenElseWrapper { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; struct Unused{};//placeholder to be replaced by actual types static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { return NestedTypedIfThenElser<VectorI, VectorV, Unused, Unused, Unused, 0, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream); } }; template<typename VectorI, typename VectorV, typename T1, size_t N> struct NestedTypedIfThenElseWrapperT { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; struct Unused{};//placeholder to be replaced by actual types static CsrGraph<IndexT>* iffer(size_t i1, size_t i2, size_t i3, size_t i4, CsrGraph<IndexT>& graph, VectorI& aggregates, cudaStream_t stream) { return NestedTypedIfThenElser<VectorI, VectorV, T1, Unused, Unused, 1, N-1, N>::iffer(i1, i2, i3, i4, graph, aggregates, stream); } }; template<typename IndexT, typename ValueT> CsrGraph<IndexT>* contract_from_aggregates(CsrGraph<IndexT>& graph, IndexT* p_aggregates, size_t n, cudaStream_t stream, const SemiRingFunctorTypes& vCombine, const SemiRingFunctorTypes& vReduce, const SemiRingFunctorTypes& eCombine, const SemiRingFunctorTypes& eReduce) { typedef thrust::device_vector<IndexT> VectorI; typedef thrust::device_vector<ValueT> VectorV; VectorI aggregates(p_aggregates, p_aggregates+n); //Nested if-then-else solution: // //(no need for constness, they're NOT template args) // return NestedTypedIfThenElseWrapper<VectorI, VectorV, NrFctrTypes>::iffer((size_t)vCombine, (size_t)vReduce, (size_t)eCombine, (size_t)eReduce, graph, aggregates, stream); //Flatened if-then-else solution: // //const size_t M = NrFctrTypes; //const size_t M2 = M*M; //const size_t M3 = M2*M; //size_t i // = (size_t)vCombine * M3 // + (size_t)vReduce * M2 // + (size_t)eCombine * M // + (size_t)eReduce; //return Selector<NComboTypes-1, NrFctrTypes, VectorI, VectorV>::iffer(i, graph, aggregates, stream); } template<typename IndexT, 
typename ValueT, typename T> CsrGraph<IndexT>* contract_from_aggregates_t(CsrGraph<IndexT>& graph, IndexT* p_aggregates, size_t n, cudaStream_t stream, const SemiRingFunctorTypes& vCombine, const SemiRingFunctorTypes& vReduce, const SemiRingFunctorTypes& eCombine, const SemiRingFunctorTypes& eReduce) { typedef thrust::device_vector<IndexT> VectorI; typedef thrust::device_vector<ValueT> VectorV; VectorI aggregates(p_aggregates, p_aggregates+n); //Nested if-then-else solution: // //(no need for constness, they're NOT template args) // return NestedTypedIfThenElseWrapperT<VectorI, VectorV, T, NrFctrTypes>::iffer((size_t)vCombine, (size_t)vReduce, (size_t)eCombine, (size_t)eReduce, graph, aggregates, stream); //Flatened if-then-else solution: // //const size_t M = NrFctrTypes; //const size_t M2 = M*M; //const size_t M3 = M2*M; //size_t i // = (size_t)vCombine * M3 // + (size_t)vReduce * M2 // + (size_t)eCombine * M // + (size_t)eReduce; //return Selector<NComboTypes-1, NrFctrTypes, VectorI, VectorV>::iffer(i, graph, aggregates, stream); } } #endif
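To make the contraction path above easier to follow, here is a small host-side sketch (illustration only, not nvgraph code) of the restriction operator S that GraphContractionFunctor::computeRestrictionOperator() builds from the aggregates array: row i of S lists the vertices whose aggregate label is i, so S has one nonzero per vertex, and the contracted vertex data is the SpMV S*v while the contracted topology and edge data come from the triple product S*G*St evaluated with the chosen semiring functors. The sketch uses std::vector in place of thrust::device_vector, and the aggregate labels are illustrative.

// restriction_sketch.cpp -- illustration only; mirrors computeRestrictionOperator() on the host
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    // aggregates[v] = label of the coarse vertex that v collapses into
    std::vector<int> aggregates = {0, 1, 0, 1, 2};   // 5 fine vertices, 3 aggregates
    const int n_agg = 3;

    // column indices of S start as the identity permutation 0..n-1 ...
    std::vector<int> R_col(aggregates.size());
    std::iota(R_col.begin(), R_col.end(), 0);

    // ... and are sorted by aggregate label (thrust::sort_by_key in the original)
    std::vector<int> keys(aggregates);
    std::stable_sort(R_col.begin(), R_col.end(),
                     [&](int a, int b) { return aggregates[a] < aggregates[b]; });
    std::sort(keys.begin(), keys.end());

    // row offsets of S via lower_bound, as in the thrust::lower_bound call
    std::vector<int> R_row(n_agg + 1);
    for (int i = 0; i <= n_agg; ++i)
        R_row[i] = static_cast<int>(std::lower_bound(keys.begin(), keys.end(), i) - keys.begin());

    // Expected output: R_row = 0 2 4 5 and R_col = 0 2 1 3 4
    for (int v : R_row) std::cout << v << ' ';
    std::cout << '\n';
    for (int v : R_col) std::cout << v << ' ';
    std::cout << '\n';
    return 0;
}

With S in hand, update_topology_only() and update_edge_data() hand the two sparse products S*G and (S*G)*St to cusp::multiply with the edge combine/reduce functors, and update_vertex_data() is the corresponding SpMV with the vertex functors.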
0
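The last two function templates of the header are the intended entry points; contract_from_aggregates() dispatches on the four semiring functor enums and returns a newly allocated contracted graph. The driver below is a hypothetical sketch, not repository code: it assumes the CsrGraph(nrows, nnz, stream) constructor allocates device-side row offsets and column indices reachable through get_raw_row_offsets()/get_raw_column_indices() (this is how the visitor above fills its own result), and it keeps the aggregates on the host because the wrapper copies them into a thrust::device_vector from raw pointers. Since the input graph carries no values, only the topology path Visit(CsrGraph) is exercised.

// contraction_demo.cu -- hypothetical driver, illustration only
#include "graph_contracting_visitor.hxx"
#include <cuda_runtime.h>
#include <vector>

int main()
{
    // 4-vertex directed cycle stored as CSR on the host
    std::vector<int> h_off = {0, 1, 2, 3, 4};
    std::vector<int> h_ind = {1, 2, 3, 0};
    const size_t nv = 4, nnz = 4;

    cudaStream_t stream = 0;
    nvgraph::CsrGraph<int> g(nv, nnz, stream);   // assumed to allocate device storage
    cudaMemcpy(g.get_raw_row_offsets(), h_off.data(), (nv + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(g.get_raw_column_indices(), h_ind.data(), nnz * sizeof(int), cudaMemcpyHostToDevice);

    // collapse {0,1} into aggregate 0 and {2,3} into aggregate 1
    std::vector<int> aggregates = {0, 0, 1, 1};

    nvgraph::CsrGraph<int>* cg =
        nvgraph::contract_from_aggregates<int, float>(g, aggregates.data(), nv, stream,
                                                      nvgraph::Multiply, nvgraph::Sum,
                                                      nvgraph::Multiply, nvgraph::Sum);
    // cg now describes a 2-vertex contracted graph; the caller owns the pointer
    delete cg;
    return 0;
}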
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_lapack.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once
#include <nvgraph_error.hxx>

namespace nvgraph {

template <typename T>
class Lapack;

template <typename T>
class Lapack
{
private:
    Lapack();
    ~Lapack();
public:
    static void check_lapack_enabled();

    static void gemm(bool transa, bool transb, int m, int n, int k, T alpha, const T * A, int lda, const T * B, int ldb, T beta, T * C, int ldc);

    // special QR for lanczos
    static void sterf(int n, T * d, T * e);
    static void steqr(char compz, int n, T * d, T * e, T * z, int ldz, T * work);

    // QR
    // computes the QR factorization of a general matrix
    static void geqrf (int m, int n, T *a, int lda, T *tau, T *work, int *lwork);
    // Generates the real orthogonal matrix Q of the QR factorization formed by geqrf.
    //static void orgqr( int m, int n, int k, T* a, int lda, const T* tau, T* work, int* lwork );
    // multiply C by implicit Q
    static void ormqr (bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork);
    //static void unmqr (bool right_side, bool transq, int m, int n, int k, T *a, int lda, T *tau, T *c, int ldc, T *work, int *lwork);
    //static void qrf (int n, T *H, T *Q, T *R);

    //static void hseqr (T* Q, T* R, T* eigenvalues,T* eigenvectors, int dim, int ldh, int ldq);
    static void geev(T* A, T* eigenvalues, int dim, int lda);
    static void geev(T* A, T* eigenvalues, T* eigenvectors, int dim, int lda, int ldvr);
    static void geev(T* A, T* eigenvalues_r, T* eigenvalues_i, T* eigenvectors_r, T* eigenvectors_i, int dim, int lda, int ldvr);
};

} // end namespace nvgraph
0
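A hypothetical usage sketch for the wrapper above (not part of the sources): computing the eigenvalues of a small symmetric tridiagonal matrix with sterf. It assumes the wrapper follows standard LAPACK xSTERF semantics (d holds the n diagonal entries, e the n-1 off-diagonal entries, and d is overwritten with the eigenvalues in ascending order) and that check_lapack_enabled() raises an error when the build has no LAPACK backend.

// lapack_sterf_demo.cpp -- hypothetical example, illustration only
#include "nvgraph_lapack.hxx"
#include <iostream>

int main()
{
    const int n = 3;
    double d[n]     = {2.0, 2.0, 2.0};   // diagonal of tridiag(1,2,1)
    double e[n - 1] = {1.0, 1.0};        // off-diagonal

    nvgraph::Lapack<double>::check_lapack_enabled();
    nvgraph::Lapack<double>::sterf(n, d, e);   // d <- eigenvalues (ascending), e is destroyed

    for (int i = 0; i < n; ++i)
        std::cout << d[i] << ' ';              // roughly 0.586 2 3.414
    std::cout << '\n';
    return 0;
}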
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/incidence_graph.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef incidence_graph_hxx #define incidence_graph_hxx #include <iostream> #include <vector> #include <map> #include <iterator> #include <algorithm> #include <sstream> #include <stdexcept> #include <cassert> #define DEBUG_ // namespace nvgraph{ namespace debug{ typedef std::vector<std::vector<int> > MatrixI; //IndexT = index type to store in the incidence Matrix //VertexT = value type to store for each vertex //EdgetT = value type to store for each edge // //Graph stored by inidence matrix //for DEBUGGING purposes, only //(of small graphs) // template<typename IndexT, typename VertexT, typename EdgeT> struct Graph { typedef IndexT TypeI; typedef VertexT TypeV; typedef EdgeT TypeE; Graph(void): nrows_(0), ncols_(0) { } explicit Graph(const MatrixI& incidence): nrows_(incidence.size()), ncols_(incidence[0].size()),//throws on empty incidence! incidence_(incidence) { //construct the other members? } virtual ~Graph(){} void add_vertex(const VertexT& value) { //add row and column: ++nrows_; ++ncols_; for(typename MatrixI::iterator row=incidence_.begin();row!=incidence_.end();++row) { (*row).push_back(IndexT(0)); } // for(auto& row:incidence_) // { // row.push_back(IndexT(0)); // } incidence_.push_back(std::vector<IndexT>(ncols_,IndexT(0))); vertex_values_.push_back(value); } void add_edge(const EdgeT& value, const std::pair<IndexT,IndexT>& endpoints /*first = source, second=sink*/) { IndexT i = endpoints.first; IndexT j = endpoints.second; incidence_[i][j] = IndexT(1); edge_values_.insert(std::make_pair(endpoints,value)); } friend std::ostream& operator<<(std::ostream& os, const Graph& g) { g.print(os); return os; } const MatrixI& get_incidence(void) const { return incidence_; } MatrixI& get_incidence(void) { return incidence_; } size_t get_nrows(void) const { return nrows_; } size_t& get_nrows(void) { return nrows_; } size_t get_ncols(void) const { return ncols_; } size_t& get_ncols(void) { return ncols_; } size_t get_nnz(void) const { return edge_values_.size(); } const std::map<std::pair<IndexT, IndexT>, EdgeT>& get_edges(void) const { return edge_values_; } //must be public (for CsrGraph(Graph&))...why? 
std::map<std::pair<IndexT, IndexT>, EdgeT>& get_edges(void) { return edge_values_; } std::vector<VertexT>& get_vertices(void) { return vertex_values_; } protected: struct RowPrinter { explicit RowPrinter(std::ostream& o): m_os(o) { } void operator()(const std::vector<IndexT>& row) { std::copy(row.begin(), row.end(), std::ostream_iterator<IndexT>(m_os, ",")); m_os<<"\n"; } private: std::ostream& m_os; }; void print_incidence(std::ostream& os) const { os<<"(nr,nc):("<<nrows_<<","<<ncols_<<")\n"; RowPrinter rprint(os); std::for_each(incidence_.begin(), incidence_.end(), rprint); // std::for_each(incidence_.begin(), incidence_.end(), [&os](const std::vector<IndexT>& row){ // std::copy(row.begin(), row.end(), std::ostream_iterator<IndexT>(os, ",")); // os<<"\n"; // }); } void print_vertices(std::ostream& os) const { int i=0; for(typename std::vector<VertexT>::const_iterator it=vertex_values_.begin(); it!=vertex_values_.end(); ++it) { os<<"v["<<i<<"]="<<*it<<","; ++i; } // for(auto entry:vertex_values_) // { // os<<"v["<<i<<"]="<<entry<<","; // ++i; // } os<<"\n"; } void print_edges(std::ostream& os) const { for(typename std::map<std::pair<IndexT, IndexT>, EdgeT>::const_iterator it=edge_values_.begin(); it!=edge_values_.end(); ++it) { os<<"("<<it->first.first<<","<<it->first.second<<")="<<it->second<<","; } // for(auto entry:edge_values_) // { // os<<"("<<entry.first.first<<","<<entry.first.second<<")="<<entry.second<<","; // } os<<"\n"; } virtual void print(std::ostream& os) const { print_incidence(os); print_vertices(os); print_edges(os); } private: size_t nrows_; size_t ncols_; MatrixI incidence_; std::vector<VertexT> vertex_values_; std::map<std::pair<IndexT, IndexT>, EdgeT> edge_values_; }; //CSR: //for matrix A_{mxn} with nnz non-zero entries: // //vals[nnz]: contains the non-zero entries in order left-right, top-down; // no entry for rows without non-zeros; //row_ptr[m+1]: contains poition in "vals" of first non-zero entry for each row; // last element is nnz; // for empty row i, we repeat info from i+1 in row_ptr //cols_ind[nnz]:contains column of each non-zero entry in vals; // no entry for rows without non-zeros; /* col_ind[j] and vals[j] for j in [row_ptr[i], row_ptr[i+1]-1] represent the column index (unsigned integer) and value of matrix (double) on row i */ // template<typename IndexT, typename VertexT, typename EdgeT> struct CsrGraph: Graph<IndexT, VertexT, EdgeT> { using Graph<IndexT, VertexT, EdgeT>::get_incidence; using Graph<IndexT, VertexT, EdgeT>::get_nrows; using Graph<IndexT, VertexT, EdgeT>::get_ncols; using Graph<IndexT, VertexT, EdgeT>::get_nnz; using Graph<IndexT, VertexT, EdgeT>::get_edges;//not confused by 2 versions of it... using Graph<IndexT, VertexT, EdgeT>::get_vertices; CsrGraph(void):Graph<IndexT, VertexT, EdgeT>() { } explicit CsrGraph(Graph<IndexT, VertexT, EdgeT>& g)://g must be non-const...why? Graph<IndexT, VertexT, EdgeT>(g.get_incidence()) //,get_edges()(g.get_edges()) //fails to compile in initialization list...why? { get_edges() = g.get_edges();//ok! 
get_vertices() = g.get_vertices(); to_csr(); } CsrGraph(const std::vector<EdgeT>& vals, const std::vector<IndexT>& row_ptr, const std::vector<IndexT>& col_ind, const std::vector<VertexT>& vertex_values): vals_(vals), row_ptr_(row_ptr), col_ind_(col_ind) { from_csr(vertex_values); } void from_csr(const std::vector<VertexT>& vertex_values) { ///size_t nnz = col_ind_.size(); size_t nrows = vertex_values.size(); get_nrows() = nrows; get_ncols() = nrows; get_incidence().assign(nrows,std::vector<IndexT>(nrows,IndexT(0))); get_vertices() = vertex_values; for(IndexT i=IndexT(0);i<IndexT(nrows);++i) { for(IndexT j=row_ptr_[i]; j<row_ptr_[i+1];++j) { IndexT k = col_ind_[j]; EdgeT v = vals_[j]; get_incidence()[i][k] = 1; get_edges().insert(std::make_pair(std::make_pair(i,k),v)); } } } void to_csr(void) { size_t nnz = get_nnz(); size_t nrows = get_nrows(); size_t ncols = get_ncols(); //const auto& edges = get_edges(); const std::map<std::pair<IndexT, IndexT>, EdgeT>& edges = get_edges(); vals_.assign(nnz,EdgeT()); row_ptr_.assign(nrows+1,IndexT(0)); row_ptr_[nrows] = IndexT(nnz); col_ind_.assign(nnz,IndexT(0)); const MatrixI& A = get_incidence(); IndexT crt_row_ptr_i(0); IndexT crt_nz_i(0); std::vector<IndexT> all_zeros; all_zeros.reserve(nrows); for(IndexT i=0;i<nrows;++i) { bool first_nz_inrow = true; for(IndexT j=0;j<ncols;++j) { if( A[i][j] != IndexT(0) ) { ///std::pair<IndexT,IndexT> key(i,j);//ok //std::pair<IndexT,IndexT> key = std::make_pair<IndexT,IndexT>(i, j);//fails...why??? //see: http://stackoverflow.com/questions/9641960/c11-make-pair-with-specified-template-parameters-doesnt-compile std::pair<IndexT,IndexT> key = std::make_pair(i, j); typename std::map<std::pair<IndexT, IndexT>, EdgeT>::const_iterator pos = edges.find(key); if (pos == edges.end()) { std::stringstream ss; ss << "ERROR: edge("<<i<<","<<j<<") not found."; throw std::runtime_error(ss.str()); } vals_[crt_nz_i] = pos->second; if (first_nz_inrow) { row_ptr_[crt_row_ptr_i] = crt_nz_i; first_nz_inrow = false; ++crt_row_ptr_i; } col_ind_[crt_nz_i] = j; ++crt_nz_i; }//end if }//end for j //special cases of a row with all zeros: mark it! if (first_nz_inrow) { all_zeros.push_back(i); } }//end for i //handle all zero row cases: fix_zero_rows(all_zeros, row_ptr_); } const std::vector<EdgeT>& get_vals(void) const { return vals_; } std::vector<EdgeT>& get_vals(void) { return vals_; } const std::vector<IndexT>& get_row_ptr(void) const { return row_ptr_; } std::vector<IndexT>& get_row_ptr(void) { return row_ptr_; } const std::vector<IndexT>& get_col_ind(void) const { return col_ind_; } std::vector<IndexT>& get_col_ind(void) { return col_ind_; } friend std::ostream& operator<<(std::ostream& os, const CsrGraph& g) { g.Graph<IndexT, VertexT, EdgeT>::print(os); g.print(os); return os; } void extract_subgraph(std::vector<IndexT>& vertexSubset, CsrGraph& subgraph) const { //check if vertexSubset is sorted increasingly: // if( std::adjacent_find(vertexSubset.begin(), vertexSubset.end(), std::greater<IndexT>()) != vertexSubset.end() )//not sorted in ascending order... 
{ std::sort(vertexSubset.begin(), vertexSubset.end()); //#ifdef DEBUG_ std::copy(vertexSubset.begin(), vertexSubset.end(), std::ostream_iterator<IndexT>(std::cout,",")); std::cout<<std::endl; //#endif } //#ifdef DEBUG_ else { std::cout<<"was sorted...\n"; } //#endif std::vector<EdgeT>& vals_subg = subgraph.vals_; std::vector<IndexT>& row_ptr_subg = subgraph.row_ptr_; std::vector<IndexT>& col_ind_subg = subgraph.col_ind_; std::vector<IndexT> all_zeros; IndexT last_updated_pos(0); // size_t nrows_subg = vertexSubset.size(); row_ptr_subg.assign(nrows_subg+1, IndexT(0)); all_zeros.reserve(nrows_subg); IndexT nz_subg(0); for(IndexT i=IndexT(0);i<IndexT(nrows_subg);++i) { IndexT row_index = vertexSubset[i]; bool first_nz_inrow = true; for(IndexT j=row_ptr_[row_index]; j<row_ptr_[row_index+1];++j) { IndexT k = col_ind_[j]; if( std::binary_search(vertexSubset.begin(), vertexSubset.end(), k) )//in vertex subset! { vals_subg.push_back(vals_[j]); col_ind_subg.push_back(k); ++nz_subg; if( first_nz_inrow )//update row_ptr_subg { row_ptr_subg[i] = last_updated_pos; first_nz_inrow = false; } ++last_updated_pos; } }//end for(j;..) //special cases of a row with all zeros: mark it! if (first_nz_inrow) { all_zeros.push_back(i); } }//end for(i;...) assert( nz_subg == vals_subg.size() ); assert( nz_subg == col_ind_subg.size() ); //last entry in row_ptr_subg: row_ptr_subg.back() = nz_subg; //handle all zero row cases: fix_zero_rows(all_zeros, row_ptr_subg); remap_indices(vertexSubset, col_ind_subg); } protected: void print(std::ostream& os) const { os<<"vals: "; std::copy(vals_.begin(), vals_.end(), std::ostream_iterator<EdgeT>(os,",")); os<<"\n"; os<<"row_ptr: "; std::copy(row_ptr_.begin(), row_ptr_.end(), std::ostream_iterator<IndexT>(os,",")); os<<"\n"; os<<"col_ind: "; std::copy(col_ind_.begin(), col_ind_.end(), std::ostream_iterator<IndexT>(os,",")); os<<"\n"; } struct Updater { explicit Updater(std::vector<IndexT>& row_ptr): m_row_ptr(row_ptr) { } void operator()(const IndexT& i) { m_row_ptr[i] = m_row_ptr[i+1]; } private: std::vector<IndexT>& m_row_ptr; }; //correct row_ptr: iterate all_zeros from end towards beginning //and correct row_ptr_ at corresponding index // static void fix_zero_rows(const std::vector<IndexT>& all_zeros, std::vector<IndexT>& row_ptr) { Updater up(row_ptr); std::for_each(all_zeros.rbegin(), all_zeros.rend(), up); // std::for_each(all_zeros.rbegin(), all_zeros.rend(), [&](const IndexT& i){ // row_ptr[i] = row_ptr[i+1]; // }); } struct HashUpdater { explicit HashUpdater(std::vector<IndexT>& hash): m_hash(hash), m_counter(0) { } void operator()(const IndexT& i) { m_hash[i]=m_counter++; } private: std::vector<IndexT>& m_hash; IndexT m_counter; }; //assumes src is ordered increasingly // static void remap_indices(const std::vector<IndexT>& src, std::vector<IndexT>& index_set) { IndexT max_entry = src.back(); //use hash_src vector as hash-table: // std::vector<IndexT> hash_src(max_entry+1, IndexT(0)); ///std::iota(hash_src.begin(), hash_src.end(), IndexT(0));//increasing sequence HashUpdater hasher(hash_src); std::for_each(src.begin(), src.end(), hasher); // IndexT counter(0); // std::for_each(src.begin(), src.end(), [&](const IndexT& i){ // hash_src[i]=counter++; // }); size_t set_sz = index_set.size(); std::vector<IndexT> old_index_set(index_set); for(IndexT k = 0;k<set_sz;++k) { index_set[k] = hash_src[old_index_set[k]]; } } private: std::vector<EdgeT> vals_; std::vector<IndexT> row_ptr_; std::vector<IndexT> col_ind_; }; }//end namespace debug }//end namespace nvgraph #endif /* 
incidence_graph_hxx */
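Below is a minimal usage sketch (not part of the repository) for the debug CsrGraph just shown: it builds a small graph from CSR arrays and extracts an induced subgraph. The include path, the nvgraph::debug namespace, and the template parameter order CsrGraph<IndexT, VertexT, EdgeT> (matching the Graph<IndexT, VertexT, EdgeT> base used by operator<<) are assumptions.

// Usage sketch, not from the repo. Assumptions: header path "incidence_graph.hxx",
// class lives in nvgraph::debug, template order CsrGraph<IndexT, VertexT, EdgeT>.
#include <iostream>
#include <vector>
#include "incidence_graph.hxx"   // hypothetical include path

int main()
{
    using IndexT  = int;
    using VertexT = double;
    using EdgeT   = double;

    // 3-vertex graph with edges 0->1, 0->2, 1->2 in CSR form.
    std::vector<EdgeT>   vals    {1.0, 2.0, 3.0};
    std::vector<IndexT>  row_ptr {0, 2, 3, 3};
    std::vector<IndexT>  col_ind {1, 2, 2};
    std::vector<VertexT> vvals   {10.0, 20.0, 30.0};

    nvgraph::debug::CsrGraph<IndexT, VertexT, EdgeT> g(vals, row_ptr, col_ind, vvals);
    std::cout << g;   // prints the base graph data followed by vals / row_ptr / col_ind

    // Subgraph induced by vertices {0, 2}; column indices are remapped to
    // positions inside the subset by remap_indices().
    std::vector<IndexT>  subset {0, 2};
    std::vector<EdgeT>   ev; std::vector<IndexT> ei; std::vector<VertexT> evv;
    nvgraph::debug::CsrGraph<IndexT, VertexT, EdgeT> sub(ev, ei, ei, evv);
    g.extract_subgraph(subset, sub);
    std::cout << sub;
    return 0;
}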
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/atomics.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ namespace nvgraph { //This file contains the atomic operations for floats and doubles from cusparse/src/cusparse_atomics.h static __inline__ __device__ double atomicFPAdd(double *addr, double val) { // atomicAdd for double starts with sm_60 #if __CUDA_ARCH__ >= 600 return atomicAdd( addr, val ); #else unsigned long long old = __double_as_longlong( addr[0] ), assumed; do { assumed = old; old = atomicCAS( (unsigned long long *) addr, assumed, __double_as_longlong( val + __longlong_as_double( assumed ) ) ); } while ( assumed != old ); return old; #endif } // atomicAdd for float starts with sm_20 static __inline__ __device__ float atomicFPAdd(float *addr, float val) { return atomicAdd( addr, val ); } static __inline__ __device__ double atomicFPMin(double *addr, double val) { double old, assumed; old=*addr; do{ assumed = old; old = __longlong_as_double(atomicCAS((unsigned long long int *)addr, __double_as_longlong(assumed), __double_as_longlong(min(val,assumed)))); } while (__double_as_longlong(assumed) != __double_as_longlong(old)); return old; } /* atomic addition: based on Nvidia Research atomic's tricks from cusparse */ static __inline__ __device__ float atomicFPMin(float *addr, float val) { float old, assumed; old=*addr; do{ assumed = old; old = int_as_float(atomicCAS((int *)addr, float_as_int(assumed),float_as_int(min(val,assumed)))); } while (float_as_int(assumed) != float_as_int(old)); return old; } static __inline__ __device__ double atomicFPMax(double *addr, double val) { double old, assumed; old=*addr; do{ assumed = old; old = __longlong_as_double(atomicCAS((unsigned long long int *)addr, __double_as_longlong(assumed), __double_as_longlong(max(val,assumed)))); } while (__double_as_longlong(assumed) != __double_as_longlong(old)); return old; } /* atomic addition: based on Nvidia Research atomic's tricks from cusparse */ static __inline__ __device__ float atomicFPMax(float *addr, float val) { float old, assumed; old=*addr; do{ assumed = old; old = int_as_float(atomicCAS((int *)addr, float_as_int(assumed),float_as_int(max(val,assumed)))); } while (float_as_int(assumed) != float_as_int(old)); return old; } static __inline__ __device__ double atomicFPOr(double *addr, double val) { double old, assumed; old=*addr; do{ assumed = old; old = __longlong_as_double(atomicCAS((unsigned long long int *)addr, __double_as_longlong(assumed), __double_as_longlong((bool)val | (bool)assumed))); } while (__double_as_longlong(assumed) != __double_as_longlong(old)); return old; } /* atomic addition: based on Nvidia Research atomic's tricks from cusparse */ static __inline__ __device__ float atomicFPOr(float *addr, float val) { float old, assumed; old=*addr; do{ assumed = old; old = int_as_float(atomicCAS((int *)addr, float_as_int(assumed),float_as_int((bool)val | (bool)assumed))); } while (float_as_int(assumed) != float_as_int(old)); return old; } static __inline__ __device__ double atomicFPLog(double *addr, double val) { double 
old, assumed; old=*addr; do{ assumed = old; old = __longlong_as_double(atomicCAS((unsigned long long int *)addr, __double_as_longlong(assumed), __double_as_longlong(-log(exp(-val)+exp(-assumed))))); } while (__double_as_longlong(assumed) != __double_as_longlong(old)); return old; } /* atomic addition: based on Nvidia Research atomic's tricks from cusparse */ static __inline__ __device__ float atomicFPLog(float *addr, float val) { float old, assumed; old=*addr; do{ assumed = old; old = int_as_float(atomicCAS((int *)addr, float_as_int(assumed),float_as_int(-logf(expf(-val)+expf(-assumed))))); } while (float_as_int(assumed) != float_as_int(old)); return old; } } //end namespace nvgraph
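A short kernel sketch showing how these helpers are typically used; the include path "atomics.hxx" is an assumption, and the accumulators must be pre-initialized by the caller (the sum to 0, the minimum to a large value such as FLT_MAX).

// Usage sketch (assumption: this file is reachable as "atomics.hxx").
// Folds each input element into a global sum and a global minimum, relying
// on the CAS loops above where no native atomic exists.
#include <cfloat>
#include "atomics.hxx"

__global__ void reduce_sum_min(const float* in, int n, float* sum, float* mn)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        nvgraph::atomicFPAdd(sum, in[i]);   // native atomicAdd for float
        nvgraph::atomicFPMin(mn,  in[i]);   // CAS-based min for float
    }
}
// Host side (sketch): initialize *sum = 0.f and *mn = FLT_MAX on the device,
// then launch reduce_sum_min<<<(n + 255) / 256, 256>>>(d_in, n, d_sum, d_min);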
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraphP.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * * * WARNING: this is a private header file, it should not be publically exposed. * * */ #pragma once #include "nvgraph.h" #include "cnmem.h" #if defined(__cplusplus) extern "C" { #endif /* Graph descriptor types */ typedef enum { IS_EMPTY = 0, //nothing HAS_TOPOLOGY = 1, //connectivity info HAS_VALUES = 2, //MultiValuedCSRGraph IS_2D = 3 } nvgraphGraphStatus_t; struct nvgraphContext { cudaStream_t stream; cnmemDevice_t cnmem_device; int nvgraphIsInitialized; }; struct nvgraphGraphDescr { nvgraphGraphStatus_t graphStatus; cudaDataType T; // This is the type of values for the graph nvgraphTopologyType_t TT; // The topology type (class to cast graph_handle pointer to) void* graph_handle; // Opaque pointer to the graph class object }; #if defined(__cplusplus) }//extern "C" #endif
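An illustrative sketch (not from the repository) of how library-internal code could validate this private descriptor before touching the opaque graph_handle; the CSRGraphType name in the trailing comment is hypothetical.

// Illustration only: validate a descriptor before casting graph_handle.
// Assumes nvgraphP.h is includable from library-internal code; CSRGraphType
// below is a hypothetical class name.
#include "nvgraphP.h"

static nvgraphStatus_t check_csr_descriptor(nvgraphGraphDescr_t descrG)
{
    if (descrG == NULL)
        return NVGRAPH_STATUS_INVALID_VALUE;
    if (descrG->graphStatus == IS_EMPTY)            // no topology attached yet
        return NVGRAPH_STATUS_INVALID_VALUE;
    if (descrG->TT != NVGRAPH_CSR_32)               // only CSR handled here
        return NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED;
    // CSRGraphType* g = static_cast<CSRGraphType*>(descrG->graph_handle);
    return NVGRAPH_STATUS_SUCCESS;
}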
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/lanczos.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "nvgraph_error.hxx" #include "matrix.hxx" namespace nvgraph { /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Pointer to matrix object. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Smallest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev); /// Compute smallest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are smallest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * @param A Pointer to matrix object. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. 
Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the smallest unwanted eigenvalue * (i.e. the (nEigVecs+1)th smallest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param shift On exit, pointer to matrix shift. * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Smallest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to smallest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeSmallestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * iter, IndexType_ * totalIter, ValueType_ * shift, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev); /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param effIter On exit, pointer to final size of Lanczos system. * @param totalIter On exit, pointer to total number of Lanczos * iterations performed. * @param alpha_host (Output, host memory, restartIter entries) * Diagonal entries of Lanczos system. * @param beta_host (Output, host memory, restartIter entries) * Off-diagonal entries of Lanczos system. * @param lanczosVecs_dev (Output, device memory, n*(restartIter+1) * entries) Lanczos vectors. Vectors are stored as columns of a * column-major matrix with dimensions n x (restartIter+1). * @param work_dev (Output, device memory, * (n+restartIter)*restartIter entries) Workspace. 
* @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> * A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ * effIter, IndexType_ * totalIter, ValueType_ * __restrict__ alpha_host, ValueType_ * __restrict__ beta_host, ValueType_ * __restrict__ lanczosVecs_dev, ValueType_ * __restrict__ work_dev, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev); /// Compute largest eigenvectors of symmetric matrix /** Computes eigenvalues and eigenvectors that are least * positive. If matrix is positive definite or positive * semidefinite, the computed eigenvalues are largest in * magnitude. * * The largest eigenvalue is estimated by performing several * Lanczos iterations. An implicitly restarted Lanczos method is * then applied to A+s*I, where s is negative the largest * eigenvalue. * * CNMEM must be initialized before calling this function. * * @param A Matrix. * @param nEigVecs Number of eigenvectors to compute. * @param maxIter Maximum number of Lanczos steps. Does not include * Lanczos steps used to estimate largest eigenvalue. * @param restartIter Maximum size of Lanczos system before * performing an implicit restart. Should be at least 4. * @param tol Convergence tolerance. Lanczos iteration will * terminate when the residual norm is less than tol*theta, where * theta is an estimate for the largest unwanted eigenvalue * (i.e. the (nEigVecs+1)th largest eigenvalue). * @param reorthogonalize Whether to reorthogonalize Lanczos * vectors. * @param iter On exit, pointer to total number of Lanczos * iterations performed. Does not include Lanczos steps used to * estimate largest eigenvalue. * @param eigVals_dev (Output, device memory, nEigVecs entries) * Largest eigenvalues of matrix. * @param eigVecs_dev (Output, device memory, n*nEigVecs entries) * Eigenvectors corresponding to largest eigenvalues of * matrix. Vectors are stored as columns of a column-major matrix * with dimensions n x nEigVecs. * @return NVGRAPH error flag. */ template <typename IndexType_, typename ValueType_> NVGRAPH_ERROR computeLargestEigenvectors(const Matrix<IndexType_,ValueType_> & A, IndexType_ nEigVecs, IndexType_ maxIter, IndexType_ restartIter, ValueType_ tol, bool reorthogonalize, IndexType_ & iter, ValueType_ * __restrict__ eigVals_dev, ValueType_ * __restrict__ eigVecs_dev); }
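A hedged call sketch for the simpler overload documented above. It assumes a Matrix<int,float> instance built elsewhere (matrix.hxx), an initialized CNMEM pool, device allocations for the outputs, and that the <int,float> template instantiation is provided by the implementation.

// Call sketch under stated assumptions: A is a ready nvgraph::Matrix<int,float>,
// CNMEM is initialized, eigVals_dev has nEigVecs entries and eigVecs_dev has
// n*nEigVecs entries in device memory.
#include "lanczos.hxx"

nvgraph::NVGRAPH_ERROR smallest_eigs(const nvgraph::Matrix<int, float>& A,
                                     int nEigVecs,
                                     float* eigVals_dev, float* eigVecs_dev)
{
    int   maxIter     = 4000;
    int   restartIter = 32;     // "should be at least 4" per the doc above
    float tol         = 1e-4f;
    int   iters       = 0;      // receives the Lanczos iteration count
    return nvgraph::computeSmallestEigenvectors(A, nEigVecs, maxIter, restartIter,
                                                tol, /*reorthogonalize=*/false,
                                                iters, eigVals_dev, eigVecs_dev);
}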
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef _NVGRAPH_H_ #define _NVGRAPH_H_ #include "stddef.h" #include "stdint.h" #include "library_types.h" #define NVG_CUDA_TRY(T) {\ if (T != cudaSuccess)\ return NVGRAPH_STATUS_ALLOC_FAILED;\ } #ifndef NVGRAPH_API #ifdef _WIN32 #define NVGRAPH_API __stdcall #else #define NVGRAPH_API #endif #endif #ifdef __cplusplus extern "C" { #endif /* nvGRAPH status type returns */ typedef enum { NVGRAPH_STATUS_SUCCESS = 0, NVGRAPH_STATUS_NOT_INITIALIZED = 1, NVGRAPH_STATUS_ALLOC_FAILED = 2, NVGRAPH_STATUS_INVALID_VALUE = 3, NVGRAPH_STATUS_ARCH_MISMATCH = 4, NVGRAPH_STATUS_MAPPING_ERROR = 5, NVGRAPH_STATUS_EXECUTION_FAILED = 6, NVGRAPH_STATUS_INTERNAL_ERROR = 7, NVGRAPH_STATUS_TYPE_NOT_SUPPORTED = 8, NVGRAPH_STATUS_NOT_CONVERGED = 9, NVGRAPH_STATUS_GRAPH_TYPE_NOT_SUPPORTED = 10 } nvgraphStatus_t; const char* nvgraphStatusGetString(nvgraphStatus_t status); /* Opaque structure holding nvGRAPH library context */ struct nvgraphContext; typedef struct nvgraphContext *nvgraphHandle_t; /* Opaque structure holding the graph descriptor */ struct nvgraphGraphDescr; typedef struct nvgraphGraphDescr *nvgraphGraphDescr_t; /* Semi-ring types */ typedef enum { NVGRAPH_PLUS_TIMES_SR = 0, NVGRAPH_MIN_PLUS_SR = 1, NVGRAPH_MAX_MIN_SR = 2, NVGRAPH_OR_AND_SR = 3, } nvgraphSemiring_t; /* Topology types */ typedef enum { NVGRAPH_CSR_32 = 0, NVGRAPH_CSC_32 = 1, NVGRAPH_COO_32 = 2, NVGRAPH_2D_32I_32I = 3, NVGRAPH_2D_64I_32I = 4 } nvgraphTopologyType_t; typedef enum { NVGRAPH_DEFAULT = 0, // Default is unsorted. NVGRAPH_UNSORTED = 1, // NVGRAPH_SORTED_BY_SOURCE = 2, // CSR NVGRAPH_SORTED_BY_DESTINATION = 3 // CSC } nvgraphTag_t; typedef enum { NVGRAPH_MULTIPLY = 0, NVGRAPH_SUM = 1, NVGRAPH_MIN = 2, NVGRAPH_MAX = 3 } nvgraphSemiringOps_t; typedef enum { NVGRAPH_MODULARITY_MAXIMIZATION = 0, //maximize modularity with Lanczos solver NVGRAPH_BALANCED_CUT_LANCZOS = 1, //minimize balanced cut with Lanczos solver NVGRAPH_BALANCED_CUT_LOBPCG = 2 //minimize balanced cut with LOPCG solver } nvgraphSpectralClusteringType_t; struct SpectralClusteringParameter { int n_clusters; //number of clusters int n_eig_vects; // //number of eigenvectors nvgraphSpectralClusteringType_t algorithm; // algorithm to use float evs_tolerance; // tolerance of the eigensolver int evs_max_iter; // maximum number of iterations of the eigensolver float kmean_tolerance; // tolerance of kmeans int kmean_max_iter; // maximum number of iterations of kemeans void * opt; // optional parameter that can be used for preconditioning in the future }; typedef enum { NVGRAPH_MODULARITY, // clustering score telling how good the clustering is compared to random assignment. NVGRAPH_EDGE_CUT, // total number of edges between clusters. 
NVGRAPH_RATIO_CUT // sum for all clusters of the number of edges going outside of the cluster divided by the number of vertex inside the cluster } nvgraphClusteringMetric_t; struct nvgraphCSRTopology32I_st { int nvertices; // n+1 int nedges; // nnz int *source_offsets; // rowPtr int *destination_indices; // colInd }; typedef struct nvgraphCSRTopology32I_st *nvgraphCSRTopology32I_t; struct nvgraphCSCTopology32I_st { int nvertices; // n+1 int nedges; // nnz int *destination_offsets; // colPtr int *source_indices; // rowInd }; typedef struct nvgraphCSCTopology32I_st *nvgraphCSCTopology32I_t; struct nvgraphCOOTopology32I_st { int nvertices; // n+1 int nedges; // nnz int *source_indices; // rowInd int *destination_indices; // colInd nvgraphTag_t tag; }; typedef struct nvgraphCOOTopology32I_st *nvgraphCOOTopology32I_t; struct nvgraph2dCOOTopology32I_st { int nvertices; int nedges; int *source_indices; // Row Indices int *destination_indices; // Column Indices cudaDataType_t valueType; // The type of values being given. void *values; // Pointer to array of values. int numDevices; // Gives the number of devices to be used. int *devices; // Array of device IDs to use. int blockN; // Specifies the value of n for an n x n matrix decomposition. nvgraphTag_t tag; }; typedef struct nvgraph2dCOOTopology32I_st *nvgraph2dCOOTopology32I_t; /* Return properties values for the nvGraph library, such as library version */ nvgraphStatus_t NVGRAPH_API nvgraphGetProperty(libraryPropertyType type, int *value); /* Open the library and create the handle */ nvgraphStatus_t NVGRAPH_API nvgraphCreate(nvgraphHandle_t *handle); nvgraphStatus_t NVGRAPH_API nvgraphCreateMulti( nvgraphHandle_t *handle, int numDevices, int* devices); /* Close the library and destroy the handle */ nvgraphStatus_t NVGRAPH_API nvgraphDestroy(nvgraphHandle_t handle); /* Create an empty graph descriptor */ nvgraphStatus_t NVGRAPH_API nvgraphCreateGraphDescr( nvgraphHandle_t handle, nvgraphGraphDescr_t *descrG); /* Destroy a graph descriptor */ nvgraphStatus_t NVGRAPH_API nvgraphDestroyGraphDescr( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG); /* Set size, topology data in the graph descriptor */ nvgraphStatus_t NVGRAPH_API nvgraphSetGraphStructure( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t TType); /* Query size and topology information from the graph descriptor */ nvgraphStatus_t NVGRAPH_API nvgraphGetGraphStructure( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t* TType); /* Allocate numsets vectors of size V representing Vertex Data and attached them the graph. * settypes[i] is the type of vector #i, currently all Vertex and Edge data should have the same type */ nvgraphStatus_t NVGRAPH_API nvgraphAllocateVertexData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes); /* Allocate numsets vectors of size E representing Edge Data and attached them the graph. 
* settypes[i] is the type of vector #i, currently all Vertex and Edge data should have the same type */ nvgraphStatus_t NVGRAPH_API nvgraphAllocateEdgeData( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t numsets, cudaDataType_t *settypes); /* Update the vertex set #setnum with the data in *vertexData, sets have 0-based index * Conversions are not supported so nvgraphTopologyType_t should match the graph structure */ nvgraphStatus_t NVGRAPH_API nvgraphSetVertexData( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum); /* Copy the edge set #setnum in *edgeData, sets have 0-based index * Conversions are not supported so nvgraphTopologyType_t should match the graph structure */ nvgraphStatus_t NVGRAPH_API nvgraphGetVertexData( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *vertexData, size_t setnum); /* Convert the edge data to another topology */ nvgraphStatus_t NVGRAPH_API nvgraphConvertTopology(nvgraphHandle_t handle, nvgraphTopologyType_t srcTType, void *srcTopology, void *srcEdgeData, cudaDataType_t *dataType, nvgraphTopologyType_t dstTType, void *dstTopology, void *dstEdgeData); /* Update the edge set #setnum with the data in *edgeData, sets have 0-based index */ nvgraphStatus_t NVGRAPH_API nvgraphSetEdgeData( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum); /* Copy the edge set #setnum in *edgeData, sets have 0-based index */ nvgraphStatus_t NVGRAPH_API nvgraphGetEdgeData( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void *edgeData, size_t setnum); /* create a new graph by extracting a subgraph given a list of vertices */ nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByVertex( nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subvertices, size_t numvertices); /* create a new graph by extracting a subgraph given a list of edges */ nvgraphStatus_t NVGRAPH_API nvgraphExtractSubgraphByEdge(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t subdescrG, int *subedges, size_t numedges); /* nvGRAPH Semi-ring sparse matrix vector multiplication */ nvgraphStatus_t NVGRAPH_API nvgraphSrSpmv(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t x_index, const void *beta, const size_t y_index, const nvgraphSemiring_t SR); /* Helper struct for Traversal parameters */ typedef struct { size_t pad[128]; } nvgraphTraversalParameter_t; /* Initializes traversal parameters with default values */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalParameterInit(nvgraphTraversalParameter_t *param); /* Stores/retrieves index of a vertex data where target distances will be stored */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetDistancesIndex( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetDistancesIndex( const nvgraphTraversalParameter_t param, size_t *value); /* Stores/retrieves index of a vertex data where path predecessors will be stored */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetPredecessorsIndex( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetPredecessorsIndex( const nvgraphTraversalParameter_t param, size_t *value); /* Stores/retrieves index of an edge data which tells traversal algorithm whether path can go through an edge or not */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetEdgeMaskIndex( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t 
NVGRAPH_API nvgraphTraversalGetEdgeMaskIndex( const nvgraphTraversalParameter_t param, size_t *value); /* Stores/retrieves flag that tells an algorithm whether the graph is directed or not */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetUndirectedFlag( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetUndirectedFlag( const nvgraphTraversalParameter_t param, size_t *value); /* Stores/retrieves 'alpha' and 'beta' parameters for BFS traversal algorithm */ nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetAlpha( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetAlpha( const nvgraphTraversalParameter_t param, size_t *value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalSetBeta( nvgraphTraversalParameter_t *param, const size_t value); nvgraphStatus_t NVGRAPH_API nvgraphTraversalGetBeta( const nvgraphTraversalParameter_t param, size_t *value); //Traversal available typedef enum { NVGRAPH_TRAVERSAL_BFS = 0 } nvgraphTraversal_t; /* nvGRAPH Traversal API * Compute a traversal of the graph from a single vertex using algorithm specified by traversalT parameter */ nvgraphStatus_t NVGRAPH_API nvgraphTraversal(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const nvgraphTraversal_t traversalT, const int *source_vert, const nvgraphTraversalParameter_t params); /** * CAPI Method for calling 2d BFS algorithm. * @param handle Nvgraph context handle. * @param descrG Graph handle (must be 2D partitioned) * @param source_vert The source vertex ID * @param distances Pointer to memory allocated to store the distances. * @param predecessors Pointer to memory allocated to store the predecessors * @return Status code. */ nvgraphStatus_t NVGRAPH_API nvgraph2dBfs( nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const int32_t source_vert, int32_t* distances, int32_t* predecessors); /* nvGRAPH Single Source Shortest Path (SSSP) * Calculate the shortest path distance from a single vertex in the graph to all other vertices. */ nvgraphStatus_t NVGRAPH_API nvgraphSssp(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t sssp_index); /* nvGRAPH WidestPath * Find widest path potential from source_index to every other vertices. */ nvgraphStatus_t NVGRAPH_API nvgraphWidestPath(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const int *source_vert, const size_t widest_path_index); /* nvGRAPH PageRank * Find PageRank for each vertex of a graph with a given transition probabilities, a bookmark vector of dangling vertices, and the damping factor. 
*/ nvgraphStatus_t NVGRAPH_API nvgraphPagerank(nvgraphHandle_t handle, const nvgraphGraphDescr_t descrG, const size_t weight_index, const void *alpha, const size_t bookmark_index, const int has_guess, const size_t pagerank_index, const float tolerance, const int max_iter); /* nvGRAPH contraction * given array of agregates contract graph with * given (Combine, Reduce) operators for Vertex Set * and Edge Set; */ nvgraphStatus_t NVGRAPH_API nvgraphContractGraph(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, nvgraphGraphDescr_t contrdescrG, int *aggregates, size_t numaggregates, nvgraphSemiringOps_t VertexCombineOp, nvgraphSemiringOps_t VertexReduceOp, nvgraphSemiringOps_t EdgeCombineOp, nvgraphSemiringOps_t EdgeReduceOp, int flag); /* nvGRAPH spectral clustering * given a graph and solver parameters of struct SpectralClusteringParameter, * assign vertices to groups such as * intra-group connections are strong and/or inter-groups connections are weak * using spectral technique. */ nvgraphStatus_t NVGRAPH_API nvgraphSpectralClustering(nvgraphHandle_t handle, const nvgraphGraphDescr_t graph_descr, const size_t weight_index, const struct SpectralClusteringParameter *params, int* clustering, void* eig_vals, void* eig_vects); /* nvGRAPH analyze clustering * Given a graph, a clustering, and a metric * compute the score that measures the clustering quality according to the metric. */ nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeClustering(nvgraphHandle_t handle, const nvgraphGraphDescr_t graph_descr, const size_t weight_index, const int n_clusters, const int* clustering, nvgraphClusteringMetric_t metric, float * score); /* nvGRAPH Triangles counting * count number of triangles (cycles of size 3) formed by graph edges */ nvgraphStatus_t NVGRAPH_API nvgraphTriangleCount(nvgraphHandle_t handle, const nvgraphGraphDescr_t graph_descr, uint64_t* result); /* nvGRAPH Louvain implementation */ nvgraphStatus_t NVGRAPH_API nvgraphLouvain ( cudaDataType_t index_type, cudaDataType_t val_type, const size_t num_vertex, const size_t num_edges, void* csr_ptr, void* csr_ind, void* csr_val, int weighted, int has_init_cluster, void* init_cluster, void* final_modularity, void* best_cluster_vec, void* num_level); /* nvGRAPH Jaccard implementation */ nvgraphStatus_t NVGRAPH_API nvgraphJaccard ( cudaDataType_t index_type, cudaDataType_t val_type, const size_t n, const size_t e, void* csr_ptr, void *csr_ind, void* csr_val, int weighted, void* v, void* gamma, void* weight_j); /* nvGRAPH attach structure * Warp external device data into a nvgraphGraphDescr_t * Warning : this data remain owned by the user */ nvgraphStatus_t NVGRAPH_API nvgraphAttachGraphStructure(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, void* topologyData, nvgraphTopologyType_t TT); /* nvGRAPH attach Vertex Data * Warp external device data into a vertex dim * Warning : this data remain owned by the user */ nvgraphStatus_t NVGRAPH_API nvgraphAttachVertexData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *vertexData); /* nvGRAPH attach Edge Data * Warp external device data into an edge dim * Warning : this data remain owned by the user */ nvgraphStatus_t NVGRAPH_API nvgraphAttachEdgeData(nvgraphHandle_t handle, nvgraphGraphDescr_t descrG, size_t setnum, cudaDataType_t settype, void *edgeData); #if defined(__cplusplus) } /* extern "C" */ #endif #endif /* _NVGRAPH_H_ */
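A compact end-to-end sketch of the C API above, following the usual nvGRAPH SSSP flow: create a handle, attach a CSC topology and one edge-weight set, run SSSP from vertex 0, and read the distances back. Sizes and weights are illustrative.

/* SSSP usage sketch for the API above; values are illustrative and error
 * handling is collapsed into a single CHECK macro. */
#include <stdio.h>
#include <stdlib.h>
#include "nvgraph.h"

#define CHECK(call) { nvgraphStatus_t s = (call); \
    if (s != NVGRAPH_STATUS_SUCCESS) { printf("nvgraph error %d\n", (int)s); exit(1); } }

int main(void)
{
    const size_t n = 6, nnz = 10;
    int destination_offsets[] = {0, 1, 3, 4, 6, 8, 10};
    int source_indices[]      = {2, 0, 2, 0, 4, 5, 2, 3, 3, 4};
    float weights[] = {0.33f, 0.50f, 0.33f, 0.50f, 0.50f, 1.00f, 0.33f, 0.50f, 0.50f, 0.50f};
    float sssp[6];
    cudaDataType_t vtype = CUDA_R_32F, etype = CUDA_R_32F;

    nvgraphHandle_t handle;     CHECK(nvgraphCreate(&handle));
    nvgraphGraphDescr_t graph;  CHECK(nvgraphCreateGraphDescr(handle, &graph));

    struct nvgraphCSCTopology32I_st topo = { (int)n, (int)nnz,
                                             destination_offsets, source_indices };
    CHECK(nvgraphSetGraphStructure(handle, graph, &topo, NVGRAPH_CSC_32));
    CHECK(nvgraphAllocateVertexData(handle, graph, 1, &vtype));
    CHECK(nvgraphAllocateEdgeData(handle, graph, 1, &etype));
    CHECK(nvgraphSetEdgeData(handle, graph, weights, 0));

    int source_vert = 0;
    CHECK(nvgraphSssp(handle, graph, 0 /*weight set*/, &source_vert, 0 /*distance set*/));
    CHECK(nvgraphGetVertexData(handle, graph, sssp, 0));

    CHECK(nvgraphDestroyGraphDescr(handle, graph));
    CHECK(nvgraphDestroy(handle));
    return 0;
}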
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/bfs2d_kernels.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cub/cub.cuh> #include "nvgraph_error.hxx" #define MAXBLOCKS 65535 #define WARP_SIZE 32 #define INT_SIZE 32 #define FILL_QUEUE_DIMX 256 #define COMPUTE_BUCKET_OFFSETS_DIMX 512 #define TOP_DOWN_EXPAND_DIMX 256 #define TOP_DOWN_BUCKET_SIZE 32 #define NBUCKETS_PER_BLOCK (TOP_DOWN_EXPAND_DIMX/TOP_DOWN_BUCKET_SIZE) #define TOP_DOWN_BATCH_SIZE 2 #define MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD (TOP_DOWN_BUCKET_SIZE - 1) using namespace nvgraph; namespace bfs_kernels { struct popCount : public thrust::unary_function<int,int> { __device__ int operator()(int x) const { return __popc(x); } }; template<typename > struct vec_t { typedef int4 vec4; typedef int2 vec2; }; template<> struct vec_t<int> { typedef int4 vec4; typedef int2 vec2; static const int max = INT_MAX; }; template<> struct vec_t<long long int> { typedef longlong4 vec4; typedef longlong2 vec2; static const long long int max = LLONG_MAX; }; struct BitwiseOr { template<typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return (a | b); } }; struct predMerge { template<typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { if (a != -1 && b != -1) return min(a, b); if (a != -1) return a; if (b != -1) return b; return -1; } }; __forceinline__ __device__ int getMaskNRightmostBitSet(int n) { if (n == INT_SIZE) return (~0); int mask = (1 << n) - 1; return mask; } __forceinline__ __device__ int getMaskNLeftmostBitSet(int n) { if (n == 0) return 0; int mask = ~((1 << (INT_SIZE - n)) - 1); return mask; } /** * Finds the position of the next non-zero bit in the given value. The value is * re-written with the found bit unset. * @param val The integer to find the next non-zero bit in. * @return The position of the next non-zero bit */ __forceinline__ __device__ int getNextNonZeroBit(int32_t& val) { int ibit = __ffs(val) - 1; val &= ~(1 << ibit); return ibit; } template<typename IndexType> __device__ IndexType binsearch_maxle(const IndexType *vec, const IndexType val, IndexType low, IndexType high) { while (true) { if (low == high) return low; //we know it exists if ((low + 1) == high) return (vec[high] <= val) ? 
high : low; IndexType mid = low + (high - low) / 2; if (vec[mid] > val) high = mid - 1; else low = mid; } } template<typename IndexType> class degreeIterator: public std::iterator<std::input_iterator_tag, IndexType, size_t, IndexType*, IndexType> { IndexType* offsets; size_t pos; public: __host__ __device__ degreeIterator(IndexType* _offsets) : offsets(_offsets), pos(0) { } __host__ __device__ degreeIterator(IndexType* _offsets, size_t _pos) : offsets(_offsets), pos(_pos) { } __host__ __device__ IndexType operator[](int loc) { return offsets[loc + 1] - offsets[loc]; } __host__ __device__ IndexType operator*() { return offsets[pos + 1] - offsets[pos]; } __host__ __device__ degreeIterator operator+(int inc) { degreeIterator it(offsets, pos + inc); return it; } }; template<typename IndexType> size_t getCubExclusiveSumStorageSize(IndexType n) { void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; IndexType *d_in = NULL, *d_out = NULL; cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_in, d_out, n); return temp_storage_bytes; } template<typename IndexType> size_t getCubSelectFlaggedStorageSize(IndexType n) { void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; IndexType *d_in = NULL, *d_out = NULL, *size_out = NULL; degreeIterator<IndexType> degreeIt(NULL); cub::DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, degreeIt, d_out, size_out, n); return temp_storage_bytes; } /** * Takes in the bitmap frontier and outputs the frontier as a queue of ids. * @param bmap Pointer to the bitmap * @param bmap_nints The number of ints used to store the bitmap * @param n The number of bits in the bitmap * @param outputQueue Pointer to the output queue * @param output_cnt Pointer to counter for output size */ template<typename IndexType> __global__ void convert_bitmap_to_queue_kernel(int32_t *bmap, IndexType bmap_nints, IndexType n, IndexType *outputQueue, IndexType *output_cnt) { typedef cub::BlockScan<int, FILL_QUEUE_DIMX> BlockScan; __shared__ typename BlockScan::TempStorage scan_temp_storage; // When filling the output queue, we use output_cnt to know where to write in the queue // (equivalent of int off = atomicAddd(unvisited_cnt, 1)) We will actually do only one // atomicAdd per block - we first do a scan, then call one atomicAdd, and store the common // offset for the block in common_block_offset __shared__ IndexType common_block_offset; // We don't want threads divergence in the loop (we're going to call __syncthreads) // Using a block-only dependent in the condition of the loop for (IndexType block_v_idx = blockIdx.x * blockDim.x; block_v_idx < bmap_nints; block_v_idx += blockDim.x * gridDim.x) { // Index of bmap that this thread will compute IndexType v_idx = block_v_idx + threadIdx.x; int thread_int = (v_idx < bmap_nints) ? 
bmap[v_idx] : 0; // The last int can be only partially valid // If we are indeed taking care of the last int in this thread, // We need to first disable the inactive bits (vertices >= n) if (v_idx == (bmap_nints - 1)) { int active_bits = n - (INT_SIZE * v_idx); int inactive_bits = INT_SIZE - active_bits; int mask = getMaskNLeftmostBitSet(inactive_bits); thread_int &= (~mask); } //Counting number of set bits in this int int n_in_int = __popc(thread_int); int thread_offset; // We will need to write n_unvisited_in_int unvisited vertices to the unvisited queue // We ask for that space when computing the block scan, that will tell where to write those // vertices in the queue, using the common offset of the block (see below) BlockScan(scan_temp_storage).ExclusiveSum(n_in_int, thread_offset); // Last thread knows how many vertices will be written to the queue by this block // Asking for that space in the queue using the global count, and saving the common offset if (threadIdx.x == (FILL_QUEUE_DIMX - 1)) { IndexType total = thread_offset + n_in_int; common_block_offset = atomicAdd(output_cnt, total); } // syncthreads for two reasons : // - we need to broadcast common_block_offset // - we will reuse scan_temp_storage (cf CUB doc) __syncthreads(); IndexType current_index = common_block_offset + thread_offset; int nvertices_to_write = n_in_int; // getNextNonZeroBit uses __ffs, which gives least significant bit set // which means that as long as n_unvisited_in_int is valid, // we will use valid bits while (nvertices_to_write > 0) { if (nvertices_to_write >= 4 && (current_index % 4) == 0) { typename vec_t<IndexType>::vec4 vec_v; vec_v.x = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); vec_v.y = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); vec_v.z = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); vec_v.w = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); typename vec_t<IndexType>::vec4 *unvisited_i4 = reinterpret_cast<typename vec_t< IndexType>::vec4*>(&outputQueue[current_index]); *unvisited_i4 = vec_v; current_index += 4; nvertices_to_write -= 4; } else if (nvertices_to_write >= 2 && (current_index % 2) == 0) { typename vec_t<IndexType>::vec2 vec_v; vec_v.x = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); vec_v.y = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); typename vec_t<IndexType>::vec2 *unvisited_i2 = reinterpret_cast<typename vec_t< IndexType>::vec2*>(&outputQueue[current_index]); *unvisited_i2 = vec_v; current_index += 2; nvertices_to_write -= 2; } else { IndexType v = v_idx * INT_SIZE + getNextNonZeroBit(thread_int); outputQueue[current_index] = v; current_index += 1; nvertices_to_write -= 1; } } } } template<typename IndexType> void convert_bitmap_to_queue(int32_t *bmap, IndexType bmap_nints, IndexType n, IndexType *outputQueue, IndexType *output_cnt, cudaStream_t stream) { dim3 grid, block; block.x = FILL_QUEUE_DIMX; grid.x = min((IndexType) MAXBLOCKS, (bmap_nints + block.x - 1) / block.x); convert_bitmap_to_queue_kernel<<<grid, block, 0, stream>>>(bmap, bmap_nints, n, outputQueue, output_cnt); cudaCheckError() ; } /** * Kernel to compute bucket offsets for load balancing main top-down expand kernel * @param frontier_degrees_exclusive_sum Exclusive sum of the local degrees of the frontier * elements. * @param bucket_offsets Output location for the bucket offsets. * @param frontier_size Number of elements in the frontier. * @param total_degree Total local degree of frontier elements. 
*/ template<typename IndexType> __global__ void compute_bucket_offsets_kernel(const IndexType *frontier_degrees_exclusive_sum, IndexType *bucket_offsets, const IndexType frontier_size, IndexType total_degree) { IndexType end = ((total_degree - 1 + TOP_DOWN_EXPAND_DIMX) / TOP_DOWN_EXPAND_DIMX * NBUCKETS_PER_BLOCK + 1); for (IndexType bid = blockIdx.x * blockDim.x + threadIdx.x; bid <= end; bid += gridDim.x * blockDim.x) { IndexType eid = min(bid * TOP_DOWN_BUCKET_SIZE, total_degree - 1); bucket_offsets[bid] = binsearch_maxle(frontier_degrees_exclusive_sum, eid, (IndexType) 0, frontier_size - 1); } } /** * Wrapper function around compute_bucket_offsets_kernel. * @param cumul Exclusive sum of the local degrees of the frontier elements. * @param bucket_offsets Output location for the bucket offsets. * @param frontier_size Number of elements in the frontier. * @param total_degree Total local degree of frontier elements. * @param m_stream Stream to use for execution. */ template<typename IndexType> void compute_bucket_offsets(IndexType *cumul, IndexType *bucket_offsets, IndexType frontier_size, IndexType total_degree, cudaStream_t m_stream) { dim3 grid, block; block.x = COMPUTE_BUCKET_OFFSETS_DIMX; grid.x = min((IndexType) MAXBLOCKS, ((total_degree - 1 + TOP_DOWN_EXPAND_DIMX) / TOP_DOWN_EXPAND_DIMX * NBUCKETS_PER_BLOCK + 1 + block.x - 1) / block.x); compute_bucket_offsets_kernel<<<grid, block, 0, m_stream>>>(cumul, bucket_offsets, frontier_size, total_degree); cudaCheckError(); } /** * Kernel for setting the degree of each frontier element. * @param frontier_degree Output to store frontier degrees. * @param frontier The frontier elements. * @param degreeIt Iterator providing the degree of a given vertex ID * @param n The number of elements in the frontier. */ template<typename IndexType, typename InputIterator> __global__ void set_frontier_degree_kernel(IndexType *frontier_degree, IndexType *frontier, InputIterator degreeIt, IndexType n) { for (IndexType idx = blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x) { IndexType u = frontier[idx]; frontier_degree[idx] = degreeIt[u]; } } /** * Wrapper function for calling set_frontier_degree_kernel * @param frontier_degree Output to store frontier degrees. * @param frontier The frontier elements. * @param degreeIt Iterator providing the degree of a given vertex ID. * @param n The number of elements in the frontier. * @param m_stream The stream to use for the kernel call. */ template<typename IndexType, typename InputIterator> void set_frontier_degree(IndexType *frontier_degree, IndexType *frontier, InputIterator degreeIt, IndexType n, cudaStream_t m_stream) { dim3 grid, block; block.x = 256; grid.x = min((n + block.x - 1) / block.x, (IndexType) MAXBLOCKS); set_frontier_degree_kernel<<<grid, block, 0, m_stream>>>(frontier_degree, frontier, degreeIt, n); cudaCheckError(); } /** * Kernel for setting the degree of each frontier element. * @param frontier_degree Output to store frontier degrees. * @param frontier The frontier elements. * @param degreeIt Iterator providing the degree of a given vertex ID * @param n The number of elements in the frontier. */ template<typename IndexType, typename InputIterator> __global__ void set_degree_flags_kernel(int8_t *degree_flags, IndexType *frontier, InputIterator degreeIt, IndexType n) { for (IndexType idx = blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x) { IndexType u = frontier[idx]; degree_flags[idx] = (degreeIt[u] == 0) ? 
0 : 1; } } /** * Wrapper function for calling set_frontier_degree_kernel * @param frontier_degree Output to store frontier degrees. * @param frontier The frontier elements. * @param degreeIt Iterator providing the degree of a given vertex ID. * @param n The number of elements in the frontier. * @param m_stream The stream to use for the kernel call. */ template<typename IndexType, typename InputIterator> void set_degree_flags(int8_t *degree_flags, IndexType *frontier, InputIterator degreeIt, IndexType n, cudaStream_t m_stream) { dim3 grid, block; block.x = 256; grid.x = min((n + block.x - 1) / block.x, (IndexType) MAXBLOCKS); set_degree_flags_kernel<<<grid, block, 0, m_stream>>>(degree_flags, frontier, degreeIt, n); cudaCheckError(); } /** * Kernel for globalizing an array of ids using a given offset. Values of -1 remain * unchanged, other values are incremented by the offset. * @param ids The array of ids to globalize (input and output) * @param offset The offset to be applied to each id. * @param n The number of ids in the array. */ template<typename IndexType> __global__ void globalize_ids_kernel(IndexType *ids, IndexType offset, IndexType n) { for (IndexType idx = blockDim.x * blockIdx.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x) { IndexType id = ids[idx]; ids[idx] = (id == -1) ? -1 : id + offset; } } /** * Wrapper function for calling globalize_ids_kernel * @param ids The array of ids to globalize (input and output) * @param offset The offset to be applied to each id. * @param n The number of ids in the array. * @param m_stream The stream to use for the kernel call. */ template<typename IndexType> void globalize_ids(IndexType *ids, IndexType offset, IndexType n, cudaStream_t m_stream) { dim3 grid, block; block.x = 256; grid.x = min((n + block.x - 1) / block.x, (IndexType) MAXBLOCKS); globalize_ids_kernel<<<grid, block, 0, m_stream>>>(ids, offset, n); cudaCheckError(); } template<typename IndexType, typename GlobalType> __global__ void topdown_expand_kernel( const IndexType *row_ptr, const IndexType *col_ind, const IndexType *frontier, const IndexType frontier_size, const IndexType totaldegree, const IndexType max_items_per_thread, const IndexType lvl, int *frontier_bmap, const IndexType *frontier_degrees_exclusive_sum, const IndexType *frontier_degrees_exclusive_sum_buckets_offsets, int *visited_bmap, IndexType *distances, GlobalType *predecessors) { __shared__ IndexType shared_buckets_offsets[TOP_DOWN_EXPAND_DIMX - NBUCKETS_PER_BLOCK + 1]; __shared__ IndexType shared_frontier_degrees_exclusive_sum[TOP_DOWN_EXPAND_DIMX + 1]; IndexType block_offset = (blockDim.x * blockIdx.x) * max_items_per_thread; IndexType n_items_per_thread_left = (totaldegree - block_offset + TOP_DOWN_EXPAND_DIMX - 1) / TOP_DOWN_EXPAND_DIMX; // if (threadIdx.x == 0) // printf("n_items_per_thread_left=%d max_items_per_thread=%d\n", n_items_per_thread_left, max_items_per_thread); n_items_per_thread_left = min(max_items_per_thread, n_items_per_thread_left); for (; (n_items_per_thread_left > 0) && (block_offset < totaldegree); block_offset += MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD * blockDim.x, n_items_per_thread_left -= MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD) { // In this loop, we will process batch_set_size batches IndexType nitems_per_thread = min(n_items_per_thread_left, (IndexType) MAX_ITEMS_PER_THREAD_PER_OFFSETS_LOAD); // Loading buckets offset (see compute_bucket_offsets_kernel) if (threadIdx.x < (nitems_per_thread * NBUCKETS_PER_BLOCK + 1)) shared_buckets_offsets[threadIdx.x] = 
frontier_degrees_exclusive_sum_buckets_offsets[block_offset / TOP_DOWN_BUCKET_SIZE + threadIdx.x]; // We will use shared_buckets_offsets __syncthreads(); // // shared_buckets_offsets gives us a range of the possible indexes // for edge of linear_threadx, we are looking for the value k such as // k is the max value such as frontier_degrees_exclusive_sum[k] <= linear_threadx // // we have 0 <= k < frontier_size // but we also have : // // frontier_degrees_exclusive_sum_buckets_offsets[linear_threadx/TOP_DOWN_BUCKET_SIZE] // <= k // <= frontier_degrees_exclusive_sum_buckets_offsets[linear_threadx/TOP_DOWN_BUCKET_SIZE + 1] // // To find the exact value in that range, we need a few values from frontier_degrees_exclusive_sum (see below) // We will load them here // We will load as much as we can - if it doesn't fit we will make multiple iteration of the next loop // Because all vertices in frontier have degree > 0, we know it will fits if left + 1 = right (see below) //We're going to load values in frontier_degrees_exclusive_sum for batch [left; right[ //If it doesn't fit, --right until it does, then loop //It is excepted to fit on the first try, that's why we start right = nitems_per_thread IndexType left = 0; IndexType right = nitems_per_thread; while (left < nitems_per_thread) { // // Values that are necessary to compute the local binary searches // We only need those with indexes between extremes indexes of buckets_offsets // We need the next val for the binary search, hence the +1 // IndexType nvalues_to_load = shared_buckets_offsets[right * NBUCKETS_PER_BLOCK] - shared_buckets_offsets[left * NBUCKETS_PER_BLOCK] + 1; //If left = right + 1 we are sure to have nvalues_to_load < TOP_DOWN_EXPAND_DIMX+1 while (nvalues_to_load > (TOP_DOWN_EXPAND_DIMX + 1)) { --right; nvalues_to_load = shared_buckets_offsets[right * NBUCKETS_PER_BLOCK] - shared_buckets_offsets[left * NBUCKETS_PER_BLOCK] + 1; } IndexType nitems_per_thread_for_this_load = right - left; IndexType frontier_degrees_exclusive_sum_block_offset = shared_buckets_offsets[left * NBUCKETS_PER_BLOCK]; //TODO put again the nvalues_to_load == 1 if (threadIdx.x < nvalues_to_load) { shared_frontier_degrees_exclusive_sum[threadIdx.x] = frontier_degrees_exclusive_sum[frontier_degrees_exclusive_sum_block_offset + threadIdx.x]; } if (nvalues_to_load == (TOP_DOWN_EXPAND_DIMX + 1) && threadIdx.x == 0) { shared_frontier_degrees_exclusive_sum[TOP_DOWN_EXPAND_DIMX] = frontier_degrees_exclusive_sum[frontier_degrees_exclusive_sum_block_offset + TOP_DOWN_EXPAND_DIMX]; } //shared_frontier_degrees_exclusive_sum is in shared mem, we will use it, sync //TODO we don't use it if nvalues_to_load == 1 __syncthreads(); // Now we will process the edges // Here each thread will process nitems_per_thread_for_this_load for (IndexType item_index = 0; item_index < nitems_per_thread_for_this_load; item_index += TOP_DOWN_BATCH_SIZE) { // We process TOP_DOWN_BATCH_SIZE edge in parallel (instruction parallism) // Reduces latency IndexType current_max_edge_index = min(block_offset + (left + nitems_per_thread_for_this_load) * blockDim.x, totaldegree); /** * We will need vec_u (source of the edge) until the end if we need to save the * predecessors. 
For others informations, we will reuse pointers on the go * (nvcc does not color well the registers in that case) */ IndexType vec_u[TOP_DOWN_BATCH_SIZE]; IndexType local_buf1[TOP_DOWN_BATCH_SIZE]; IndexType local_buf2[TOP_DOWN_BATCH_SIZE]; IndexType *vec_frontier_degrees_exclusive_sum_index = &local_buf2[0]; #pragma unroll for (IndexType iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType ibatch = left + item_index + iv; IndexType gid = block_offset + ibatch * blockDim.x + threadIdx.x; if (gid < current_max_edge_index) { IndexType start_off_idx = (ibatch * blockDim.x + threadIdx.x) / TOP_DOWN_BUCKET_SIZE; IndexType bucket_start = shared_buckets_offsets[start_off_idx] - frontier_degrees_exclusive_sum_block_offset; IndexType bucket_end = shared_buckets_offsets[start_off_idx + 1] - frontier_degrees_exclusive_sum_block_offset; IndexType k = binsearch_maxle(shared_frontier_degrees_exclusive_sum, gid, bucket_start, bucket_end) + frontier_degrees_exclusive_sum_block_offset; vec_u[iv] = frontier[k]; // origin of this edge vec_frontier_degrees_exclusive_sum_index[iv] = frontier_degrees_exclusive_sum[k]; } else { vec_u[iv] = -1; vec_frontier_degrees_exclusive_sum_index[iv] = -1; } } IndexType *vec_row_ptr_u = &local_buf1[0]; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType u = vec_u[iv]; //row_ptr for this vertex origin u vec_row_ptr_u[iv] = (u != -1) ? row_ptr[u] : -1; } //We won't need row_ptr after that, reusing pointer IndexType *vec_dest_v = vec_row_ptr_u; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType thread_item_index = left + item_index + iv; IndexType gid = block_offset + thread_item_index * blockDim.x + threadIdx.x; IndexType row_ptr_u = vec_row_ptr_u[iv]; IndexType edge = row_ptr_u + gid - vec_frontier_degrees_exclusive_sum_index[iv]; //Destination of this edge vec_dest_v[iv] = (row_ptr_u != -1) ? col_ind[edge] : -1; // if (vec_u[iv] != -1 && vec_dest_v[iv] != -1) // printf("Edge to examine: %d, %d\n", vec_u[iv],vec_dest_v[iv]); } //We don't need vec_frontier_degrees_exclusive_sum_index anymore IndexType *vec_v_visited_bmap = vec_frontier_degrees_exclusive_sum_index; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_dest_v[iv]; vec_v_visited_bmap[iv] = (v != -1) ? visited_bmap[v / INT_SIZE] : (~0); //will look visited } // From now on we will consider v as a frontier candidate // If for some reason vec_candidate[iv] should be put in the new_frontier // Then set vec_candidate[iv] = -1 IndexType *vec_frontier_candidate = vec_dest_v; #pragma unroll for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; int m = 1 << (v % INT_SIZE); int is_visited = vec_v_visited_bmap[iv] & m; if (is_visited) vec_frontier_candidate[iv] = -1; } #pragma unroll /** * Here is where the distances, predecessors, new bitmap frontier and visited bitmap * get written out. 
*/ for (int iv = 0; iv < TOP_DOWN_BATCH_SIZE; ++iv) { IndexType v = vec_frontier_candidate[iv]; if (v != -1) { int m = 1 << (v % INT_SIZE); int q = atomicOr(&visited_bmap[v / INT_SIZE], m); //atomicOr returns old int f = atomicOr(&frontier_bmap[v / INT_SIZE], m); if (!(m & q)) { //if this thread was the first to discover this node if (distances) distances[v] = lvl; if (predecessors) { IndexType pred = vec_u[iv]; predecessors[v] = pred; } } } } //We need naccepted_vertices to be ready __syncthreads(); } //We need to keep shared_frontier_degrees_exclusive_sum coherent __syncthreads(); //Preparing for next load left = right; right = nitems_per_thread; } //we need to keep shared_buckets_offsets coherent __syncthreads(); } } template<typename IndexType, typename GlobalType> void frontier_expand(const IndexType *row_ptr, const IndexType *col_ind, const IndexType *frontier, const IndexType frontier_size, const IndexType totaldegree, const IndexType lvl, IndexType *frontier_bmap, const IndexType *frontier_degrees_exclusive_sum, const IndexType *frontier_degrees_exclusive_sum_buckets_offsets, int *visited_bmap, IndexType *distances, GlobalType *predecessors, cudaStream_t m_stream) { if (!totaldegree) return; dim3 block; block.x = TOP_DOWN_EXPAND_DIMX; IndexType max_items_per_thread = (totaldegree + MAXBLOCKS * block.x - 1) / (MAXBLOCKS * block.x); dim3 grid; grid.x = min((totaldegree + max_items_per_thread * block.x - 1) / (max_items_per_thread * block.x), (IndexType) MAXBLOCKS); topdown_expand_kernel<<<grid, block, 0, m_stream>>>( row_ptr, col_ind, frontier, frontier_size, totaldegree, max_items_per_thread, lvl, frontier_bmap, frontier_degrees_exclusive_sum, frontier_degrees_exclusive_sum_buckets_offsets, visited_bmap, distances, predecessors); cudaCheckError(); } }
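A small host-side sketch of one of the helpers above: compacting a frontier bitmap into a queue of vertex ids. The include path and the availability of the header's cub and nvgraph_error.hxx dependencies are assumed.

// Sketch (assumptions: header reachable as "bfs2d_kernels.cuh" with its cub and
// nvgraph_error.hxx dependencies on the include path). Compacts a bitmap over
// n vertices into a queue of vertex ids plus a count, on the given stream.
#include <cuda_runtime.h>
#include "bfs2d_kernels.cuh"

void bitmap_to_queue(int32_t* d_bmap, int n,
                     int* d_queue, int* d_queue_count, cudaStream_t stream)
{
    int bmap_nints = (n + INT_SIZE - 1) / INT_SIZE;   // 32 vertices per int
    cudaMemsetAsync(d_queue_count, 0, sizeof(int), stream);
    bfs_kernels::convert_bitmap_to_queue(d_bmap, bmap_nints, n,
                                         d_queue, d_queue_count, stream);
    // After the kernel completes, *d_queue_count holds the frontier size and
    // d_queue[0..count) holds the vertex ids whose bits were set.
}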
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/lobpcg.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "matrix.hxx" #include "partition.hxx" namespace nvgraph { template <typename IndexType_, typename ValueType_> int lobpcg_simplified(cublasHandle_t cublasHandle, cusolverDnHandle_t cusolverHandle, IndexType_ n, IndexType_ k, /*const*/ Matrix<IndexType_,ValueType_> * A, ValueType_ * __restrict__ eigVecs_dev, ValueType_ * __restrict__ eigVals_dev, IndexType_ maxIter,ValueType_ tol, ValueType_ * __restrict__ work_dev, IndexType_ & iter); }
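A heavily hedged call sketch for the declaration above: the parameter meanings are inferred from the prototype alone, the <int,float> instantiation is assumed to be provided by the implementation, and the required size of work_dev is not documented here, so it is left as a caller-supplied assumption.

// Hedged call sketch: parameter meanings inferred from the declaration only;
// the <int,float> instantiation and the size of work_dev are assumptions.
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "lobpcg.hxx"

int run_lobpcg(cublasHandle_t cublas, cusolverDnHandle_t cusolver,
               nvgraph::Matrix<int, float>* A, int n, int k,
               float* eigVecs_dev, float* eigVals_dev,
               float* work_dev /* size: implementation-defined */)
{
    int   iter    = 0;       // receives the iteration count
    int   maxIter = 100;
    float tol     = 1e-4f;
    return nvgraph::lobpcg_simplified(cublas, cusolver, n, k, A,
                                      eigVecs_dev, eigVals_dev,
                                      maxIter, tol, work_dev, iter);
}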
0
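The header only declares the solver entry point; below is a hedged call sketch of the declared signature. The Matrix object, the workspace sizing and the buffer lengths are assumptions for illustration, not the documented contract.

#include <cublas_v2.h>
#include <cusolverDn.h>
#include "lobpcg.hxx"

// Hypothetical driver: A, work_dev, eigVecs_dev and eigVals_dev are assumed to
// be built/allocated elsewhere (n x n matrix, workspace, n*k and k values).
void run_lobpcg_sketch(nvgraph::Matrix<int, float>* A,
                       float* work_dev, float* eigVecs_dev, float* eigVals_dev,
                       int n, int k)
{
    cublasHandle_t     cublas   = nullptr;
    cusolverDnHandle_t cusolver = nullptr;
    cublasCreate(&cublas);
    cusolverDnCreate(&cusolver);

    int   iters = 0;
    float tol   = 1e-4f;
    nvgraph::lobpcg_simplified(cublas, cusolver, n, k, A,
                               eigVecs_dev, eigVals_dev,
                               /*maxIter=*/100, tol, work_dev, iters);

    cusolverDnDestroy(cusolver);
    cublasDestroy(cublas);
}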
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cstdlib> #include <cstddef> // size_t #include <iostream> #include <graph_visitors.hxx>// // namespace nvgraph { #define DEFINE_VISITABLE(T) \ virtual void Accept(VisitorBase& guest) \ { BaseVisitableGraph<T>::AcceptImpl(*this, guest); } template<typename T> struct BaseVisitableGraph { virtual void Accept(VisitorBase& v) = 0; virtual ~BaseVisitableGraph(void) { } protected: template<typename Host> static void AcceptImpl(Host& visited, VisitorBase& guest) { if( Visitor<Host>* p = dynamic_cast<Visitor<Host>*>(&guest)) { p->Visit(visited); } } }; template<typename IndexType_> class Graph: public BaseVisitableGraph<IndexType_> { public: typedef IndexType_ IndexType; protected: size_t num_vertices; size_t num_edges; Graph<IndexType> *parent; Graph<IndexType> *child; public: /*! Construct an empty \p Graph. */ Graph() : num_vertices(0),num_edges(0) {} /*! Construct a \p Graph with a specific number of vertices. * * \param vertices Number of vertices. */ Graph(size_t vertices) : num_vertices(vertices), num_edges(0) {} /*! Construct a \p Graph with a specific number of vertices and edges. * * \param vertices Number of vertices. * \param edges Number of edges. */ Graph(size_t vertices, size_t edges) : num_vertices(vertices), num_edges(edges) {} /*! Construct a \p CsrGraph from another graph. * * \param CsrGraph Another graph in csr */ Graph(const Graph& gr) { num_vertices = gr.get_num_vertices(); num_edges = gr.get_num_edges(); } inline void set_num_vertices(IndexType_ p_num_vertices) { num_vertices = p_num_vertices; } inline void set_num_edges(IndexType_ p_num_edges) { num_edges = p_num_edges; } inline size_t get_num_vertices() const { return num_vertices; } inline size_t get_num_edges() const { return num_edges; } /*! Resize graph dimensions * * \param num_rows Number of vertices. * \param num_cols Number of edges. */ //inline void resize(size_t vertices, size_t edges) //{ // num_vertices = vertices; // num_edges = edges; //} //Accept method injection DEFINE_VISITABLE(IndexType_) }; } // end namespace nvgraph
0
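AcceptImpl above implements acyclic-visitor double dispatch: the guest is only visited if it can be dynamic_cast to Visitor<Host>. A self-contained sketch of that mechanism, with stand-in VisitorBase/Visitor shapes since the real definitions live in graph_visitors.hxx.

#include <iostream>

struct VisitorBase { virtual ~VisitorBase() {} };

template <typename Host>
struct Visitor { virtual void Visit(Host&) = 0; virtual ~Visitor() {} };

struct MyGraph;  // the "Host" type

struct DegreePrinter : VisitorBase, Visitor<MyGraph> {
    void Visit(MyGraph&) override { std::cout << "visiting a MyGraph\n"; }
};

struct MyGraph {
    void Accept(VisitorBase& guest) {
        // Same test AcceptImpl performs: dispatch only if the guest can
        // actually handle this concrete host type.
        if (Visitor<MyGraph>* p = dynamic_cast<Visitor<MyGraph>*>(&guest))
            p->Visit(*this);
    }
};

int main() { MyGraph g; DegreePrinter v; g.Accept(v); return 0; }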
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_error.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <stdio.h> #include <string> #include <sstream> #include <time.h> #include <stacktrace.h> //#define VERBOSE_DIAG //#define DEBUG 1 namespace nvgraph { typedef void (*NVGRAPH_output_callback)(const char *msg, int length); extern NVGRAPH_output_callback nvgraph_output; extern NVGRAPH_output_callback error_output; extern NVGRAPH_output_callback nvgraph_distributed_output; int nvgraph_printf(const char* fmt, ...); #if defined(DEBUG) || defined(VERBOSE_DIAG) #define nvgraph_printf_debug(fmt,...) nvgraph_printf(fmt,##__VA_ARGS__) #define device_printf(fmt,...) printf(fmt,##__VA_ARGS__) #else #define nvgraph_printf_debug(fmt,...) #define device_printf(fmt,...) #endif // print stacktrace only in debug mode #if defined(DEBUG) || defined(VERBOSE_DIAG) #define STACKTRACE "\nStack trace:\n" + std::string(e.trace()) #define WHERE " at: " << __FILE__ << ':' << __LINE__ #else #define STACKTRACE "" #define WHERE "" #endif enum NVGRAPH_ERROR { /********************************************************* * Flags for status reporting *********************************************************/ NVGRAPH_OK=0, NVGRAPH_ERR_BAD_PARAMETERS=1, NVGRAPH_ERR_UNKNOWN=2, NVGRAPH_ERR_CUDA_FAILURE=3, NVGRAPH_ERR_THRUST_FAILURE=4, NVGRAPH_ERR_IO=5, NVGRAPH_ERR_NOT_IMPLEMENTED=6, NVGRAPH_ERR_NO_MEMORY=7, NVGRAPH_ERR_NOT_CONVERGED=8 }; // define our own bad_alloc so we can set its .what() class nvgraph_exception: public std::exception { public: inline nvgraph_exception(const std::string &w, const std::string &where, const std::string &trace, NVGRAPH_ERROR reason) : m_trace(trace), m_what(w), m_reason(reason), m_where(where) { } inline virtual ~nvgraph_exception(void) throw () {}; inline virtual const char *what(void) const throw() { return m_what.c_str(); } inline virtual const char *where(void) const throw() { return m_where.c_str(); } inline virtual const char *trace(void) const throw() { return m_trace.c_str(); } inline virtual NVGRAPH_ERROR reason(void) const throw() { return m_reason; } private: std::string m_trace; std::string m_what; NVGRAPH_ERROR m_reason; std::string m_where; }; // end bad_alloc int NVGRAPH_GetErrorString( NVGRAPH_ERROR error, char* buffer, int buf_len); /******************************************************** * Prints the error message, the stack trace, and exits * ******************************************************/ #define FatalError(s, reason) { \ std::stringstream _where; \ _where << WHERE ; \ std::stringstream _trace; \ printStackTrace(_trace); \ throw nvgraph_exception(std::string(s) + "\n", _where.str(), _trace.str(), reason); \ } #undef cudaCheckError #if defined(DEBUG) || defined(VERBOSE_DIAG) #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ std::stringstream _error; \ _error << "Cuda failure: '" << cudaGetErrorString(e) << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #else // NO DEBUG #define cudaCheckError() \ { \ cudaError_t 
__e = cudaGetLastError(); \ if (__e != cudaSuccess) { \ FatalError("", NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #endif #define CHECK_CUDA(call) \ { \ cudaError_t _e = (call); \ if (_e != cudaSuccess) \ { \ std::stringstream _error; \ _error << "CUDA Runtime failure: '#" << _e << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #define CHECK_CURAND(call) \ { \ curandStatus_t _e = (call); \ if (_e != CURAND_STATUS_SUCCESS) \ { \ std::stringstream _error; \ _error << "CURAND failure: '#" << _e << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #define CHECK_CUBLAS(call) \ { \ cublasStatus_t _e = (call); \ if (_e != CUBLAS_STATUS_SUCCESS) \ { \ std::stringstream _error; \ _error << "CUBLAS failure: '#" << _e << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #define CHECK_CUSPARSE(call) \ { \ cusparseStatus_t _e = (call); \ if (_e != CUSPARSE_STATUS_SUCCESS) \ { \ std::stringstream _error; \ _error << "CURAND failure: '#" << _e << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #define CHECK_CUSOLVER(call) \ { \ cusolverStatus_t _e = (call); \ if (_e != CUSOLVER_STATUS_SUCCESS) \ { \ std::stringstream _error; \ _error << "CURAND failure: '#" << _e << "'"; \ FatalError(_error.str(), NVGRAPH_ERR_CUDA_FAILURE); \ } \ } #define NVGRAPH_CATCHES(rc) catch (nvgraph_exception e) { \ std::string err = "Caught nvgraph exception: " + std::string(e.what()) \ + std::string(e.where()) + STACKTRACE + "\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = e.reason(); \ } catch (std::bad_alloc e) { \ std::string err = "Not enough memory: " + std::string(e.what()) \ + "\nFile and line number are not available for this exception.\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = NVGRAPH_ERR_NO_MEMORY; \ } catch (std::exception e) { \ std::string err = "Caught unknown exception: " + std::string(e.what()) \ + "\nFile and line number are not available for this exception.\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = NVGRAPH_ERR_UNKNOWN; \ } catch (...) { \ std::string err = \ "Caught unknown exception\nFile and line number are not available for this exception.\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = NVGRAPH_ERR_UNKNOWN; \ } // Since there is no global-level thrust dependency, we don't include this globally. May add later /* catch (thrust::system_error &e) { \ std::string err = "Thrust failure: " + std::string(e.what()) \ + "\nFile and line number are not available for this exception.\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = NVGRAPH_ERR_THRUST_FAILURE; \ } catch (thrust::system::detail::bad_alloc e) { \ std::string err = "Thrust failure: " + std::string(e.what()) \ + "\nFile and line number are not available for this exception.\n"; \ error_output(err.c_str(), static_cast<int>(err.length())); \ rc = NVGRAPH_ERR_NO_MEMORY; \ } */ // simple cuda timer // can be called in cpp files class cuda_timer { public: cuda_timer(); void start(); float stop(); // in ms private: struct event_pair; event_pair* p; }; } // namespace nvgraph
0
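A sketch, under assumptions, of how these pieces are meant to compose: FatalError raises an nvgraph_exception carrying the reason code, cudaCheckError promotes a sticky CUDA error, and NVGRAPH_CATCHES folds whatever was thrown back into an NVGRAPH_ERROR return value.

#include <cuda_runtime.h>
#include "nvgraph_error.hxx"

namespace nvgraph {

NVGRAPH_ERROR do_work(int n)
{
    NVGRAPH_ERROR rc = NVGRAPH_OK;
    try {
        if (n <= 0)
            FatalError("n must be positive", NVGRAPH_ERR_BAD_PARAMETERS);
        // ... launch kernels here ...
        cudaCheckError(); // turns a pending CUDA error into an exception
    }
    NVGRAPH_CATCHES(rc)   // fills rc from the caught exception, if any
    return rc;
}

} // namespace nvgraph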
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_vector_kernels.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace nvgraph { template <typename ValueType_> void nrm1_raw_vec (ValueType_* vec, size_t n, ValueType_* res, cudaStream_t stream = 0); template <typename ValueType_> void fill_raw_vec (ValueType_* vec, size_t n, ValueType_ value, cudaStream_t stream = 0); template <typename ValueType_> void dump_raw_vec (ValueType_* vec, size_t n, int offset, cudaStream_t stream = 0); template <typename ValueType_> void dmv (size_t num_vertices, ValueType_ alpha, ValueType_* D, ValueType_* x, ValueType_ beta, ValueType_* y, cudaStream_t stream = 0); template<typename ValueType_> void copy_vec(ValueType_ *vec1, size_t n, ValueType_ *res, cudaStream_t stream = 0); template <typename ValueType_> void flag_zeros_raw_vec(size_t num_vertices, ValueType_* vec, int* flag, cudaStream_t stream = 0 ); template <typename IndexType_, typename ValueType_> void set_connectivity( size_t n, IndexType_ root, ValueType_ self_loop_val, ValueType_ unreachable_val, ValueType_* res, cudaStream_t stream = 0); } // end namespace nvgraph
0
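A minimal sketch of driving the declared raw-vector helpers on a device buffer; that res must live in device memory is an assumption here.

#include <cuda_runtime.h>
#include "nvgraph_vector_kernels.hxx"

void vector_helpers_demo(cudaStream_t stream)
{
    const size_t n = 1024;
    float *vec = nullptr, *res = nullptr;
    cudaMalloc(&vec, n * sizeof(float));
    cudaMalloc(&res, sizeof(float));

    nvgraph::fill_raw_vec(vec, n, 1.0f, stream);  // vec[i] = 1.0
    nvgraph::nrm1_raw_vec(vec, n, res, stream);   // res = sum |vec[i]| = 1024
    nvgraph::dump_raw_vec(vec, 8, 0, stream);     // print the first 8 entries

    cudaFree(vec);
    cudaFree(res);
}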
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/bfs.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <climits> //Used in nvgraph.h #define TRAVERSAL_DEFAULT_ALPHA 15 #define TRAVERSAL_DEFAULT_BETA 18 #include "nvgraph_error.hxx" namespace nvgraph { template <typename IndexType> class Bfs { private: IndexType n, nnz; IndexType* row_offsets; IndexType* col_indices; bool directed; bool deterministic; // edgemask, distances, predecessors are set/read by users - using Vectors bool useEdgeMask; bool computeDistances; bool computePredecessors; IndexType *distances; IndexType *predecessors; int *edge_mask; //Working data //For complete description of each, go to bfs.cu IndexType nisolated; IndexType *frontier, *new_frontier; IndexType * original_frontier; IndexType vertices_bmap_size; int *visited_bmap, *isolated_bmap; IndexType *vertex_degree; IndexType *buffer_np1_1, *buffer_np1_2; IndexType *frontier_vertex_degree; IndexType *exclusive_sum_frontier_vertex_degree; IndexType *unvisited_queue; IndexType *left_unvisited_queue; IndexType *exclusive_sum_frontier_vertex_buckets_offsets; IndexType *d_counters_pad; IndexType *d_new_frontier_cnt; IndexType *d_mu; IndexType *d_unvisited_cnt; IndexType *d_left_unvisited_cnt; void *d_cub_exclusive_sum_storage; size_t cub_exclusive_sum_storage_bytes; //Parameters for direction optimizing IndexType alpha, beta; cudaStream_t stream; //resets pointers defined by d_counters_pad (see implem) void resetDevicePointers(); NVGRAPH_ERROR setup(); void clean(); public: virtual ~Bfs(void) { clean(); }; Bfs(IndexType _n, IndexType _nnz, IndexType *_row_offsets, IndexType *_col_indices, bool _directed, IndexType _alpha, IndexType _beta, cudaStream_t _stream = 0) : n(_n), nnz(_nnz), row_offsets(_row_offsets), col_indices(_col_indices), directed(_directed), alpha(_alpha), beta(_beta), stream(_stream) { setup(); } NVGRAPH_ERROR configure(IndexType *distances, IndexType *predecessors, int *edge_mask); NVGRAPH_ERROR traverse(IndexType source_vertex); //Used only for benchmarks NVGRAPH_ERROR traverse(IndexType *source_vertices, IndexType nsources); }; } // end namespace nvgraph
0
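A sketch of the expected call sequence for the Bfs class: construct over device CSR arrays, configure the output buffers, then traverse from a source vertex. Allocation of the device arrays is assumed to happen elsewhere.

#include <cuda_runtime.h>
#include "bfs.hxx"

void bfs_demo(int n, int nnz,
              int* d_row_offsets, int* d_col_indices,  // device CSR
              int* d_distances, int* d_predecessors,   // device outputs, size n
              cudaStream_t stream)
{
    nvgraph::Bfs<int> bfs(n, nnz, d_row_offsets, d_col_indices,
                          /*directed=*/true,
                          TRAVERSAL_DEFAULT_ALPHA, TRAVERSAL_DEFAULT_BETA,
                          stream);

    bfs.configure(d_distances, d_predecessors, /*edge_mask=*/nullptr);
    bfs.traverse(/*source_vertex=*/0);
}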
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/triangles_counting.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <csr_graph.hxx> #include <async_event.hxx> #include <nvgraph_error.hxx> #include <nvgraph_vector.hxx> #include <cuda_runtime.h> #include <triangles_counting_defines.hxx> namespace nvgraph { namespace triangles_counting { typedef enum { TCOUNT_DEFAULT, TCOUNT_BSH, TCOUNT_B2B, TCOUNT_WRP, TCOUNT_THR } TrianglesCountAlgo; template <typename IndexType> class TrianglesCount { private: //CsrGraph <IndexType>& m_last_graph ; AsyncEvent m_event; uint64_t m_triangles_number; spmat_t<IndexType> m_mat; int m_dev_id; cudaDeviceProp m_dev_props; Vector<IndexType> m_seq; cudaStream_t m_stream; bool m_done; void tcount_bsh(); void tcount_b2b(); void tcount_wrp(); void tcount_thr(); public: // Simple constructor TrianglesCount(const CsrGraph <IndexType>& graph, cudaStream_t stream = NULL, int device_id = -1); // Simple destructor ~TrianglesCount(); NVGRAPH_ERROR count(TrianglesCountAlgo algo = TCOUNT_DEFAULT ); inline uint64_t get_triangles_count() const {return m_triangles_number;} }; } // end namespace triangles_counting } // end namespace nvgraph
0
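A short usage sketch of the triangle counter declared above, assuming the CsrGraph has been built elsewhere.

#include <triangles_counting.hxx>

uint64_t count_triangles_demo(const nvgraph::CsrGraph<int>& graph)
{
    nvgraph::triangles_counting::TrianglesCount<int> counter(graph);

    // TCOUNT_DEFAULT lets the implementation pick between the BSH/B2B/WRP/THR
    // kernels; a specific algorithm can be forced instead.
    counter.count(nvgraph::triangles_counting::TCOUNT_DEFAULT);
    return counter.get_triangles_count();
}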
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_csrmv.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <algorithm>
#include <stdio.h>

#include <cuda_runtime.h> // cudaError_t, cudaStream_t

#include "valued_csr_graph.hxx"
#include "nvgraph_vector.hxx"

namespace nvgraph {

// This header file defines the various semirings using an enum.
// The datatype is assumed to be real unless otherwise specified in the name.
enum Semiring
{
    PlusTimes, // standard matrix-vector multiplication
    MinPlus,   // breadth-first search / shortest paths - also called tropical
    MaxMin,    // max flow problems
    OrAndBool,
    LogPlus
};

// Merge Path Coord array depends on the integer type
template <typename IndexType_>
struct Coord
{
    IndexType_ x;
    IndexType_ y;
};

// Struct which stores the csr matrix format, templated on the index and value
template <typename IndexType_, typename ValueType_>
struct CsrMvParams
{
    ValueType_ alpha;
    ValueType_ beta;
    ValueType_ *csrVal;    // nonzero values from matrix A
    // row pointer must look at next address to avoid the 0 in merge path
    IndexType_ *csrRowPtr; // row offsets; last entry is the number of nonzeros, size is m + 1
    IndexType_ *csrColInd; // column indices of nonzeros
    ValueType_ *x;         // vector x in alpha*A*x
    ValueType_ *y;         // output y will be modified and store the output
    IndexType_ m;          // number of rows
    IndexType_ n;          // number of columns
    IndexType_ nnz;
};

// Create a device function interface to call the above dispatch function
template <typename IndexType_, typename ValueType_>
cudaError_t csrmv_mp(IndexType_ n,
                     IndexType_ m,
                     IndexType_ nnz,
                     ValueType_ alpha,
                     ValueType_ *dValues, // all must be preallocated on the device
                     IndexType_ *dRowOffsets,
                     IndexType_ *dColIndices,
                     ValueType_ *dVectorX,
                     ValueType_ beta,
                     ValueType_ *dVectorY,
                     Semiring SR, // this parameter is of type enum and gives the semiring name
                     cudaStream_t stream = 0);

// Overloaded function that has a valued_csr_graph parameter to store the matrix
template <typename IndexType_, typename ValueType_>
cudaError_t csrmv_mp(IndexType_ n,
                     IndexType_ m,
                     IndexType_ nnz,
                     ValueType_ alpha,
                     ValuedCsrGraph<IndexType_, ValueType_> network,
                     ValueType_ *dVectorX,
                     ValueType_ beta,
                     ValueType_ *dVectorY,
                     Semiring SR, // this parameter is of type enum and gives the semiring name
                     cudaStream_t stream = 0);

} // end nvgraph namespace

template <typename IndexType_, typename ValueType_>
void callTestCsrmv(IndexType_ num_rows,
                   IndexType_ *dRowOffsets,
                   IndexType_ *dColIndices,
                   ValueType_ *dValues,
                   ValueType_ *dVectorX,
                   ValueType_ *dVectorY,
                   nvgraph::Semiring SR,
                   ValueType_ alpha,
                   ValueType_ beta);
0
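The Semiring enum selects the (add, multiply) pair the merge-path SpMV uses; with MinPlus, "multiply" becomes + and "add" becomes min, so one call acts like a shortest-path relaxation. A hedged call sketch of the first overload, with all device buffers assumed preallocated and the exact alpha/beta semantics left to the implementation.

#include "nvgraph_csrmv.hxx"

void sssp_relaxation_demo(int n, int nnz,
                          float* d_vals, int* d_row_off, int* d_col_ind,
                          float* d_x, float* d_y, cudaStream_t stream)
{
    // 0 is the multiplicative identity of the MinPlus (tropical) semiring.
    float alpha = 0.0f, beta = 0.0f;
    nvgraph::csrmv_mp<int, float>(n, n, nnz, alpha,
                                  d_vals, d_row_off, d_col_ind,
                                  d_x, beta, d_y,
                                  nvgraph::MinPlus, stream);
}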
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph_utils.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Helper functions based on Thrust #pragma once #include <cuda.h> #include <cuda_runtime.h> //#include <library_types.h> //#include <cuda_fp16.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/transform.h> #include <thrust/inner_product.h> #include <thrust/functional.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/sort.h> #define USE_CG 1 #define DEBUG 1 namespace nvlouvain { #define CUDA_MAX_BLOCKS 65535 #define CUDA_MAX_KERNEL_THREADS 256 //kernel will launch at most 256 threads per block #define DEFAULT_MASK 0xffffffff #define US //#define DEBUG 1 //error check #undef cudaCheckError #ifdef DEBUG #define WHERE " at: " << __FILE__ << ':' << __LINE__ #define cudaCheckError() { \ cudaError_t e=cudaGetLastError(); \ if(e!=cudaSuccess) { \ std::cerr << "Cuda failure: " << cudaGetErrorString(e) << WHERE << std::endl; \ } \ } #else #define cudaCheckError() #define WHERE "" #endif template<typename T> static __device__ __forceinline__ T shfl_up(T r, int offset, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #if USE_CG return __shfl_up_sync( mask, r, offset, bound ); #else return __shfl_up( r, offset, bound ); #endif #else return 0.0f; #endif } template<typename T> static __device__ __forceinline__ T shfl(T r, int lane, int bound = 32, int mask = DEFAULT_MASK) { #if __CUDA_ARCH__ >= 300 #if USE_CG return __shfl_sync(mask, r, lane, bound ); #else return __shfl(r, lane, bound ); #endif #else return 0.0f; #endif } template<typename T> __inline__ __device__ T parallel_prefix_sum(int n, int *ind,T *w) { int i,j,mn; T v,last; T sum=0.0; bool valid; //Parallel prefix sum (using __shfl) mn =(((n+blockDim.x-1)/blockDim.x)*blockDim.x); //n in multiple of blockDim.x for (i=threadIdx.x; i<mn; i+=blockDim.x) { //All threads (especially the last one) must always participate //in the shfl instruction, otherwise their sum will be undefined. //So, the loop stopping condition is based on multiple of n in loop increments, //so that all threads enter into the loop and inside we make sure we do not //read out of bounds memory checking for the actual size n. //check if the thread is valid valid = i<n; //Notice that the last thread is used to propagate the prefix sum. //For all the threads, in the first iteration the last is 0, in the following //iterations it is the value at the last thread of the previous iterations. //get the value of the last thread last = shfl(sum, blockDim.x-1, blockDim.x); //if you are valid read the value from memory, otherwise set your value to 0 sum = (valid) ? 
w[ind[i]] : 0.0; //do prefix sum (of size warpSize=blockDim.x =< 32) for (j=1; j<blockDim.x; j*=2) { v = shfl_up(sum, j, blockDim.x); if (threadIdx.x >= j) sum+=v; } //shift by last sum+=last; //notice that no __threadfence or __syncthreads are needed in this implementation } //get the value of the last thread (to all threads) last = shfl(sum, blockDim.x-1, blockDim.x); return last; } //dot template <typename T> T dot(size_t n, T* x, T* y) { T result = thrust::inner_product(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x+n), thrust::device_pointer_cast(y), 0.0f); cudaCheckError(); return result; } //axpy template <typename T> struct axpy_functor : public thrust::binary_function<T,T,T> { const T a; axpy_functor(T _a) : a(_a) {} __host__ __device__ T operator()(const T& x, const T& y) const { return a * x + y; } }; template <typename T> void axpy(size_t n, T a, T* x, T* y) { thrust::transform(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x+n), thrust::device_pointer_cast(y), thrust::device_pointer_cast(y), axpy_functor<T>(a)); cudaCheckError(); } //norm template <typename T> struct square { __host__ __device__ T operator()(const T& x) const { return x * x; } }; template <typename T> T nrm2(size_t n, T* x) { T init = 0; T result = std::sqrt( thrust::transform_reduce(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x+n), square<T>(), init, thrust::plus<T>()) ); cudaCheckError(); return result; } template <typename T> T nrm1(size_t n, T* x) { T result = thrust::reduce(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x+n)); cudaCheckError(); return result; } template <typename T> void scal(size_t n, T val, T* x) { thrust::transform(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x + n), thrust::make_constant_iterator(val), thrust::device_pointer_cast(x), thrust::multiplies<T>()); cudaCheckError(); } template <typename T> void fill(size_t n, T* x, T value) { thrust::fill(thrust::device_pointer_cast(x), thrust::device_pointer_cast(x + n), value); cudaCheckError(); } template <typename T> void printv(size_t n, T* vec, int offset) { thrust::device_ptr<T> dev_ptr(vec); std::cout.precision(15); std::cout << "sample size = "<< n << ", offset = "<< offset << std::endl; thrust::copy(dev_ptr+offset,dev_ptr+offset+n, std::ostream_iterator<T>(std::cout, " ")); cudaCheckError(); std::cout << std::endl; } template<typename T> void copy(size_t n, T *x, T *res) { thrust::device_ptr<T> dev_ptr(x); thrust::device_ptr<T> res_ptr(res); thrust::copy_n(dev_ptr, n, res_ptr); cudaCheckError(); } template <typename T> struct is_zero { __host__ __device__ bool operator()(const T x) { return x == 0; } }; template <typename T> struct dangling_functor : public thrust::unary_function<T,T> { const T val; dangling_functor(T _val) : val(_val) {} __host__ __device__ T operator()(const T& x) const { return val + x; } }; template <typename T> void update_dangling_nodes(size_t n, T* dangling_nodes, T damping_factor) { thrust::transform_if(thrust::device_pointer_cast(dangling_nodes), thrust::device_pointer_cast( dangling_nodes + n), thrust::device_pointer_cast(dangling_nodes), dangling_functor<T>(1.0-damping_factor), is_zero<T>()); cudaCheckError(); } //google matrix kernels template <typename IndexType, typename ValueType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) degree_coo ( const IndexType n, const IndexType e, const IndexType *ind, IndexType *degree) { for (int i=threadIdx.x+blockIdx.x*blockDim.x; i<e; i+=gridDim.x*blockDim.x) 
atomicAdd(&degree[ind[i]],1.0); } template <typename IndexType, typename ValueType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) equi_prob ( const IndexType n, const IndexType e, const IndexType *ind, ValueType *val, IndexType *degree) { for (int i=threadIdx.x+blockIdx.x*blockDim.x; i<e; i+=gridDim.x*blockDim.x) val[i] = 1.0/degree[ind[i]]; } template <typename IndexType, typename ValueType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) flag_leafs ( const IndexType n, IndexType *degree, ValueType *bookmark) { for (int i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=gridDim.x*blockDim.x) if (degree[i]==0) bookmark[i]=1.0; } //notice that in the transposed matrix/csc a dangling node is a node without incomming edges template <typename IndexType, typename ValueType> void google_matrix ( const IndexType n, const IndexType e, const IndexType *cooColInd, ValueType *cooVal, ValueType *bookmark) { thrust::device_vector<IndexType> degree(n,0); dim3 nthreads, nblocks; nthreads.x = min(e,CUDA_MAX_KERNEL_THREADS); nthreads.y = 1; nthreads.z = 1; nblocks.x = min((e + nthreads.x - 1)/nthreads.x,CUDA_MAX_BLOCKS); nblocks.y = 1; nblocks.z = 1; degree_coo<IndexType,ValueType><<<nblocks,nthreads>>>(n,e,cooColInd, thrust::raw_pointer_cast(degree.data())); equi_prob<IndexType,ValueType><<<nblocks,nthreads>>>(n,e,cooColInd, cooVal, thrust::raw_pointer_cast(degree.data())); ValueType val = 0.0; fill(n,bookmark,val); nthreads.x = min(n,CUDA_MAX_KERNEL_THREADS); nblocks.x = min((n + nthreads.x - 1)/nthreads.x,CUDA_MAX_BLOCKS); flag_leafs <IndexType,ValueType><<<nblocks,nthreads>>>(n, thrust::raw_pointer_cast(degree.data()), bookmark); //printv(n, thrust::raw_pointer_cast(degree.data()) , 0); //printv(n, bookmark , 0); //printv(e, cooVal , 0); } template <typename IndexType> __global__ void __launch_bounds__(CUDA_MAX_KERNEL_THREADS) update_clustering_kernel ( const IndexType n, IndexType *clustering, IndexType *aggregates_d) { for (int i=threadIdx.x+blockIdx.x*blockDim.x; i<n; i+=gridDim.x*blockDim.x) clustering[i] = aggregates_d[clustering[i]]; } template <typename IndexType> void update_clustering ( const IndexType n, IndexType *clustering, IndexType *aggregates_d) { int nthreads = min(n,CUDA_MAX_KERNEL_THREADS); int nblocks = min((n + nthreads - 1)/nthreads,CUDA_MAX_BLOCKS); update_clustering_kernel<IndexType><<<nblocks,nthreads>>>(n,clustering,aggregates_d); } } //namespace nvga
0
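A small sketch exercising the raw-pointer BLAS-style helpers above from thrust::device_vector storage; the expected values are noted in comments.

#include <thrust/device_vector.h>
#include "graph_utils.cuh"

void helpers_demo()
{
    const size_t n = 4;
    thrust::device_vector<float> x(n, 2.0f), y(n, 1.0f);
    float* px = thrust::raw_pointer_cast(x.data());
    float* py = thrust::raw_pointer_cast(y.data());

    float d  = nvlouvain::dot(n, px, py);   // 4 * (2*1) = 8
    nvlouvain::axpy(n, 3.0f, px, py);       // y = 3*x + y -> all entries 7
    float n2 = nvlouvain::nrm2(n, py);      // sqrt(4 * 49) = 14
    nvlouvain::scal(n, 0.5f, py);           // y *= 0.5 -> all entries 3.5
    nvlouvain::printv(n, py, 0);            // prints the 4 entries
    (void)d; (void)n2;
}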
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/debug_macros.h
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "nvgraph_error.hxx" #define CHECK_STATUS(...) \ do { \ if (__VA_ARGS__) { \ FatalError(#__VA_ARGS__, NVGRAPH_ERR_UNKNOWN); \ } \ } while (0) #define CHECK_NVGRAPH(...) \ do { \ NVGRAPH_ERROR e = __VA_ARGS__; \ if (e != NVGRAPH_OK) { \ FatalError(#__VA_ARGS__, e) \ } \ } while (0) #ifdef DEBUG #define COUT() (std::cout) #define CERR() (std::cerr) #define WARNING(message) \ do { \ std::stringstream ss; \ ss << "Warning (" << __FILE__ << ":" << __LINE__ << "): " << message; \ CERR() << ss.str() << std::endl; \ } while (0) #else // DEBUG #define WARNING(message) #endif
0
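A sketch of the intended use of these macros around an internal call; load_graph is a hypothetical helper declared only for the example.

#include "debug_macros.h"

namespace nvgraph {

NVGRAPH_ERROR load_graph(const char* path);   // assumed helper, for the sketch only

void demo(const char* path, int n)
{
    CHECK_STATUS(n <= 0);              // throws if the condition is true
    CHECK_NVGRAPH(load_graph(path));   // throws if the call is not NVGRAPH_OK
    WARNING("graph loaded");           // printed only in DEBUG builds
}

} // namespace nvgraph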
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/functor.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <thrust/random.h> namespace nvlouvain{ template<typename IdxType, typename IdxIter> struct link_to_cluster{ IdxType key; IdxIter cluster_iter; __host__ __device__ link_to_cluster(IdxType _key, IdxIter _iter): key(_key), cluster_iter(_iter){} __host__ __device__ bool operator()(const IdxType& csr_idx){ return ((*(cluster_iter + csr_idx)) == key); } }; template<typename IdxType, typename IdxIter> struct link_inside_cluster{ IdxType idx_i; IdxType key; IdxIter cluster_iter; __host__ __device__ link_inside_cluster(IdxType _idx_i, IdxType _key, IdxIter _iter):idx_i(_idx_i), key(_key), cluster_iter(_iter){} __host__ __device__ bool operator()(const IdxType& csr_idx){ return ((*(cluster_iter + csr_idx)) == (*(cluster_iter + idx_i))) && ((*(cluster_iter + csr_idx)) == key); } }; template<typename IdxType, typename IdxIter> struct link_incident_cluster{ IdxType key; IdxIter cluster_iter; IdxType i; __host__ __device__ link_incident_cluster(IdxType _key, IdxIter _iter, IdxType _i): key(_key), cluster_iter(_iter), i(_i){} __host__ __device__ bool operator()(const IdxType& csr_idx){ //if(csr_idx == i) return false; return (csr_idx == i) ? false : ((key) == (IdxType)(*(cluster_iter + csr_idx)) ); } }; template<typename IdxType, typename IdxIter> struct ci_not_equal_cj{ IdxType key; IdxIter cluster_iter; __host__ __device__ ci_not_equal_cj( IdxType _key, IdxIter _iter): key(_key), cluster_iter(_iter){} __host__ __device__ bool operator()(const IdxType& idx){ IdxType cj = *(cluster_iter+idx); return (cj != key); } }; template<typename IdxType, typename IdxIter> struct ci_is_cj{ IdxType key; IdxIter cluster_iter; __host__ __device__ ci_is_cj( IdxType _key, IdxIter _iter): key(_key), cluster_iter(_iter){} __host__ __device__ bool operator()(const IdxType& idx){ IdxType cj = *(cluster_iter+idx); return (cj == key); } }; template<typename IdxType> struct rand_functor{ IdxType low; IdxType up; __host__ __device__ rand_functor(IdxType _low, IdxType _up): low(_low), up(_up){} __host__ __device__ bool operator()(const IdxType& idx){ thrust::random::default_random_engine rand_eng; thrust::random::uniform_int_distribution< IdxType > random_op(low, up); rand_eng.discard(idx); return random_op(rand_eng); } }; template<typename IdxType> struct not_zero{ __host__ __device__ bool operator()(const IdxType& idx){ return (idx != 0); } }; template<typename IdxType> struct is_one{ __host__ __device__ bool operator()(const IdxType& x){ return x == 1; } }; template<typename IdxType> struct is_c{ IdxType c; __host__ __device__ is_c(int _c):c(_c){} __host__ __device__ bool operator()(const IdxType& x){ return x == c; } }; template<typename ValType> struct not_best{ ValType best_val; __host__ __device__ not_best(ValType _b):best_val(_b){} __host__ __device__ bool operator()(const ValType& val){ return (val != best_val); } }; template<typename ValType> struct assign_k_functor{ ValType* k_ptr; __host__ __device__ 
assign_k_functor(ValType* _k):k_ptr(_k){} template <typename Tuple> __host__ __device__ void operator()(Tuple t){ //output[i] = k_ptr[ ind[i] ]; thrust::get<1>(t) = *(k_ptr + thrust::get<0>(t)); // t.first = *(k_ptr + t.second); } }; template<typename IdxType, typename IdxIter> struct assign_table_functor{ IdxType* table_array; IdxIter cluster_iter; __host__ __device__ assign_table_functor(IdxIter _c, IdxType* _t):cluster_iter(_c),table_array(_t){} template <typename Tuple> __host__ __device__ void operator()(Tuple t){ //output[i] = k_ptr[ ind[i] ]; // thrust::get<1>(t) = *(k_ptr + thrust::get<0>(t)); table_array[*(cluster_iter + thrust::get<0>(t))] = 1; // t.first = *(k_ptr + t.second); } }; template<typename IdxType, typename ValType> struct minus_idx{ __host__ __device__ ValType operator()(const IdxType & x, const IdxType & y) const{ return (ValType) (x - y); } }; template<typename IdxType, typename IdxIter> struct sort_by_cluster{ IdxIter cluster_iter; __host__ __device__ sort_by_cluster(IdxIter _c):cluster_iter(_c){} __host__ __device__ bool operator()(const IdxType& a, const IdxType& b){ return (IdxType)(*(cluster_iter + a)) < (IdxType)(*(cluster_iter + b)); } }; template<typename IdxType> __device__ inline IdxType not_delta_function(IdxType c1, IdxType c2){ return (IdxType)(c1!=c2); } template<typename IdxType> __device__ inline IdxType delta_function(IdxType c1, IdxType c2){ return (IdxType)(c1==c2); } }// nvlouvain
0
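These functors are plain predicates meant to be handed to Thrust algorithms; a sketch counting cluster members with is_c and with the iterator-capturing ci_is_cj (the concrete iterator type used for IdxIter is an assumption).

#include <vector>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/sequence.h>
#include "functor.cuh"

void functor_demo()
{
    std::vector<int> h_cluster = {0, 1, 1, 2, 1, 0};   // cluster of each vertex
    thrust::device_vector<int> cluster(h_cluster.begin(), h_cluster.end());

    // How many vertices sit in cluster 1?
    int in_c1 = thrust::count_if(cluster.begin(), cluster.end(),
                                 nvlouvain::is_c<int>(1));   // 3

    // How many of the vertex ids 0..5 map to cluster 0? ci_is_cj captures the
    // cluster iterator and dereferences it once per id.
    thrust::device_vector<int> ids(6);
    thrust::sequence(ids.begin(), ids.end());
    typedef thrust::device_vector<int>::iterator IdxIter;
    int in_c0 = thrust::count_if(ids.begin(), ids.end(),
                                 nvlouvain::ci_is_cj<int, IdxIter>(0, cluster.begin())); // 2
    (void)in_c1; (void)in_c0;
}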
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_cusparse.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cusparse_v2.h> #include <cusparse_internal.h> #include "valued_csr_graph.hxx" #include "nvgraph_vector.hxx" #include <iostream> #include "debug_macros.h" namespace nvgraph { class Cusparse { private: // global CUSPARSE handle for nvgraph static cusparseHandle_t m_handle; // Constructor. Cusparse(); // Destructor. ~Cusparse(); public: // Get the handle. static cusparseHandle_t get_handle() { if (m_handle == 0) CHECK_CUSPARSE(cusparseCreate(&m_handle)); return m_handle; } // Destroy handle static void destroy_handle() { if (m_handle != 0) CHECK_CUSPARSE( cusparseDestroy(m_handle) ); m_handle = 0; } static void setStream(cudaStream_t stream) { cusparseHandle_t handle = Cusparse::get_handle(); CHECK_CUSPARSE(cusparseSetStream(handle, stream)); } // Set pointer mode static void set_pointer_mode_device(); static void set_pointer_mode_host(); // operate on all rows and columns y= alpha*A.x + beta*y template <typename IndexType_, typename ValueType_> static void csrmv( const bool transposed, const bool sym, const int m, const int n, const int nnz, const ValueType_* alpha, const ValueType_* csrVal, const IndexType_ *csrRowPtr, const IndexType_ *csrColInd, const ValueType_* x, const ValueType_* beta, ValueType_* y); template <typename IndexType_, typename ValueType_> static void csrmv( const bool transposed, const bool sym, const ValueType_* alpha, const ValuedCsrGraph<IndexType_, ValueType_>& G, const Vector<ValueType_>& x, const ValueType_* beta, Vector<ValueType_>& y ); // future possible features /* template <class TConfig> static void csrmv_with_mask( const typename TConfig::MatPrec alphaConst, Matrix<TConfig> &A, Vector<TConfig> &x, const typename TConfig::MatPrec betaConst, Vector<TConfig> &y ); template <class TConfig> static void csrmv_with_mask_restriction( const typename TConfig::MatPrec alphaConst, Matrix<TConfig> &A, Vector<TConfig> &x, const typename TConfig::MatPrec betaConst, Vector<TConfig> &y, Matrix<TConfig> &P); // E is a vector that represents a diagonal matrix // operate on all rows and columns // y= alpha*E.x + beta*y template <class TConfig> static void csrmv( const typename TConfig::MatPrec alphaConst, Matrix<TConfig> &A, const typename Matrix<TConfig>::MVector &E, Vector<TConfig> &x, const typename TConfig::MatPrec betaConst, Vector<TConfig> &y, ViewType view = OWNED ); // operate only on columns specified by columnColorSelector, see enum ColumnColorSelector above // operate only on rows of specified color, given by A.offsets_rows_per_color, A.sorted_rows_by_color // y= alpha*A.x + beta*y template <class TConfig> static void csrmv( ColumnColorSelector columnColorSelector, const int color, const typename TConfig::MatPrec alphaConst, Matrix<TConfig> &A, Vector<TConfig> &x, const typename TConfig::MatPrec betaConst, Vector<TConfig> &y, ViewType view = OWNED ); // E is a vector that represents a diagonal matrix // operate only on rows of specified color, given by 
A.offsets_rows_per_color, A.sorted_rows_by_color // y= alpha*E.x + beta*y template <class TConfig> static void csrmv( const int color, typename TConfig::MatPrec alphaConst, Matrix<TConfig> &A, const typename Matrix<TConfig>::MVector &E, Vector<TConfig> &x, typename TConfig::MatPrec betaConst, Vector<TConfig> &y, ViewType view=OWNED ); template <class TConfig> static void csrmm(typename TConfig::MatPrec alpha, Matrix<TConfig> &A, Vector<TConfig> &V, typename TConfig::VecPrec beta, Vector<TConfig> &Res); */ template <typename IndexType_, typename ValueType_> static void csrmm(const bool transposed, const bool sym, const int m, const int n, const int k, const int nnz, const ValueType_* alpha, const ValueType_* csrVal, const IndexType_* csrRowPtr, const IndexType_* csrColInd, const ValueType_* x, const int ldx, const ValueType_* beta, ValueType_* y, const int ldy); //template <typename IndexType_, typename ValueType_> static void csr2coo( const int n, const int nnz, const int *csrRowPtr, int *cooRowInd); }; } // end namespace nvgraph
0
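A sketch of a y = alpha*A*x + beta*y call through the static wrapper above, with all device arrays assumed preallocated and filled.

#include "nvgraph_cusparse.hxx"

void csrmv_demo(int m, int n, int nnz,
                const double* d_vals, const int* d_row_ptr, const int* d_col_ind,
                const double* d_x, double* d_y, cudaStream_t stream)
{
    double alpha = 1.0, beta = 0.0;

    nvgraph::Cusparse::setStream(stream);
    nvgraph::Cusparse::csrmv(/*transposed=*/false, /*sym=*/false,
                             m, n, nnz,
                             &alpha, d_vals, d_row_ptr, d_col_ind,
                             d_x, &beta, d_y);
}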
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/thrust_coarse_generator.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <thrust/device_vector.h> #include <thrust/system/detail/generic/reduce_by_key.h> #include <thrust/remove.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/gather.h> #include <thrust/binary_search.h> #include <thrust/detail/temporary_array.h> #include "util.cuh" #include "graph_utils.cuh" //#include <cusp/format_utils.h> //indices_to_offsets template <typename DerivedPolicy, typename IndexArray, typename OffsetArray> void indices_to_offsets(const thrust::execution_policy<DerivedPolicy> &exec, const IndexArray& indices, OffsetArray& offsets) { typedef typename OffsetArray::value_type OffsetType; // convert uncompressed row indices into compressed row offsets thrust::lower_bound(exec, indices.begin(), indices.end(), thrust::counting_iterator<OffsetType>(0), thrust::counting_iterator<OffsetType>(offsets.size()), offsets.begin()); } template <typename DerivedPolicy, typename ArrayType1, typename ArrayType2> void counting_sort_by_key(const thrust::execution_policy<DerivedPolicy> &exec, ArrayType1& keys, ArrayType2& vals//, /*typename ArrayType1::value_type min, typename ArrayType1::value_type max*/) { /* std::cout<<"## stable_sort_by_key\n" ; if(keys.size()!= vals.size()){ std::cout<<"Error keys.size()!= vals.size()\n" ; } */ CUDA_CALL(cudaDeviceSynchronize()); thrust::stable_sort_by_key(exec, keys.begin(), keys.end(), vals.begin()); CUDA_CALL(cudaDeviceSynchronize()); // std::cout<<"## done stable_sort_by_key\n"; } template <typename DerivedPolicy, typename ArrayType1, typename ArrayType2, typename ArrayType3> void sort_by_row_and_column(const thrust::execution_policy<DerivedPolicy> &exec, ArrayType1& row_indices, ArrayType2& column_indices, ArrayType3& values, typename ArrayType1::value_type min_row = 0, typename ArrayType1::value_type max_row = 0, typename ArrayType2::value_type min_col = 0, typename ArrayType2::value_type max_col = 0) { typedef typename ArrayType1::value_type IndexType1; typedef typename ArrayType2::value_type IndexType2; typedef typename ArrayType3::value_type ValueType; size_t N = row_indices.size(); thrust::detail::temporary_array<IndexType1, DerivedPolicy> permutation(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), N); thrust::sequence(exec, permutation.begin(), permutation.end()); /* IndexType1 minr = min_row; IndexType1 maxr = max_row; IndexType2 minc = min_col; IndexType2 maxc = max_col; */ //std::cout<<"## max element\n"; /* if(maxr == 0){ // maxr = *thrust::max_element(exec, row_indices.begin(), row_indices.end()); ArrayType1::iterator maxr_iter = thrust::max_element(exec, row_indices.begin(), row_indices.end()); maxr = *maxr_ptr; } if(maxc == 0){ // maxc = *thrust::max_element(exec, column_indices.begin(), column_indices.end()); ArrayType2::iterator maxc_iter = thrust::max_element(exec, column_indices.begin(), column_indices.end()); thrust::copy() maxc = *maxc_ptr; } */ // std::cout<<"## compute permutation and sort by (I,J)\n"; // compute 
permutation and sort by (I,J) { thrust::detail::temporary_array<IndexType1, DerivedPolicy> temp(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), column_indices.begin(), column_indices.end()); counting_sort_by_key(exec, temp, permutation/*, minc, maxc*/); thrust::copy(exec, row_indices.begin(), row_indices.end(), temp.begin()); thrust::gather(exec, permutation.begin(), permutation.end(), temp.begin(), row_indices.begin()); counting_sort_by_key(exec, row_indices, permutation/*, minr, maxr*/); // thrust::stable_sort_by_key(exec, row_indices.begin(), row_indices.end(), permutation.begin()); thrust::copy(exec, column_indices.begin(), column_indices.end(), temp.begin()); thrust::gather(exec, permutation.begin(), permutation.end(), temp.begin(), column_indices.begin()); } // use permutation to reorder the values { thrust::detail::temporary_array<ValueType, DerivedPolicy> temp(thrust::detail::derived_cast(thrust::detail::strip_const(exec)), values.begin(), values.end()); thrust::gather(exec, permutation.begin(), permutation.end(), temp.begin(), values.begin()); } } //#include <cusp/system/detail/generic/format_utils.h> // -------------------- // Kernels // -------------------- // Kernel to store aggregate I of each fine point index i template <typename IndexType> __global__ void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows) { for (int tid = blockDim.x*blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x) { int agg = aggregates[tid]; for (int j=row_offsets[tid];j<row_offsets[tid+1];j++) { I[j] = agg; } } } // Kernel to store aggregate J of each fine point index j template <typename IndexType> __global__ void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries) { for (int tid = blockDim.x*blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x) { int j = column_indices[tid]; J[tid] = aggregates[j]; } } //----------------------------------------------------- // Method to compute the Galerkin product: A_c=R*A*P //----------------------------------------------------- // Method to compute Ac on DEVICE using csr format template <typename IndexType, typename ValueType> void generate_superverticies_graph(const int n_vertex, const int num_aggregates, thrust::device_vector<IndexType> &csr_ptr_d, thrust::device_vector<IndexType> &csr_ind_d, thrust::device_vector<ValueType> &csr_val_d, thrust::device_vector<IndexType> &new_csr_ptr_d, thrust::device_vector<IndexType> &new_csr_ind_d, thrust::device_vector<ValueType> &new_csr_val_d, const thrust::device_vector<IndexType> &aggregates ){ const int n_edges = csr_ptr_d[n_vertex]; thrust::device_vector<IndexType> I(n_edges,-1); thrust::device_vector<IndexType> J(n_edges,-1); thrust::device_vector<ValueType> V(n_edges,-1); const int block_size_I = 128; const int block_size_J = 256; const int num_blocks_I = min( GRID_MAX_SIZE, (int) ((n_vertex-1)/block_size_I + 1) ); const int num_blocks_J = min( GRID_MAX_SIZE, (int) ((n_edges-1)/block_size_J + 1) ); const IndexType *row_offsets_ptr = thrust::raw_pointer_cast(csr_ptr_d.data()); const IndexType *column_indices_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); const IndexType *aggregates_ptr= thrust::raw_pointer_cast(aggregates.data()); IndexType *I_ptr= thrust::raw_pointer_cast(&I[0]); IndexType *J_ptr= thrust::raw_pointer_cast(&J[0]); // Kernel to fill array I with aggregates number for fine points i 
iToIKernel<<<num_blocks_I,block_size_I>>>(row_offsets_ptr, aggregates_ptr, I_ptr, (int)n_vertex); cudaCheckError(); // Kernel to fill array J with aggregates number for fine points j jToJKernel<<<num_blocks_J,block_size_J>>>(column_indices_ptr, aggregates_ptr, J_ptr, (int)n_edges); cudaCheckError(); // Copy A.values to V array thrust::copy(thrust::device, csr_val_d.begin(), csr_val_d.begin() + n_edges, V.begin()); cudaCheckError(); //cudaDeviceSynchronize(); // Sort (I,J,V) by rows and columns (I,J) // TODO : remove cusp depedency sort_by_row_and_column(thrust::device, I, J, V); cudaCheckError(); cudaDeviceSynchronize(); // compute unique number of nonzeros in the output IndexType NNZ = thrust::inner_product(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end (), J.end())) - 1, thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())) + 1, IndexType(0), thrust::plus<IndexType>(), thrust::not_equal_to< thrust::tuple<IndexType,IndexType> >()) + 1; cudaCheckError(); // allocate space for coarse matrix Ac new_csr_ptr_d.resize(num_aggregates+1); new_csr_ind_d.resize(NNZ); new_csr_val_d.resize(NNZ); // Reduce by key to fill in Ac.column_indices and Ac.values thrust::device_vector<IndexType> new_row_indices(NNZ,0); thrust::reduce_by_key(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())), thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), V.begin(), thrust::make_zip_iterator(thrust::make_tuple(new_row_indices.begin(), new_csr_ind_d.begin())), new_csr_val_d.begin(), thrust::equal_to< thrust::tuple<IndexType,IndexType> >(), thrust::plus<ValueType>()); cudaCheckError(); indices_to_offsets(thrust::device, new_row_indices, new_csr_ptr_d); cudaCheckError(); }
0
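indices_to_offsets above converts sorted COO row indices into CSR row offsets with a vectorized lower_bound; a tiny sketch of that conversion and its expected result.

#include <vector>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include "thrust_coarse_generator.cuh"

void indices_to_offsets_demo()
{
    // 3-row matrix with rows of 2, 0 and 3 nonzeros (indices already sorted).
    std::vector<int> h_rows = {0, 0, 2, 2, 2};
    thrust::device_vector<int> row_indices(h_rows.begin(), h_rows.end());
    thrust::device_vector<int> offsets(4);   // num_rows + 1

    indices_to_offsets(thrust::device, row_indices, offsets);
    // offsets now holds {0, 2, 2, 5}
}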
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/debug_help.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * debug_help.h
 *
 *  Created on: Jul 19, 2018
 *      Author: jwyles
 */

#pragma once

#include <cstdlib>        // malloc/free
#include <string>
#include <iostream>

#include <cuda_runtime.h> // cudaMemcpy

namespace debug {

template <typename T>
void printDeviceVector(T* dev_ptr, int items, std::string title)
{
    // Copy the device buffer into a temporary host buffer and print it.
    T* host_ptr = (T*)malloc(sizeof(T) * items);
    cudaMemcpy(host_ptr, dev_ptr, sizeof(T) * items, cudaMemcpyDefault);
    std::cout << title << ": { ";
    for (int i = 0; i < items; i++) {
        std::cout << host_ptr[i] << ((i < items - 1) ? ", " : " ");
    }
    std::cout << "}\n";
    free(host_ptr);
}

} // namespace debug
0
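A minimal usage sketch of the device-buffer dump helper above.

#include <cuda_runtime.h>
#include "debug_help.h"

void dump_demo()
{
    int h[4] = {3, 1, 4, 1};
    int* d = nullptr;
    cudaMalloc(&d, sizeof(h));
    cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);

    debug::printDeviceVector(d, 4, "my_vector");   // my_vector: { 3, 1, 4, 1 }

    cudaFree(d);
}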
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/valued_csr_graph.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once namespace nvlouvain{ template <typename ValType> class Vector: public thrust::device_vector<ValType>{ public: Vector(): thrust::device_vector<ValType>(){} Vector(int size): thrust::device_vector<ValType>(size){} template <typename Iter> Vector(Iter begin, Iter end): thrust::device_vector<ValType>(begin, end){} inline void fill(const ValType val){ thrust::fill(thrust::cuda::par, this->begin(), this->end(), val); } inline thrust::device_vector<ValType>& to_device_vector(){ return static_cast<thrust::device_vector<ValType>> (*this); } inline ValType* raw(){ return (ValType*)thrust::raw_pointer_cast( thrust::device_vector<ValType>::data() ); } inline int get_size(){ return this->size(); } }; template <typename IndexType, typename ValueType> class CsrGraph{ public: CsrGraph( thrust::device_vector<IndexType>& csr_ptr_d, thrust::device_vector<IndexType>& csr_ind_d, thrust::device_vector<ValueType>& csr_val_d, IndexType v, IndexType e, bool _w=false): _n_vertices(v), _n_edges(e), csr_ptr(csr_ptr_d.begin(), csr_ptr_d.end()), csr_ind(csr_ind_d.begin(), csr_ind_d.end()), csr_val(csr_val_d.begin(), csr_val_d.end()), weighted(_w){ } CsrGraph( thrust::host_vector<IndexType>& csr_ptr_d, thrust::host_vector<IndexType>& csr_ind_d, thrust::host_vector<ValueType>& csr_val_d, IndexType v, IndexType e, bool _w=false): _n_vertices(v), _n_edges(e), csr_ptr(csr_ptr_d.begin(), csr_ptr_d.end()), csr_ind(csr_ind_d.begin(), csr_ind_d.end()), csr_val(csr_val_d.begin(), csr_val_d.end()), weighted(_w){ } inline const IndexType get_num_vertices() const{ return _n_vertices; } inline const IndexType get_num_edges() const{ return csr_ptr.back(); } inline const IndexType* get_raw_row_offsets() const{ return thrust::raw_pointer_cast(csr_ptr.data()); } inline const IndexType* get_raw_column_indices()const { return thrust::raw_pointer_cast(csr_ind.data());; } inline const ValueType* get_raw_values() const{ return thrust::raw_pointer_cast(csr_val.data()); } inline const Vector<IndexType> & get_row_offsets() const{ return csr_ptr; } inline const Vector<IndexType> & get_column_indices() const{ return csr_ind; } inline const Vector<ValueType> & get_values() const{ return csr_val; } inline const Vector<IndexType> & get_csr_ptr() const{ return csr_ptr; } inline const Vector<IndexType> & get_csr_ind() const{ return csr_ind; } inline const Vector<ValueType> & get_csr_val() const{ return csr_val; } inline void update_csr_ptr(thrust::device_vector<IndexType> & d_v){ thrust::copy(thrust::cuda::par, d_v.begin(), d_v.end(), csr_ptr.begin()); } inline void update_csr_ptr_n(thrust::device_vector<IndexType> & d_v,unsigned size){ csr_ptr.resize(size); thrust::copy_n(thrust::cuda::par, d_v.begin(), size, csr_ptr.begin()); } inline void update_csr_ind(thrust::device_vector<IndexType> & d_v){ thrust::copy(thrust::cuda::par, d_v.begin(), d_v.end(), csr_ind.begin()); } inline void update_csr_ind_n(thrust::device_vector<IndexType> & d_v,unsigned size){ 
csr_ind.resize(size); thrust::copy_n(thrust::cuda::par, d_v.begin(), size, csr_ind.begin()); } inline void update_csr_val(thrust::device_vector<ValueType> & d_v){ thrust::copy(thrust::cuda::par, d_v.begin(), d_v.end(), csr_val.begin()); } inline void update_csr_val_n(thrust::device_vector<ValueType> & d_v,unsigned size){ csr_val.resize(size); thrust::copy_n(thrust::cuda::par, d_v.begin(), size, csr_val.begin()); } inline void update_graph(size_t n_v, size_t n_e, thrust::device_vector<IndexType> & ptr, thrust::device_vector<IndexType> & ind, thrust::device_vector<ValueType> & val, bool w){ _n_vertices = n_v; _n_edges = n_e; #ifdef DEBUG if(n_v != ptr.size()){ std::cout<<"n_vertex size not match\n"; } if(n_e != ind.size() || n_e != val.size()){ std::cout<<"n_edges size not match\n"; } #endif update_csr_ptr_n(ptr, _n_vertices); update_csr_ind_n(ind, _n_edges); update_csr_val_n(val, _n_edges); weighted = w; } private: size_t _n_vertices; size_t _n_edges; Vector<IndexType> csr_ptr; Vector<IndexType> csr_ind; Vector<ValueType> csr_val; bool weighted; }; }; //nvlouvain
0
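A sketch of building the nvlouvain CSR wrapper above from host arrays and querying it; a 3-vertex triangle with unit weights serves as toy input.

#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "valued_csr_graph.cuh"

void csr_graph_demo()
{
    // 3-vertex undirected triangle stored as 6 directed edges, unit weights.
    int ptr_h[4] = {0, 2, 4, 6};
    int ind_h[6] = {1, 2, 0, 2, 0, 1};
    thrust::host_vector<int>   ptr(ptr_h, ptr_h + 4);
    thrust::host_vector<int>   ind(ind_h, ind_h + 6);
    thrust::host_vector<float> val(6, 1.0f);

    nvlouvain::CsrGraph<int, float> g(ptr, ind, val,
                                      /*v=*/3, /*e=*/6, /*weighted=*/false);

    size_t nv = g.get_num_vertices();          // 3
    size_t ne = g.get_num_edges();             // csr_ptr.back() == 6
    const float* d_vals = g.get_raw_values();  // raw device pointer to weights
    (void)nv; (void)ne; (void)d_vals;
}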
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/modularity.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda.h> #include <cuda_runtime.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/reduce.h> #include <thrust/random.h> #include <thrust/generate.h> #include <thrust/transform.h> #include "util.cuh" #include "graph_utils.cuh" #include "functor.cuh" //#include "block_modulariy.cuh" namespace nvlouvain{ /************************************************************* * * compute k vector from [ k0, k1, ..., kn ] * * - input : * n_vertex * csr_ptr's iterator * csr_val's iterator * * - output: * results: k_vec : k vectors * ***************************************************************/ template<typename ValType, typename IdxType> __device__ void compute_k_vec(const int n_vertex, IdxType* csr_ptr_ptr, ValType* csr_val_ptr, bool weighted, ValType* k_vec){ int tid = blockDim.x*blockIdx.x + threadIdx.x; if( (tid < n_vertex) ){ int start_idx = *(csr_ptr_ptr + tid); int end_idx = *(csr_ptr_ptr + tid + 1); #ifdef DEBUG if( end_idx > (*(csr_ptr_ptr + n_vertex)) ){ printf("Error computing ki iter but end_idx >= n_vertex %d >= %d\n"); *(k_vec + tid) = 0.0; } #endif if(!weighted){ *(k_vec + tid) = (ValType)end_idx - start_idx; } else{ ValType sum = 0.0; #pragma unroll for(int i = 0 ; i < end_idx - start_idx; ++ i){ sum += *(csr_val_ptr + start_idx + i); } *(k_vec + tid) = sum; } } return; } template<typename IdxType, typename ValType> __device__ void modularity_i( const int n_vertex, const int n_clusters, IdxType* csr_ptr_ptr, IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, ValType* k_ptr, ValType* Q_arr, ValType* temp_i, // size = n_edges ValType m2 ){ int i = blockIdx.x * blockDim.x + threadIdx.x; IdxType start_idx, end_idx, c_i; ValType ki(0.0), Ai(0.0), sum_k(0.0); IdxType start_c_idx; IdxType end_c_idx; if(i < n_vertex){ start_idx = *( csr_ptr_ptr + i ); end_idx = *( csr_ptr_ptr + i + 1 ); c_i = *(cluster_ptr + i); ki = *(k_ptr + i); //only sees its neibors Ai = 0.0; #pragma unroll for(int j = 0; j< end_idx - start_idx; ++j){ IdxType j_idx = (IdxType)(*(csr_ind_ptr + j + start_idx)); IdxType c_j = (IdxType)(*(cluster_ptr + j_idx)); Ai += ((int)(c_i != c_j)*((ValType)(*(csr_val_ptr + j + start_idx)))); } start_c_idx = *(cluster_inv_ptr_ptr + c_i); end_c_idx = *(cluster_inv_ptr_ptr + c_i + 1); #ifdef DEBUG if (temp_i == NULL) printf("Error in allocate temp_i memory in thread %d\n",i); #endif #pragma unroll for(int j = 0; j< end_c_idx-start_c_idx; ++j){ IdxType j_idx = (IdxType)(*(cluster_inv_ind_ptr + j + start_c_idx)); sum_k += (ValType)(*(k_ptr + j_idx)); } sum_k = m2 - sum_k; *(Q_arr + i) =( Ai - (( ki * sum_k )/ m2))/m2 ; // printf("-- i: %d Q: %.6e Ai: %f ki*sum_k = %f x %f = %f\n", i, *(Q_arr + i), Ai, ki, sum_k, (ki * sum_k)); } return; } template<typename IdxType=int, typename ValType> __device__ void modularity_no_matrix(const int n_vertex, const int n_clusters, ValType m2, IdxType* csr_ptr_ptr, 
IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, bool weighted, // bool identical_cluster, // todo optimizaiton ValType* k_vec, ValType* Q_arr, ValType* temp_i){ compute_k_vec(n_vertex, csr_ptr_ptr, csr_val_ptr, weighted, k_vec); __syncthreads(); modularity_i(n_vertex, n_clusters, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec, Q_arr, temp_i, m2); } template<typename IdxType, typename ValType> __global__ void kernel_modularity_no_matrix(const int n_vertex, const int n_clusters, ValType m2, IdxType* csr_ptr_ptr, IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, bool weighted, ValType* k_vec_ptr, ValType* Q_arr_ptr, ValType* temp_i_ptr){ ValType m2_s(m2); modularity_no_matrix(n_vertex, n_clusters, m2_s, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr ); } template<typename IdxType, typename ValType> ValType modularity(const int n_vertex, int n_edges, const int n_clusters, ValType m2, IdxType* csr_ptr_ptr, IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, bool weighted, ValType* k_vec_ptr, ValType* Q_arr_ptr, ValType* temp_i_ptr // temporary space for calculation ){ thrust::fill(thrust::device, temp_i_ptr, temp_i_ptr + n_edges, 0.0); int nthreads = min(n_vertex,CUDA_MAX_KERNEL_THREADS); int nblocks = min((n_vertex + nthreads - 1)/nthreads,CUDA_MAX_BLOCKS); kernel_modularity_no_matrix<<<nblocks, nthreads >>>(n_vertex, n_clusters, m2, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, weighted, k_vec_ptr, Q_arr_ptr, temp_i_ptr); CUDA_CALL(cudaDeviceSynchronize()); ValType Q = thrust::reduce(thrust::cuda::par, Q_arr_ptr, Q_arr_ptr + n_vertex, (ValType)(0.0)); return -Q; } /*********************** cluster_iter(n_vertex) cluster_inv_ptr(c_size + 1) cluster_inv_ind(n_vertex) seq_idx(n_vertex) [0, 1, 2, ... , n_vertex -1] ***********************/ template<typename IdxIter, typename IdxType=int> __global__ void generate_cluster_inv_ptr(const int n_vertex, const int c_size, IdxIter cluster_iter, IdxType* cluster_inv_ptr){ int tid = blockDim.x * blockIdx.x + threadIdx.x; IdxType ci; //Inital cluster_inv_ptr outside!!! 
if(tid < n_vertex){ ci = *(cluster_iter + tid); atomicAdd(cluster_inv_ptr + ci, 1); } } template<typename IdxType=int, typename IdxIter> void generate_cluster_inv(const int n_vertex, const int c_size, IdxIter cluster_iter, thrust::device_vector<IdxType>& cluster_inv_ptr, thrust::device_vector<IdxType>& cluster_inv_ind){ int nthreads = min(n_vertex,CUDA_MAX_KERNEL_THREADS); int nblocks = min((n_vertex + nthreads - 1)/nthreads,CUDA_MAX_BLOCKS); thrust::fill(thrust::cuda::par, cluster_inv_ptr.begin(), cluster_inv_ptr.end(), 0); cudaCheckError(); IdxType* cluster_inv_ptr_ptr = thrust::raw_pointer_cast(cluster_inv_ptr.data()); generate_cluster_inv_ptr<<<nblocks,nthreads>>>(n_vertex, c_size, cluster_iter, cluster_inv_ptr_ptr); CUDA_CALL(cudaDeviceSynchronize()); #ifdef DEBUG if((unsigned)c_size + 1 > cluster_inv_ptr.size()) std::cout<<"Error cluster_inv_ptr run out of memory\n"; #endif thrust::exclusive_scan(thrust::device, cluster_inv_ptr.begin(), cluster_inv_ptr.begin() + c_size + 1 , cluster_inv_ptr.begin()); cudaCheckError(); thrust::sequence(thrust::device, cluster_inv_ind.begin(), cluster_inv_ind.end(), 0); cudaCheckError(); thrust::sort(thrust::device, cluster_inv_ind.begin(), cluster_inv_ind.begin() + n_vertex, sort_by_cluster<IdxType, IdxIter>(cluster_iter)); cudaCheckError(); } }// nvlouvain
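
The device code above accumulates a per-vertex modularity contribution and reduces it on the host. As a point of reference only, below is a minimal sequential sketch of standard Newman modularity on a CSR graph; the file and function names are hypothetical, it is not part of nvlouvain, and it simply makes the quantity concrete on a toy graph (two triangles joined by one edge).

// modularity_reference.cpp -- illustrative only, not part of nvgraph/nvlouvain.
#include <cstdio>
#include <vector>

// Standard Newman modularity Q = (1/2m) * sum_ij (A_ij - k_i*k_j/2m) * [c_i == c_j]
// computed sequentially over a CSR graph; "cluster" holds one label per vertex.
double modularity_reference(int n_vertex,
                            const std::vector<int>& csr_ptr,
                            const std::vector<int>& csr_ind,
                            const std::vector<double>& csr_val,
                            const std::vector<int>& cluster) {
  std::vector<double> k(n_vertex, 0.0);          // weighted degree of each vertex
  double m2 = 0.0;                               // 2m = total weight of all stored entries
  for (int i = 0; i < n_vertex; ++i) {
    for (int j = csr_ptr[i]; j < csr_ptr[i + 1]; ++j) k[i] += csr_val[j];
    m2 += k[i];
  }
  double q = 0.0;
  for (int i = 0; i < n_vertex; ++i) {
    // A_ij term: only stored neighbours in the same cluster contribute.
    for (int j = csr_ptr[i]; j < csr_ptr[i + 1]; ++j)
      if (cluster[csr_ind[j]] == cluster[i]) q += csr_val[j];
    // Null-model term: subtract k_i*k_j/2m for every j in the same cluster.
    for (int jv = 0; jv < n_vertex; ++jv)
      if (cluster[jv] == cluster[i]) q -= k[i] * k[jv] / m2;
  }
  return q / m2;
}

int main() {
  // Two triangles joined by a single edge; each triangle is its own cluster.
  std::vector<int> ptr = {0, 2, 4, 7, 10, 12, 14};
  std::vector<int> ind = {1, 2, 0, 2, 0, 1, 3, 2, 4, 5, 3, 5, 3, 4};
  std::vector<double> val(ind.size(), 1.0);
  std::vector<int> cluster = {0, 0, 0, 1, 1, 1};
  std::printf("Q = %f\n", modularity_reference(6, ptr, ind, val, cluster));  // ~0.357
  return 0;
}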
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/triangles_counting_kernels.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <triangles_counting.hxx>

namespace nvgraph {

namespace triangles_counting {

template <typename T>
void tricnt_bsh(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, size_t bmld, cudaStream_t stream);

template <typename T>
void tricnt_wrp(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, unsigned int *bmap_d, size_t bmld, cudaStream_t stream);

template <typename T>
void tricnt_thr(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, cudaStream_t stream);

template <typename T>
void tricnt_b2b(T nblock, spmat_t<T> *m, uint64_t *ocnt_d, unsigned int *bmapL0_d, size_t bmldL0, unsigned int *bmapL1_d, size_t bmldL1, cudaStream_t stream);

template <typename T>
uint64_t reduce(uint64_t *v_d, T n, cudaStream_t stream);

template <typename T>
void create_nondangling_vector(const T *roff, T *p_nonempty, T *n_nonempty, size_t n, cudaStream_t stream);

void myCudaMemset(unsigned long long *p, unsigned long long v, long long n, cudaStream_t stream);

} // namespace triangles_counting

} // namespace nvgraph
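
These are only declarations; the kernels themselves live in the corresponding .cu file. As a rough illustration of what the thread/warp/block/bitmap variants all compute, here is a sequential triangle-count reference over sorted CSR adjacency lists (hypothetical file name, illustration only, not one of the declared kernels):

// triangle_count_reference.cpp -- illustrative sketch only; not one of the kernels above.
#include <cstdint>
#include <cstdio>
#include <vector>

// Counts triangles in an undirected graph given as CSR with sorted column indices,
// by intersecting the adjacency lists of the two endpoints of every edge (u < v).
// The GPU variants declared above parallelise this intersection step at different
// granularities (per thread, per warp, per block, or with bitmaps).
uint64_t count_triangles(const std::vector<int>& roff, const std::vector<int>& cind) {
  const int n = static_cast<int>(roff.size()) - 1;
  uint64_t cnt = 0;
  for (int u = 0; u < n; ++u) {
    for (int e = roff[u]; e < roff[u + 1]; ++e) {
      const int v = cind[e];
      if (v <= u) continue;                       // visit each undirected edge once
      // |N(u) ∩ N(v)| restricted to w > v, so every triangle is counted exactly once.
      int a = roff[u], b = roff[v];
      while (a < roff[u + 1] && b < roff[v + 1]) {
        if (cind[a] < cind[b]) ++a;
        else if (cind[a] > cind[b]) ++b;
        else { if (cind[a] > v) ++cnt; ++a; ++b; }
      }
    }
  }
  return cnt;
}

int main() {
  // 4-clique: 4 vertices, all pairs connected -> 4 triangles.
  std::vector<int> roff = {0, 3, 6, 9, 12};
  std::vector<int> cind = {1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2};
  std::printf("triangles = %llu\n", (unsigned long long)count_triangles(roff, cind));
  return 0;
}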
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_convert.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <nvgraph.h> #include <nvgraph_cusparse.hxx> #include <cnmem_shared_ptr.hxx> namespace nvgraph{ void csr2coo( const int *csrSortedRowPtr, int nnz, int m, int *cooRowInd, cusparseIndexBase_t idxBase); void coo2csr( const int *cooRowInd, int nnz, int m, int *csrSortedRowPtr, cusparseIndexBase_t idxBase ); void csr2csc( int m, int n, int nnz, const void *csrVal, const int *csrRowPtr, const int *csrColInd, void *cscVal, int *cscRowInd, int *cscColPtr, cusparseAction_t copyValues, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void csc2csr( int m, int n, int nnz, const void *cscVal, const int *cscRowInd, const int *cscColPtr, void *csrVal, int *csrRowPtr, int *csrColInd, cusparseAction_t copyValues, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void csr2cscP( int m, int n, int nnz, const int *csrRowPtr, const int *csrColInd, int *cscRowInd, int *cscColPtr, int *p, cusparseIndexBase_t idxBase); void cooSortBySource(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void cooSortByDestination(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void coos2csc(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void cood2csr(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void coou2csr(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void coou2csc(int m, int n, int nnz, const void *srcVal, const int *srcRowInd, const int *srcColInd, void *dstVal, int *dstRowInd, int *dstColInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); ////////////////////////// Utility functions ////////////////////////// void createIdentityPermutation(int n, int *p); void gthrX(int nnz, const void *y, void *xVal, const int *xInd, cusparseIndexBase_t idxBase, cudaDataType_t *dataType); void cooSortBufferSize(int m, int n, int nnz, const int *cooRows, const int *cooCols, size_t *pBufferSizeInBytes); void cooGetSourcePermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer); void cooGetDestinationPermutation(int m, int n, int nnz, int *cooRows, int *cooCols, int *p, void *pBuffer); void csr2csc2BufferSize(int m, int n, int nnz, const int *csrRowPtr, const int *csrColInd, size_t *pBufferSize); void csr2csc2(int m, int n, int nnz, const int *csrRowPtr, const int *csrColInd, int *cscRowInd, int 
*cscColPtr, int *p, void *pBuffer, cusparseIndexBase_t idxBase); } //end nvgraph namespace
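
The routines above dispatch format conversions to cuSPARSE on the device. Purely to pin down their semantics, a host-side sketch of the two simplest conversions, csr2coo and coo2csr, is given below; the names are hypothetical and the real implementations run on the GPU.

// convert_reference.cpp -- host-side sketch of what csr2coo/coo2csr compute;
// the declarations above perform the same conversions through cuSPARSE.
#include <cstdio>
#include <vector>

// Expand CSR row offsets (size m+1) into an explicit COO row index per nonzero.
void csr2coo_ref(const std::vector<int>& csrRowPtr, int m, std::vector<int>& cooRowInd) {
  cooRowInd.resize(csrRowPtr[m]);
  for (int r = 0; r < m; ++r)
    for (int j = csrRowPtr[r]; j < csrRowPtr[r + 1]; ++j) cooRowInd[j] = r;
}

// Compress sorted COO row indices back into CSR row offsets via a histogram + prefix sum.
void coo2csr_ref(const std::vector<int>& cooRowInd, int m, std::vector<int>& csrRowPtr) {
  csrRowPtr.assign(m + 1, 0);
  for (int r : cooRowInd) ++csrRowPtr[r + 1];
  for (int r = 0; r < m; ++r) csrRowPtr[r + 1] += csrRowPtr[r];
}

int main() {
  std::vector<int> row_ptr = {0, 2, 3, 6};   // 3 rows, 6 nonzeros
  std::vector<int> row_ind, row_ptr_back;
  csr2coo_ref(row_ptr, 3, row_ind);          // -> 0 0 1 2 2 2
  coo2csr_ref(row_ind, 3, row_ptr_back);     // -> 0 2 3 6
  for (int r : row_ind) std::printf("%d ", r);
  std::printf("\n");
  for (int p : row_ptr_back) std::printf("%d ", p);
  std::printf("\n");
  return 0;
}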
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/size2_selector.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <thrust/device_vector.h> #include <thrust/count.h> //count #include <thrust/sort.h> //sort #include <thrust/binary_search.h> //lower_bound #include <thrust/unique.h> //unique #include <cusparse.h> #include "async_event.cuh" #include "graph_utils.cuh" #include "common_selector.cuh" #include "valued_csr_graph.cuh" // This should be enabled #define EXPERIMENTAL_ITERATIVE_MATCHING using namespace nvlouvain; namespace nvlouvain{ typedef enum { USER_PROVIDED = 0, // using edge values as is SCALED_BY_ROW_SUM = 1, // 0.5*(A_ij+A_ji)/max(d(i),d (j)), where d(i) is the sum of the row i SCALED_BY_DIAGONAL = 2, // 0.5*(A_ij+A_ji)/max(diag(i),diag(j)) }Matching_t; typedef enum{ NVGRAPH_OK = 0, NVGRAPH_ERR_BAD_PARAMETERS = 1, }NVGRAPH_ERROR; template <typename IndexType, typename ValueType> class Size2Selector { public: Size2Selector(); Size2Selector(Matching_t similarity_metric, int deterministic = 1, int max_iterations = 15 , ValueType numUnassigned_tol = 0.05 ,bool two_phase = false, bool merge_singletons = true, cudaStream_t stream = 0) :m_similarity_metric(similarity_metric), m_deterministic(deterministic), m_max_iterations(max_iterations), m_numUnassigned_tol(numUnassigned_tol), m_two_phase(two_phase), m_merge_singletons(merge_singletons), m_stream(stream) { m_aggregation_edge_weight_component = 0; m_weight_formula = 0; } // NVGRAPH_ERROR setAggregates(const CsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates); NVGRAPH_ERROR setAggregates(cusparseHandle_t, const IndexType n_vertex, const IndexType n_edges, IndexType* csr_ptr, IndexType* csr_ind, ValueType* csr_val, Vector<IndexType> &aggregates, int &num_aggregates); protected: // NVGRAPH_ERROR setAggregates_common_sqblocks(const CsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates); NVGRAPH_ERROR setAggregates_common_sqblocks(cusparseHandle_t, const IndexType n_vertex, const IndexType n_edges, IndexType* csr_ptr, IndexType* csr_ind, ValueType* csr_val, Vector<IndexType> &aggregates, int &num_aggregates); Matching_t m_similarity_metric; int m_deterministic; int m_max_iterations; ValueType m_numUnassigned_tol; bool m_two_phase; bool m_merge_singletons; cudaStream_t m_stream; int m_aggregation_edge_weight_component; int m_weight_formula; }; } template <typename IndexType> void renumberAndCountAggregates(Vector<IndexType> &aggregates, const IndexType n, IndexType& num_aggregates) { // renumber aggregates Vector<IndexType> scratch(n+1); scratch.fill(0); thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates.raw()); thrust::device_ptr<IndexType> scratch_thrust_dev_ptr(scratch.raw()); // set scratch[aggregates[i]] = 1 thrust::fill(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr), thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), 1); //scratch.dump(0,scratch.get_size()); // do prefix sum on scratch 
thrust::exclusive_scan(scratch_thrust_dev_ptr, scratch_thrust_dev_ptr + n + 1, scratch_thrust_dev_ptr); // scratch.dump(0,scratch.get_size()); // aggregates[i] = scratch[aggregates[i]] thrust::copy(thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr), thrust::make_permutation_iterator(scratch_thrust_dev_ptr, aggregates_thrust_dev_ptr + n), aggregates_thrust_dev_ptr); cudaCheckError(); cudaMemcpy(&num_aggregates, &scratch.raw()[scratch.get_size()-1], sizeof(int), cudaMemcpyDefault); //num_aggregates = scratch.raw()[scratch.get_size()-1]; cudaCheckError(); } // ------------------ // Constructors // ------------------ template <typename IndexType, typename ValueType> Size2Selector<IndexType, ValueType>::Size2Selector() { //Using default vaues from AmgX m_deterministic = 1; m_stream=0; m_max_iterations = 15; m_numUnassigned_tol = 0.05; m_two_phase = 0; m_aggregation_edge_weight_component= 0; m_merge_singletons = 1; m_weight_formula = 0; m_similarity_metric = SCALED_BY_ROW_SUM; } // ------------------ // Methods // ------------------ // setAggregates for block_dia_csr_matrix_d format template <typename IndexType, typename ValueType> NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates_common_sqblocks( cusparseHandle_t cusp_handle, const IndexType n_vertex, const IndexType n_edges, IndexType *csr_ptr, IndexType *csr_ind, ValueType *csr_val, Vector<IndexType> &aggregates, int &num_aggregates) { const IndexType n = n_vertex; const IndexType nnz = n_edges; const IndexType *A_row_offsets_ptr = csr_ptr; const IndexType *A_column_indices_ptr = csr_ind; const ValueType *A_nonzero_values_ptr = csr_val; // compute row indices Vector<IndexType> row_indices(nnz); IndexType* row_indices_raw_ptr = row_indices.raw(); // Cusparse::csr2coo( n, nnz, A_row_offsets_ptr, row_indices.raw()); // note : amgx uses cusp for that //cusparseHandle_t cusp_handle; //cusparseCreate(&cusp_handle); cusparseXcsr2coo(cusp_handle, A_row_offsets_ptr, nnz, n, row_indices_raw_ptr, CUSPARSE_INDEX_BASE_ZERO); const IndexType *A_row_indices_ptr = row_indices.raw(); //All vectors should be initialized to -1. aggregates.fill(-1); Vector<IndexType> strongest_neighbour(n); strongest_neighbour.fill(-1); Vector<IndexType> strongest_neighbour_1phase(n); strongest_neighbour_1phase.fill(-1); Vector<float> edge_weights(nnz); edge_weights.fill(-1); float *edge_weights_ptr = edge_weights.raw(); float *rand_edge_weights_ptr = NULL; cudaCheckError(); IndexType *strongest_neighbour_ptr = strongest_neighbour.raw(); IndexType *strongest_neighbour_1phase_ptr = strongest_neighbour_1phase.raw(); IndexType *aggregates_ptr = aggregates.raw(); const int threads_per_block = 256; const int max_grid_size = 256; const int num_blocks = min( max_grid_size, (n-1)/threads_per_block+ 1 ); const int num_blocks_V2 = min( max_grid_size, (nnz-1)/threads_per_block + 1); int bsize = 1; // AmgX legacy: we don't use block CSR matrices, this is just to specify that we run on regular matrices int numUnassigned = n; int numUnassigned_previous = numUnassigned; thrust::device_ptr<IndexType> aggregates_thrust_dev_ptr(aggregates_ptr); switch(m_similarity_metric) { case USER_PROVIDED : { //printf("user provided !!!!!!!!!!!!!!!! 
\n"); //copy non wero values of A in edge_weights (float) convert_type<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(nnz, A_nonzero_values_ptr, edge_weights_ptr); cudaCheckError(); //edge_weights.dump(0,nnz); break; } case SCALED_BY_ROW_SUM : { /* comment out by Tin-Yin // Compute the edge weights using .5*(A_ij+A_ji)/max(d(i),d(j)) where d(i) is the sum of outgoing edges of i Vector<ValueType> row_sum(n); const ValueType *A_row_sum_ptr = row_sum.raw(); Vector<ValueType> ones(n); ones.fill(1.0); ValueType alpha = 1.0, beta =0.0; Cusparse::csrmv(false, false, n, n, nnz,&alpha,A_nonzero_values_ptr, A_row_offsets_ptr, A_column_indices_ptr, ones.raw(),&beta, row_sum.raw()); cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>,cudaFuncCachePreferL1); computeEdgeWeights_simple<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_row_sum_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, this->m_weight_formula); cudaCheckError(); break; */ } case SCALED_BY_DIAGONAL : { // Compute the edge weights using AmgX formula (works only if there is a diagonal entry for each row) Vector<IndexType> diag_idx(n); const IndexType *A_dia_idx_ptr = diag_idx.raw(); computeDiagonalKernelCSR<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, csr_ptr, csr_ind, diag_idx.raw()); cudaCheckError(); cudaFuncSetCacheConfig(computeEdgeWeightsBlockDiaCsr_V2<IndexType,ValueType,float>,cudaFuncCachePreferL1); computeEdgeWeightsBlockDiaCsr_V2<<<num_blocks_V2,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_row_indices_ptr, A_column_indices_ptr, A_dia_idx_ptr, A_nonzero_values_ptr, nnz, edge_weights_ptr, rand_edge_weights_ptr, n, bsize,this->m_aggregation_edge_weight_component, this->m_weight_formula); cudaCheckError(); break; } default: return NVGRAPH_ERR_BAD_PARAMETERS; } #ifdef EXPERIMENTAL_ITERATIVE_MATCHING // TODO (from amgx): allocate host pinned memory AsyncEvent *throttle_event = new AsyncEvent; throttle_event->create(); std::vector<IndexType> h_unagg_vec(1); Vector<IndexType> d_unagg_vec(1); int *unaggregated = &h_unagg_vec[0]; int *d_unaggregated = d_unagg_vec.raw(); #endif int icount, s = 1; { icount = 0; float *weights_ptr = edge_weights_ptr; do { if( !this->m_two_phase ) { // 1-phase handshaking findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons); cudaCheckError(); } else { // 2-phase handshaking findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 1, this->m_merge_singletons); cudaCheckError(); // 2nd phase: for each block_row, find the strongest neighbour among those who gave hand on 1st phase findStrongestNeighbourBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, weights_ptr, n, aggregates_ptr, strongest_neighbour_1phase_ptr, strongest_neighbour_ptr, bsize, 2, this->m_merge_singletons); cudaCheckError(); } // Look for perfect matches. 
Also, for nodes without unaggregated neighbours, merge with aggregate containing strongest neighbour matchEdges<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, strongest_neighbour_ptr); cudaCheckError(); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING s = (icount & 1); if( s == 0 ) { // count unaggregated vertices cudaMemsetAsync(d_unaggregated, 0, sizeof(int), this->m_stream); countAggregates<IndexType,threads_per_block><<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, d_unaggregated); cudaCheckError(); cudaMemcpyAsync(unaggregated, d_unaggregated, sizeof(int), cudaMemcpyDeviceToHost, this->m_stream); throttle_event->record(this->m_stream); cudaCheckError(); } else { throttle_event->sync(); numUnassigned_previous = numUnassigned; numUnassigned = *unaggregated; } #else cudaStreamSynchronize(this->m_stream); numUnassigned_previous = numUnassigned; numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); #endif icount++; } while ( (s == 0) || !(numUnassigned==0 || icount > this->m_max_iterations || 1.0*numUnassigned/n < this->m_numUnassigned_tol || numUnassigned == numUnassigned_previous)); } //print //printf("icount=%i, numUnassiged=%d, numUnassigned_tol=%f\n", icount, numUnassigned, this->m_numUnassigned_tol); #ifdef EXPERIMENTAL_ITERATIVE_MATCHING delete throttle_event; #endif if( this->m_merge_singletons ) { // Merge remaining vertices with current aggregates if (!this->m_deterministic) { while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize,this->m_deterministic,(IndexType*) NULL); cudaCheckError(); numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); } } else { Vector<int> aggregates_candidate(n); aggregates_candidate.fill(-1); while (numUnassigned != 0) { mergeWithExistingAggregatesBlockDiaCsr_V2<<<num_blocks,threads_per_block,0,this->m_stream>>>(A_row_offsets_ptr, A_column_indices_ptr, edge_weights_ptr, n, aggregates_ptr, bsize,this->m_deterministic,aggregates_candidate.raw()); cudaCheckError(); joinExistingAggregates<<<num_blocks,threads_per_block,0,this->m_stream>>>(n, aggregates_ptr, aggregates_candidate.raw()); cudaCheckError(); numUnassigned = (int)thrust::count(aggregates_thrust_dev_ptr, aggregates_thrust_dev_ptr+n,-1); cudaCheckError(); } } } else { //make singletons aggregateSingletons<<<num_blocks,threads_per_block,0,this->m_stream>>>( aggregates_ptr, n ); cudaCheckError(); } renumberAndCountAggregates(aggregates, n, num_aggregates); return NVGRAPH_OK; } /* template <typename IndexType, typename ValueType> NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates(const CsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates) { return setAggregates_common_sqblocks( A, aggregates, num_aggregates); } */ template <typename IndexType, typename ValueType> NVGRAPH_ERROR Size2Selector<IndexType, ValueType>::setAggregates( cusparseHandle_t cusp_handle, const IndexType n_vertex, const IndexType n_edges, IndexType *csr_ptr, IndexType *csr_ind, ValueType *csr_val, Vector<IndexType> &aggregates, int &num_aggregates) { return setAggregates_common_sqblocks(cusp_handle, n_vertex, n_edges, csr_ptr, csr_ind, csr_val, aggregates, num_aggregates); } //template class Size2Selector<int, float>; //template class Size2Selector<int, double>; //template void 
renumberAndCountAggregates <int> (Vector<int> &aggregates, const int n, int& num_aggregates);
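
The renumberAndCountAggregates step above compacts arbitrary aggregate ids into the range [0, num_aggregates) with a flag / exclusive-scan / gather pattern. A sequential sketch of the same logic (hypothetical names, std::vector instead of device memory) may help when reading the thrust version:

// renumber_reference.cpp -- sequential sketch of renumberAndCountAggregates:
// mark which aggregate ids occur, exclusive-scan the marks, then gather the new ids.
#include <cstdio>
#include <vector>

int renumber_aggregates(std::vector<int>& aggregates) {
  const int n = static_cast<int>(aggregates.size());
  std::vector<int> scratch(n + 1, 0);
  for (int i = 0; i < n; ++i) scratch[aggregates[i]] = 1;              // scratch[agg[i]] = 1
  int running = 0;                                                     // exclusive prefix sum
  for (int i = 0; i <= n; ++i) { int v = scratch[i]; scratch[i] = running; running += v; }
  for (int i = 0; i < n; ++i) aggregates[i] = scratch[aggregates[i]];  // gather new ids
  return running;                                                      // distinct aggregates
}

int main() {
  std::vector<int> agg = {4, 4, 0, 2, 2, 0};   // sparse, non-contiguous ids
  int num = renumber_aggregates(agg);          // -> {2, 2, 0, 1, 1, 0}, num = 3
  std::printf("num_aggregates = %d\n", num);
  for (int a : agg) std::printf("%d ", a);
  std::printf("\n");
  return 0;
}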
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/high_res_clock.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// A wrapper of clock_gettime.
// Michael A. Frumkin (mfrumkin@nvidia.com)
#pragma once

#include <iostream>
#include <string>
#include <time.h>

class HighResClock {
 public:
  HighResClock() {
    clock_gettime(CLOCK_REALTIME, &_start_time);
    clock_gettime(CLOCK_REALTIME, &_stop_time);
  }
  ~HighResClock() { }

  void start() { clock_gettime(CLOCK_REALTIME, &_start_time); }

  std::string stop() {
    clock_gettime(CLOCK_REALTIME, &_stop_time);
    char buffer[64];
    long long int start_time = _start_time.tv_sec * 1e9 + _start_time.tv_nsec;
    long long int stop_time = _stop_time.tv_sec * 1e9 + _stop_time.tv_nsec;
    sprintf(buffer, "%lld us", (stop_time - start_time) / 1000);
    std::string str(buffer);
    return str;
  }

  void stop(double* elapsed_time) {  // returns time in us
    clock_gettime(CLOCK_REALTIME, &_stop_time);
    long long int start_time = _start_time.tv_sec * 1e9 + _start_time.tv_nsec;
    long long int stop_time = _stop_time.tv_sec * 1e9 + _stop_time.tv_nsec;
    *elapsed_time = (stop_time - start_time) / 1000;
  }

 private:
  timespec _start_time;
  timespec _stop_time;
};
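
A minimal usage sketch timing a simple device operation (any workload would do). Note that the clock measures wall time, so device work must be synchronized before stopping.

// Example use of HighResClock (sketch only).
#include <cuda_runtime.h>
#include <iostream>
#include "high_res_clock.h"

int main() {
  HighResClock hr_clock;
  float* d_buf = nullptr;
  cudaMalloc(&d_buf, 1 << 20);

  hr_clock.start();
  cudaMemset(d_buf, 0, 1 << 20);
  cudaDeviceSynchronize();          // wall-time clock: sync before stopping
  double elapsed_us = 0.0;
  hr_clock.stop(&elapsed_us);       // overload returning microseconds as a double

  std::cout << "memset took " << elapsed_us << " us\n";
  cudaFree(d_buf);
  return 0;
}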
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/widest_path.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

namespace nvgraph {

template <typename IndexType_, typename ValueType_>
class WidestPath
{
public:
    typedef IndexType_ IndexType;
    typedef ValueType_ ValueType;

private:
    ValuedCsrGraph<IndexType, ValueType> m_network;
    Vector<ValueType> m_widest_path;
    Vector<ValueType> m_tmp;
    Vector<int> m_mask; // mask[i] = 0 if we can ignore the i-th column in the csrmv
    IndexType m_source;
    ValueType m_residual;
    int m_iterations;
    bool m_is_setup;
    cudaStream_t m_stream;

    bool solve_it();
    void setup(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& WidestPath_result);

public:
    // Simple constructor
    WidestPath(void) {};
    // Simple destructor
    ~WidestPath(void) {};

    // Create a WidestPath solver attached to the transpose of a weighted network
    // *** network is the transposed/CSC ***
    WidestPath(const ValuedCsrGraph<IndexType, ValueType>& network, cudaStream_t stream = 0)
        : m_network(network), m_is_setup(false), m_stream(stream) {};

    /*! Find the Widest Path from the vertex source_index to every other vertex.
     *
     *  \param source_index The source.
     *  \param source_connection The connectivity of the source, provided as input:
     *     - if there is a link from source_index to i, source_connection[i] = E(source_index, i)
     *     - otherwise source_connection[i] = op.plus->id
     *     - source_connection[source_index] = op.times->id
     *  \param (output) m_widest_path m_widest_path[i] contains the Widest Path from the source to the vertex i.
     */
    NVGRAPH_ERROR solve(IndexType source_index, Vector<ValueType>& source_connection, Vector<ValueType>& WidestPath_result);

    inline int get_iterations() const { return m_iterations; }
};

} // end namespace nvgraph
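
The solver above runs semiring csrmv iterations on the transposed graph on the GPU. As a host-side reference of the quantity it computes (the maximum over all paths of the minimum edge weight on the path, i.e. the MaxMin/bottleneck semiring), a Dijkstra-style sketch on a forward CSR graph is shown below; it is an illustration only, not the nvgraph algorithm.

// widest_path_reference.cpp -- sequential sketch of the bottleneck/widest-path problem.
#include <cstdio>
#include <limits>
#include <queue>
#include <utility>
#include <vector>

std::vector<double> widest_path_ref(int n, const std::vector<int>& ptr,
                                    const std::vector<int>& ind,
                                    const std::vector<double>& val, int src) {
  const double ninf = -std::numeric_limits<double>::max();
  std::vector<double> width(n, ninf);               // plus-identity of the MaxMin semiring
  width[src] = std::numeric_limits<double>::max();  // times-identity at the source
  std::priority_queue<std::pair<double, int>> pq;   // best-first search on path width
  pq.push({width[src], src});
  while (!pq.empty()) {
    auto [w, u] = pq.top(); pq.pop();
    if (w < width[u]) continue;                     // stale queue entry
    for (int e = ptr[u]; e < ptr[u + 1]; ++e) {
      double cand = std::min(w, val[e]);            // semiring "times": min along the path
      if (cand > width[ind[e]]) {                   // semiring "plus": max over paths
        width[ind[e]] = cand;
        pq.push({cand, ind[e]});
      }
    }
  }
  return width;
}

int main() {
  // Edges 0->1 (5), 0->2 (2), 1->2 (3): the widest path to 2 is min(5,3) = 3, not 2.
  std::vector<int> ptr = {0, 2, 3, 3};
  std::vector<int> ind = {1, 2, 2};
  std::vector<double> val = {5.0, 2.0, 3.0};
  auto w = widest_path_ref(3, ptr, ind, val, 0);
  std::printf("width(0->2) = %f\n", w[2]);
  return 0;
}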
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_cublas.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cublas_v2.h> #include <iostream> #include "debug_macros.h" namespace nvgraph { class Cublas; class Cublas { private: static cublasHandle_t m_handle; // Private ctor to prevent instantiation. Cublas(); ~Cublas(); public: // Get the handle. static cublasHandle_t get_handle() { if (m_handle == 0) CHECK_CUBLAS(cublasCreate(&m_handle)); return m_handle; } static void destroy_handle() { if (m_handle != 0) CHECK_CUBLAS(cublasDestroy(m_handle)); m_handle = 0; } static void set_pointer_mode_device(); static void set_pointer_mode_host(); static void setStream(cudaStream_t stream) { cublasHandle_t handle = Cublas::get_handle(); CHECK_CUBLAS(cublasSetStream(handle, stream)); } template <typename T> static void axpy(int n, T alpha, const T* x, int incx, T* y, int incy); template <typename T> static void copy(int n, const T* x, int incx, T* y, int incy); template <typename T> static void dot(int n, const T* x, int incx, const T* y, int incy, T* result); template <typename T> static void gemv(bool transposed, int m, int n, const T* alpha, const T* A, int lda, const T* x, int incx, const T* beta, T* y, int incy); template <typename T> static void gemv_ext(bool transposed, const int m, const int n, const T* alpha, const T* A, const int lda, const T* x, const int incx, const T* beta, T* y, const int incy, const int offsetx, const int offsety, const int offseta); template <typename T> static void trsv_v2( cublasFillMode_t uplo, cublasOperation_t trans, cublasDiagType_t diag, int n, const T *A, int lda, T *x, int incx, int offseta); template <typename T> static void ger(int m, int n, const T* alpha, const T* x, int incx, const T* y, int incy, T* A, int lda); template <typename T> static T nrm2(int n, const T* x, int incx); template <typename T> static void nrm2(int n, const T* x, int incx, T* result); template <typename T> static void scal(int n, T alpha, T* x, int incx); template <typename T> static void scal(int n, T* alpha, T* x, int incx); template <typename T> static void gemm(bool transa, bool transb, int m, int n, int k, const T * alpha, const T * A, int lda, const T * B, int ldb, const T * beta, T * C, int ldc); template <typename T> static void geam(bool transa, bool transb, int m, int n, const T * alpha, const T * A, int lda, const T * beta, const T * B, int ldb, T * C, int ldc); }; } // end namespace nvgraph
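
A short usage sketch of the static wrapper. It assumes device buffers held in thrust::device_vector purely for illustration; the cuBLAS handle is created lazily by get_handle() on first use.

// cublas_wrapper_demo.cu -- sketch only; signatures taken from the declarations above.
#include <cstdio>
#include <thrust/device_vector.h>
#include "nvgraph_cublas.hxx"

void saxpy_and_norm(cudaStream_t stream) {
  const int n = 1 << 10;
  thrust::device_vector<float> x(n, 1.0f), y(n, 2.0f);

  nvgraph::Cublas::setStream(stream);
  nvgraph::Cublas::axpy(n, 3.0f,                            // y = 3*x + y
                        thrust::raw_pointer_cast(x.data()), 1,
                        thrust::raw_pointer_cast(y.data()), 1);
  float nrm = nvgraph::Cublas::nrm2(n, thrust::raw_pointer_cast(y.data()), 1);
  std::printf("||y||_2 = %f\n", nrm);                       // expect sqrt(n) * 5
}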
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/thrust_traits.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef THRUST_TRAITS_HXX
#define THRUST_TRAITS_HXX

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>

namespace nvgraph
{
  //generic Vector Ptr Type facade:
  //
  template<typename T, typename Vector>
  struct VectorPtrT;

  //partial specialization for device_vector:
  //
  template<typename T>
  struct VectorPtrT<T, thrust::device_vector<T> >
  {
    typedef thrust::device_ptr<T> PtrT;
  };

  //partial specialization for host_vector:
  //
  template<typename T>
  struct VectorPtrT<T, thrust::host_vector<T> >
  {
    typedef typename thrust::host_vector<T>::value_type* PtrT;
  };
}

#endif
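
A small sketch of what the facade buys: the same trait names the right pointer type for device and host vectors. The snippet is illustrative only.

// thrust_traits_demo.cu -- sketch only.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "thrust_traits.hxx"

int main() {
  using DevPtr  = nvgraph::VectorPtrT<float, thrust::device_vector<float> >::PtrT; // thrust::device_ptr<float>
  using HostPtr = nvgraph::VectorPtrT<float, thrust::host_vector<float> >::PtrT;   // float*

  thrust::device_vector<float> d(8, 1.0f);
  thrust::host_vector<float>   h(8, 1.0f);
  DevPtr  dp = d.data();   // device_vector::data() already returns a device_ptr
  HostPtr hp = &h[0];      // host side: a plain raw pointer
  (void)dp; (void)hp;
  return 0;
}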
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/semiring.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cfloat> #include <algorithm> #include <stdio.h> #include "atomics.hxx" #include "nvgraph_error.hxx" namespace nvgraph{ //define nvgraph min and max oprators template<typename T> __host__ __device__ __forceinline__ T min(const T&a, const T &b) { return (a < b) ? a : b; } template<typename T> __host__ __device__ __forceinline__ T max(const T&a, const T &b) { return (a > b) ? a : b; } //have routines to return these operators template<typename ValueType_> //ValueType_ is Value_type of the graph struct PlusTimesSemiring { typedef ValueType_ SR_type; SR_type plus_ident, times_ident, times_null; PlusTimesSemiring() { if (typeid(ValueType_) != typeid(float) && typeid(ValueType_) != typeid(double)) FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); //for semiring need multiplicative and additive identity plus_ident = SR_type(0); times_ident = SR_type(1); //also need multiplicative null times_null = SR_type(0); } __host__ __device__ __forceinline__ void setPlus_ident(SR_type &val) { val = SR_type(0); } __host__ __device__ __forceinline__ SR_type plus(const SR_type &arg0, const SR_type &arg1) { return arg0 + arg1; } __host__ __device__ __forceinline__ SR_type times(const SR_type &arg0, const SR_type &arg1) { return arg0 * arg1; } //potential private member to be used in reduction by key so only need atomic for plus operator __device__ __forceinline__ void atomicPlus(SR_type *addr, SR_type val) { atomicFPAdd(addr, val); } __device__ __forceinline__ SR_type shflPlus(SR_type input, int firstLane, int offset) { return shflFPAdd(input, firstLane, offset); } }; template<typename ValueType_> struct MinPlusSemiring { typedef ValueType_ SR_type; //possibly change for integers to cast to floats SR_type plus_ident, times_ident, times_null; MinPlusSemiring() { if (typeid(ValueType_) != typeid(float) && typeid(ValueType_) != typeid(double)) FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); //for semiring need multiplicative and additive identity//put in constructor SR_type inf = (typeid(ValueType_) == typeid(float)) ? 
FLT_MAX : DBL_MAX; //check for cuda add type identifiers plus_ident = SR_type(inf); times_ident = SR_type(0); //also need multiplicative null times_null = SR_type(inf); } __host__ __device__ __forceinline__ void setPlus_ident(float &val) { val = FLT_MAX; } __host__ __device__ __forceinline__ void setPlus_ident(double &val) { val = DBL_MAX; } __host__ __device__ __forceinline__ SR_type plus(const SR_type &arg0, const SR_type &arg1) { return min(arg0, arg1); //check and change!-using min in csrmv.cu } __host__ __device__ __forceinline__ SR_type times(const SR_type &arg0, const SR_type &arg1) { return arg0 + arg1; } //potential private member to be used in reduction by key so only need atomic for plus operator __device__ __forceinline__ void atomicPlus(SR_type *addr, SR_type val) { atomicFPMin(addr, val); } __device__ __forceinline__ SR_type shflPlus(SR_type input, int firstLane, int offset) { return shflFPMin(input, firstLane, offset); } }; template<typename ValueType_> struct MaxMinSemiring //bottleneck semiring { typedef ValueType_ SR_type;//could be integers template and check that type makes sense SR_type plus_ident, times_ident, times_null; MaxMinSemiring() { if (typeid(ValueType_) != typeid(float) && typeid(ValueType_) != typeid(double)) FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); //for semiring need multiplicative and additive identity SR_type inf = (typeid(ValueType_) == typeid(float)) ? FLT_MAX : DBL_MAX; plus_ident = SR_type(-inf); times_ident = SR_type(inf); //also need multiplicative null times_null = SR_type(-inf); } __host__ __device__ __forceinline__ void setPlus_ident(float &val) { val = -FLT_MAX; } __host__ __device__ __forceinline__ void setPlus_ident(double &val) { val = -DBL_MAX; } __host__ __device__ __forceinline__ SR_type plus(const SR_type &arg0, const SR_type &arg1) { return max(arg0, arg1); //check and change!-using min in csrmv.cu can use thrust } __host__ __device__ __forceinline__ SR_type times(const SR_type &arg0, const SR_type &arg1) { return min(arg0,arg1); } //potential private member to be used in reduction by key so only need atomic for plus operator __device__ __forceinline__ void atomicPlus(SR_type *addr, SR_type val) { atomicFPMax(addr, val); } __device__ __forceinline__ SR_type shflPlus(SR_type input, int firstLane, int offset) { return shflFPMax(input, firstLane, offset); } }; template<typename ValueType_> struct OrAndBoolSemiring //bottleneck semiring { typedef ValueType_ SR_type;//could be integers SR_type plus_ident, times_ident, times_null; OrAndBoolSemiring() { //embed the bools in the reals just use 0 and 1 in floats if (typeid(ValueType_) != typeid(float) && typeid(ValueType_) != typeid(double)) FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); //for semiring need multiplicative and additive identity plus_ident = SR_type(0); times_ident = SR_type(1); //also need multiplicative null times_null = SR_type(0); } __host__ __device__ __forceinline__ void setPlus_ident(SR_type &val) { val = SR_type(0); } __host__ __device__ __forceinline__ SR_type plus(const SR_type &arg0, const SR_type &arg1) { return (bool) arg0 | (bool) arg1; //check and change!-using min in csrmv.cu can use thrust } __host__ __device__ __forceinline__ SR_type times(const SR_type &arg0, const SR_type &arg1) { return (bool) arg0 & (bool) arg1; } //potential private member to be used in reduction by key so only need atomic for plus operator //need to check this atomic since it takes integer 
parameters instead of boolean __device__ __forceinline__ void atomicPlus(SR_type *addr, SR_type val) { atomicFPOr(addr, val); } //DOESN"T work returns exclusive or __device__ __forceinline__ SR_type shflPlus(SR_type input, int firstLane, int offset) { return shflFPOr(input, firstLane, offset); } }; //This Semiring does not work. WIll not be supported in first version template<typename ValueType_> struct LogPlusSemiring //bottleneck semiring { typedef ValueType_ SR_type;//could be integers SR_type plus_ident, times_ident, times_null; LogPlusSemiring() { //for semiring need multiplicative and additive identity if (typeid(ValueType_) != typeid(float) && typeid(ValueType_) != typeid(double)) FatalError("Graph value type is not supported by this semiring.", NVGRAPH_ERR_BAD_PARAMETERS); SR_type inf = (typeid(ValueType_) == typeid(float)) ? FLT_MAX : DBL_MAX; plus_ident = SR_type(inf); times_ident = SR_type(0); //also need multiplicative null times_null = SR_type(inf); } __host__ __device__ __forceinline__ void setPlus_ident(float &val) { val = FLT_MAX; } __host__ __device__ __forceinline__ void setPlus_ident(double &val) { val = DBL_MAX; } __host__ __device__ __forceinline__ SR_type plus(const SR_type &arg0, const SR_type &arg1) { return -log(exp(-arg0) + exp(-arg1)); //check calling cuda log and arg0 ok for float not double? } __host__ __device__ __forceinline__ SR_type times(const SR_type &arg0, const SR_type &arg1) { return arg0 + arg1; } //this will not work! __device__ __forceinline__ void atomicPlus(SR_type *addr, SR_type val) { atomicFPLog(addr, val); } //this DOES NOT work! Need customized shfl isntructions for logPlus __device__ __forceinline__ SR_type shflPlus(SR_type input, int firstLane, int offset) { return shflFPAdd(input, firstLane, offset); } }; }// end namespace nvgraph
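
A sketch of how these semirings are meant to be consumed: a generic row-per-thread csrmv-style kernel that only uses plus(), times() and setPlus_ident(). The kernel below is a simplified illustration, not the real nvgraph csrmv, and the launch shown in the trailing comment assumes device CSR buffers already exist.

// semiring_csrmv_sketch.cu -- illustration only.
#include "semiring.hxx"

template <typename IndexType, typename ValueType, typename SemiringType>
__global__ void csrmv_rowwise(int n_rows, const IndexType* row_ptr, const IndexType* col_ind,
                              const ValueType* val, const ValueType* x, ValueType* y,
                              SemiringType op) {
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= n_rows) return;
  ValueType acc;
  op.setPlus_ident(acc);                                 // e.g. +inf for MinPlus, 0 for PlusTimes
  for (IndexType j = row_ptr[row]; j < row_ptr[row + 1]; ++j)
    acc = op.plus(acc, op.times(val[j], x[col_ind[j]])); // y_i = PLUS_j TIMES(A_ij, x_j)
  y[row] = acc;
}

// Example launch with the MinPlus (shortest-path) semiring:
//   csrmv_rowwise<<<blocks, threads>>>(n, d_ptr, d_ind, d_val, d_x, d_y,
//                                      nvgraph::MinPlusSemiring<float>());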
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_experimental.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Internal header of the NVGRAPH library
//
//
// WARNING:
// This header gives access to experimental features and internal routines that are not in the official API
//
//

#include "nvgraph.h"

#ifdef __cplusplus
#include "cstdio"
#else
#include "stdio.h"
#endif

#ifndef NVGRAPH_API
#ifdef _WIN32
#define NVGRAPH_API __stdcall
#else
#define NVGRAPH_API
#endif
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* Edge matching types */
typedef enum
{
   NVGRAPH_UNSCALED           = 0, // using edge values as is
   NVGRAPH_SCALED_BY_ROW_SUM  = 1, // 0.5*(A_ij+A_ji)/max(d(i),d(j)), where d(i) is the sum of the row i
   NVGRAPH_SCALED_BY_DIAGONAL = 2, // 0.5*(A_ij+A_ji)/max(diag(i),diag(j))
} nvgraphEdgeWeightMatching_t;

nvgraphStatus_t NVGRAPH_API nvgraphSpectralModularityMaximization(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const int n_clusters,
                           const int n_eig_vects,
                           const float evs_tolerance,
                           const int evs_max_iter,
                           const float kmean_tolerance,
                           const int kmean_max_iter,
                           int* clustering,
                           void* eig_vals,
                           void* eig_vects);

nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeModularityClustering(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const int clusters,
                           const int* clustering,
                           float * modularity);

nvgraphStatus_t NVGRAPH_API nvgraphHeavyEdgeMatching(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const nvgraphEdgeWeightMatching_t similarity_metric,
                           int* aggregates,
                           size_t* n_aggregates);

nvgraphStatus_t NVGRAPH_API nvgraphBalancedCutClustering(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const int n_clusters,
                           const int n_eig_vects,
                           const int evs_type,
                           const float evs_tolerance,
                           const int evs_max_iter,
                           const float kmean_tolerance,
                           const int kmean_max_iter,
                           int* clustering,
                           void* eig_vals,
                           void* eig_vects);

nvgraphStatus_t NVGRAPH_API nvgraphAnalyzeBalancedCut(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const int n_clusters,
                           const int* clustering,
                           float * edgeCut,
                           float * ratioCut);

nvgraphStatus_t NVGRAPH_API nvgraphKrylovPagerank(nvgraphHandle_t handle,
                           const nvgraphGraphDescr_t graph_descr,
                           const size_t weight_index,
                           const void *alpha,
                           const size_t bookmark_index,
                           const float tolerance,
                           const int max_iter,
                           const int subspace_size,
                           const int has_guess,
                           const size_t pagerank_index);

#if defined(__cplusplus)
} //extern "C"
#endif
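
A hedged usage sketch of one experimental entry point, nvgraphHeavyEdgeMatching. It assumes the handle and a CSR graph descriptor with a float edge-weight set at weight index 0 were already created through the public nvgraph.h API; only the call itself comes from this header, and the helper name is hypothetical.

// heavy_edge_matching_demo.cpp -- sketch only; graph/handle setup is assumed done elsewhere.
#include <cstdio>
#include <vector>
#include "nvgraph_experimental.h"

nvgraphStatus_t run_matching(nvgraphHandle_t handle, nvgraphGraphDescr_t graph,
                             int n_vertices) {
  std::vector<int> aggregates(n_vertices);
  size_t n_aggregates = 0;
  // Pair vertices along heavy edges, scaling weights by the row sums.
  nvgraphStatus_t st = nvgraphHeavyEdgeMatching(handle, graph,
                                                /*weight_index=*/0,
                                                NVGRAPH_SCALED_BY_ROW_SUM,
                                                aggregates.data(), &n_aggregates);
  if (st == NVGRAPH_STATUS_SUCCESS)
    std::printf("matched %d vertices into %zu aggregates\n", n_vertices, n_aggregates);
  return st;
}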
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/nvgraph_vector.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cnmem_shared_ptr.hxx> #include "nvgraph_error.hxx" #include "nvgraph_vector_kernels.hxx" #include "debug_macros.h" namespace nvgraph { /*! A Vector contains a device vector of size |E| and type T */ template <typename ValueType_> class Vector { public: //typedef IndexType_ IndexType; typedef ValueType_ ValueType; protected: /*! Storage for the values. */ SHARED_PREFIX::shared_ptr<ValueType> values; /*! Size of the array */ size_t size; /*! Storage for a cuda stream */ //, cudaStream_t stream = 0 public: /*! Construct an empty \p Vector. */ Vector(void) {} ~Vector(void) {} /*! Construct a \p Vector of size vertices. * * \param vertices The size of the Vector */ Vector(size_t vertices, cudaStream_t stream = 0) : values(allocateDevice<ValueType>(vertices, stream)), size(vertices) {} size_t get_size() const { return size; } size_t bytes() const { return size*sizeof(ValueType);} ValueType* raw() const { return values.get(); } //cudaStream_t get_stream() const { return stream_; } void allocate(size_t n, cudaStream_t stream = 0) { size = n; values = allocateDevice<ValueType>(n, stream); } void attach(size_t n, ValueType* vals, cudaStream_t stream = 0) { size = n; values = attachDevicePtr<ValueType>(vals, stream); } Vector(size_t vertices, ValueType * vals, cudaStream_t stream = 0) : values(attachDevicePtr<ValueType>(vals, stream)), size(vertices) {} void fill(ValueType val, cudaStream_t stream = 0) { fill_raw_vec(this->raw(), this->get_size(), val, stream); } void copy(Vector<ValueType> &vec1, cudaStream_t stream = 0) { if (this->get_size() == 0 && vec1.get_size()>0) { allocate(vec1.get_size(), stream); copy_vec(vec1.raw(), this->get_size(), this->raw(), stream); } else if (this->get_size() == vec1.get_size()) copy_vec(vec1.raw(), this->get_size(), this->raw(), stream); else if (this->get_size() > vec1.get_size()) { //COUT() << "Warning Copy : sizes mismatch "<< this->get_size() <<':'<< vec1.get_size() <<std::endl; copy_vec(vec1.raw(), vec1.get_size(), this->raw(), stream); //dump_raw_vec (this->raw(), vec1.get_size(), 0); } else { FatalError("Cannot copy a vector into a smaller one", NVGRAPH_ERR_BAD_PARAMETERS); } } void dump(size_t off, size_t sz, cudaStream_t stream = 0) { if ((off+sz)<= this->size) dump_raw_vec(this->raw(), sz, off, stream); else FatalError("Offset and Size values doesn't make sense", NVGRAPH_ERR_BAD_PARAMETERS); } void flag_zeros(Vector<int> & flags, cudaStream_t stream = 0) { flag_zeros_raw_vec(this->get_size(), this->raw(), flags.raw(), stream); } ValueType nrm1(cudaStream_t stream = 0) { ValueType res = 0; nrm1_raw_vec(this->raw(), this->get_size(), &res, stream); return res; } }; // class Vector } // end namespace nvgraph
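
A brief usage sketch of the Vector wrapper, assuming the cnmem pool used by allocateDevice has already been initialised (nvgraph does this at handle creation); the helper name is hypothetical.

// vector_demo.cu -- sketch only; all methods come from the class above.
#include "nvgraph_vector.hxx"

void vector_demo(cudaStream_t stream) {
  nvgraph::Vector<float> a(1000, stream);   // device allocation of 1000 floats
  nvgraph::Vector<float> b(1000, stream);
  a.fill(0.5f, stream);                     // fill_raw_vec under the hood
  b.copy(a, stream);                        // same size: straight device copy
  float l1 = b.nrm1(stream);                // expect 500.0
  b.dump(0, 10, stream);                    // print the first 10 entries
  (void)l1;
}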
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/bfs2d.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <climits> //Used in nvgraph.h #define TRAVERSAL_DEFAULT_ALPHA 15 #define TRAVERSAL_DEFAULT_BETA 18 #include "nvgraph_error.hxx" #include "2d_partitioning.h" namespace nvgraph { template<typename GlobalType, typename LocalType, typename ValueType> class Bfs2d { private: Matrix2d<GlobalType, LocalType, ValueType>* M; bool directed; bool deterministic; GlobalType alpha; GlobalType beta; // edgemask, distances, predecessors are set/read by users - using Vectors bool useEdgeMask; bool computeDistances; bool computePredecessors; int32_t vertices_bmap_size; VertexData2D<GlobalType, LocalType, LocalType>* distances; VertexData2D<GlobalType, LocalType, GlobalType>* predecessors; //Working data VertexData2D<GlobalType, LocalType, int32_t>* frontier_bmap; VertexData2D<GlobalType, LocalType, int32_t>* visited_bmap; VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>* frontier; VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>* trim_frontier; VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>* frontierSize; VertexData2D_Unbuffered<GlobalType, LocalType, int8_t>* degreeFlags; std::vector<LocalType> frontierSize_h; VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>* exSumDegree; VertexData2D_Unbuffered<GlobalType, LocalType, int8_t>* exSumStorage; VertexData2D_Unbuffered<GlobalType, LocalType, LocalType>* bucketOffsets; std::vector<LocalType> frontierDegree_h; // Output locations GlobalType* distances_out; GlobalType* predecessors_out; NVGRAPH_ERROR setup(); void clean(); public: virtual ~Bfs2d(void) { clean(); }; Bfs2d(Matrix2d<GlobalType, LocalType, ValueType>* _M, bool _directed, GlobalType _alpha, GlobalType _beta) : M(_M), directed(_directed), alpha(_alpha), beta(_beta){ distances = NULL; predecessors = NULL; frontier_bmap = NULL; visited_bmap = NULL; setup(); } NVGRAPH_ERROR configure(GlobalType *distances, GlobalType *predecessors); NVGRAPH_ERROR traverse(GlobalType source_vertex); //Used only for benchmarks NVGRAPH_ERROR traverse(GlobalType *source_vertices, int32_t nsources); }; } // end namespace nvgraph
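
A sketch of the intended call sequence, assuming a Matrix2d has already been built (2d_partitioning.h) and that the distance/predecessor buffers follow whatever layout configure() expects; the helper name is hypothetical.

// bfs2d_demo.cu -- sketch only; Matrix2d construction and buffer ownership are assumed.
#include "bfs2d.hxx"

template <typename GlobalType, typename LocalType, typename ValueType>
void run_bfs2d(nvgraph::Matrix2d<GlobalType, LocalType, ValueType>* M,
               GlobalType* distances, GlobalType* predecessors, GlobalType source) {
  // alpha/beta are the direction-optimizing (push/pull switch) parameters.
  nvgraph::Bfs2d<GlobalType, LocalType, ValueType> bfs(M, /*directed=*/true,
                                                       TRAVERSAL_DEFAULT_ALPHA,
                                                       TRAVERSAL_DEFAULT_BETA);
  bfs.configure(distances, predecessors);  // register output locations
  bfs.traverse(source);                    // BFS from a single source
}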
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/subg_extrctrs.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vector> #include <iterator> #include <algorithm> #include <sstream> #include <cassert> namespace nvgraph{ namespace debug{ //Sequential CSR graph extractor //for DEBUGGING purposes, only // template<typename VectorI, typename VectorV, typename VectorB = VectorI> struct SeqSubGraphExtractorFunctor { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; typedef typename VectorB::value_type ValueB; explicit SeqSubGraphExtractorFunctor(const VectorI& vSubset): vertexSubset(vSubset) { //make sure vertexSubset_ is sorted increasingly: ///sort_ifnot(vertexSubset); } virtual ~SeqSubGraphExtractorFunctor(void) { } const VectorV& get_vals(void) const { return vals_subg; } VectorV& get_vals(void) { return vals_subg; } const VectorI& get_row_ptr(void) const { return row_ptr_subg; } const VectorI& get_col_ind(void) const { return col_ind_subg; } struct ValueUpdater { ValueUpdater(const VectorV& v_src, VectorV& v_dest): v_s_(v_src), v_d_(v_dest) { } //debug: (sequential version only) void operator() (const IndexT& j) { v_d_.push_back(v_s_[j]); } ValueT at(IndexT j) const { return v_s_[j]; } void update_vals(const VectorV& vals) { v_d_ = vals; } private: const VectorV& v_s_; VectorV& v_d_; }; struct NoValueUpdater { void operator() (const IndexT& j) { //no-op... } ValueT at(IndexT j) const { return ValueT(0); //nothing meaningful... } void update_vals(const VectorV& vals) { //no-op... } }; virtual void operator () (VectorI& row_ptr_, VectorI& col_ind_) { NoValueUpdater fctr; sequential_extract_subgraph(row_ptr_, col_ind_, fctr); } virtual void operator () (VectorV& vals_, VectorI& row_ptr_, VectorI& col_ind_) { ValueUpdater fctr(vals_, vals_subg); sequential_extract_subgraph(row_ptr_, col_ind_, fctr); } protected: //for debugging purposes, only: // template<typename ValUpdaterFctr> void sequential_extract_subgraph(const VectorI& row_ptr_, const VectorI& col_ind_, ValUpdaterFctr& fctr) { VectorI all_zeros; IndexT last_updated_pos(0); // size_t nrows_subg = vertexSubset.size(); VectorB hash_rows; size_t hash_sz = make_hash(vertexSubset, hash_rows);//assume *NOT* sorted row_ptr_subg.assign(nrows_subg+1, IndexT(0)); all_zeros.reserve(nrows_subg); IndexT nz_subg(0); //this loop assumes sorted vertexSubset // for(IndexT i=IndexT(0);i<IndexT(nrows_subg);++i) { IndexT row_index = vertexSubset[i]; bool first_nz_inrow = true; for(IndexT j=row_ptr_[row_index]; j<row_ptr_[row_index+1];++j) { IndexT k = col_ind_[j]; if( (k<hash_sz) && (hash_rows[k] == 1) )//in vertex subset! ///if( std::binary_search(vertexSubset.begin(), vertexSubset.end(), k) )//in vertex subset! { ///vals_subg.push_back(vals_[j]);//functor! (no-op vs push_back()) fctr(j);//synch issues for parallel! col_ind_subg.push_back(k);//synch issues for parallel! 
++nz_subg; //synch issues for parallel: // if( first_nz_inrow )//update row_ptr_subg { row_ptr_subg[i] = last_updated_pos; first_nz_inrow = false; } ++last_updated_pos;//synch issues for parallel! } }//end for(j;..) //special cases of a row with all zeros: mark it! if (first_nz_inrow) { all_zeros.push_back(i); } }//end for(i;...) assert( nz_subg == col_ind_subg.size() ); //last entry in row_ptr_subg: row_ptr_subg.back() = nz_subg; //handle all zero row cases: fix_zero_rows(all_zeros, row_ptr_subg); //assume *NOT* sorted remap_indices(vertexSubset, col_ind_subg); } struct UpdateRowPtr { explicit UpdateRowPtr(VectorI& row_p): row_p_(row_p) { } void operator() (const IndexT& i) { row_p_[i] = row_p_[i+1]; } private: VectorI& row_p_; }; //correct row_ptr: iterate all_zeros from end towards beginning //and correct row_ptr_ at corresponding index // static void fix_zero_rows(const VectorI& all_zeros, VectorI& row_ptr) { UpdateRowPtr correcter(row_ptr); //reverse traversal! // std::for_each(all_zeros.rbegin(), all_zeros.rend(), correcter); } template<typename Container> struct HashFctr { explicit HashFctr(Container& hash_src): m_hash(hash_src) { } IndexT operator() (const IndexT& src_elem) { IndexT hit(1); m_hash[src_elem] = hit; return hit; } private: Container& m_hash; }; static size_t make_hash(const VectorI& src, VectorB& hash_src, bool is_sorted = false) { assert( !src.empty() ); IndexT max_entry(0); if( is_sorted ) max_entry = src.back(); else max_entry = *std::max_element(src.begin(), src.end()); hash_src.assign(max_entry+1, 0); VectorB dummy(hash_src); HashFctr<VectorB> hctr(hash_src); //why unused dummy? //because functor must return something //and must store result of functor somewhere! // std::transform(src.begin(), src.end(), dummy.begin(), //unused... hctr); return hash_src.size(); } //re-number vertices: // static void remap_indices(const VectorI& src, VectorI& index_set, bool is_sorted = false) { IndexT max_entry(0); if( is_sorted ) max_entry = src.back(); else max_entry = *std::max_element(src.begin(), src.end()); //use hash_src vector as hash-table: // VectorI hash_src(max_entry+1, IndexT(0)); IndexT counter(0); for(typename VectorI::const_iterator pos = src.begin(); pos != src.end(); ++pos) { hash_src[*pos]=counter++;//SEQUENTIALITY!!! } IndexT set_sz(index_set.size()); VectorI old_index_set(index_set); for(IndexT k = IndexT(0);k<set_sz;++k) { index_set[k] = hash_src[old_index_set[k]]; } } private: VectorI vertexSubset; VectorV vals_subg; //not used for non-valued graphs VectorI row_ptr_subg; VectorI col_ind_subg; }; }//end namespace debug }//end namespace nvgraph
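
A usage sketch of the debug extractor on a small host-side CSR graph (hypothetical file name); results are read back through the getters rather than written into the input vectors.

// subg_extract_demo.cpp -- sketch only; uses the class above on std::vector storage.
#include <cstdio>
#include <vector>
#include "subg_extrctrs.hxx"

int main() {
  // Two triangles {0,1,2} and {3,4,5} joined by the edge 2-3.
  std::vector<int> row_ptr = {0, 2, 4, 7, 10, 12, 14};
  std::vector<int> col_ind = {1, 2, 0, 2, 0, 1, 3, 2, 4, 5, 3, 5, 3, 4};
  std::vector<double> vals(col_ind.size(), 1.0);

  std::vector<int> subset = {0, 1, 2};  // keep the first triangle (sorted increasingly)
  nvgraph::debug::SeqSubGraphExtractorFunctor<std::vector<int>, std::vector<double> >
      extract(subset);
  extract(vals, row_ptr, col_ind);      // valued overload; results are stored in the functor

  const std::vector<int>& sub_ptr = extract.get_row_ptr();
  const std::vector<int>& sub_ind = extract.get_col_ind();
  std::printf("subgraph: %zu rows, %zu nonzeros\n", sub_ptr.size() - 1, sub_ind.size());
  return 0;
}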
0
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/cnmem_shared_ptr.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <cnmem.h>
#include <cstring>

//
#if __cplusplus > 199711L
#include <memory>
#define SHARED_PREFIX std
#else
#include <boost/shared_ptr.hpp>
#define SHARED_PREFIX boost
#endif

#include <iostream>

#include "nvgraph_error.hxx"

namespace nvgraph {

template< typename T >
class DeviceDeleter {
    cudaStream_t mStream;
public:
    DeviceDeleter(cudaStream_t stream) : mStream(stream) {}
    void operator()(T *ptr) {
        cnmemStatus_t status = cnmemFree(ptr, mStream);
        if( status != CNMEM_STATUS_SUCCESS ) {
            FatalError("Memory manager internal error (free)", NVGRAPH_ERR_UNKNOWN);
        }
    }
};

template< typename T >
inline SHARED_PREFIX::shared_ptr<T> allocateDevice(size_t n, cudaStream_t stream) {
    T *ptr = NULL;
    cnmemStatus_t status = cnmemMalloc((void**) &ptr, n*sizeof(T), stream);
    if( status == CNMEM_STATUS_OUT_OF_MEMORY) {
        FatalError("Not enough memory", NVGRAPH_ERR_NO_MEMORY);
    }
    else if (status != CNMEM_STATUS_SUCCESS) {
        FatalError("Memory manager internal error (alloc)", NVGRAPH_ERR_UNKNOWN);
    }
    return SHARED_PREFIX::shared_ptr<T>(ptr, DeviceDeleter<T>(stream));
}

template< typename T >
class DeviceReleaser {
    cudaStream_t mStream;
public:
    DeviceReleaser(cudaStream_t stream) : mStream(stream) {}
    void operator()(T *ptr) { }
};

template< typename T >
inline SHARED_PREFIX::shared_ptr<T> attachDevicePtr(T * ptr_in, cudaStream_t stream) {
    T *ptr = ptr_in;
    return SHARED_PREFIX::shared_ptr<T>(ptr, DeviceReleaser<T>(stream));
}

} // end namespace nvgraph
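
A usage sketch, assuming the cnmem pool still needs to be initialised here (in nvgraph proper this happens at handle creation); the pool size and helper name are arbitrary illustrations.

// cnmem_shared_ptr_demo.cu -- sketch only.
#include <cstring>
#include <cuda_runtime.h>
#include <cnmem.h>
#include "cnmem_shared_ptr.hxx"

void shared_ptr_demo(cudaStream_t stream, float* externally_owned_dev_ptr) {
  // Pool setup (normally done once, elsewhere).
  cnmemDevice_t dev;
  std::memset(&dev, 0, sizeof(dev));
  dev.size = size_t(1) << 28;                 // 256 MB pool on the current device
  cnmemInit(1, &dev, CNMEM_FLAGS_DEFAULT);

  {
    // Owned allocation: returned to the pool when the shared_ptr goes out of scope.
    SHARED_PREFIX::shared_ptr<float> d_buf = nvgraph::allocateDevice<float>(1 << 20, stream);
    cudaMemsetAsync(d_buf.get(), 0, (1 << 20) * sizeof(float), stream);

    // Non-owning view of memory managed elsewhere: DeviceReleaser never frees it.
    SHARED_PREFIX::shared_ptr<float> d_view =
        nvgraph::attachDevicePtr<float>(externally_owned_dev_ptr, stream);
  }

  cnmemFinalize();
}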
0
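A minimal usage sketch for the two helpers above, assuming the cnmem pool has already been initialized elsewhere (e.g. at library start-up) and that a valid CUDA stream is in hand; allocateDevice ties the pool allocation's lifetime to the returned shared_ptr, while attachDevicePtr only wraps an externally owned pointer:

#include <cuda_runtime.h>
#include "cnmem_shared_ptr.hxx"   // the header shown above

void example(cudaStream_t stream)
{
    // 1024 floats from the cnmem pool; cnmemFree runs automatically (via
    // DeviceDeleter) when the last shared_ptr copy goes out of scope.
    SHARED_PREFIX::shared_ptr<float> d_buf = nvgraph::allocateDevice<float>(1024, stream);
    cudaMemsetAsync(d_buf.get(), 0, 1024 * sizeof(float), stream);

    // Wrap a pointer the caller owns: DeviceReleaser's operator() is a no-op,
    // so nothing is freed when this shared_ptr is destroyed.
    float* d_external = nullptr;
    cudaMalloc(&d_external, 16 * sizeof(float));
    SHARED_PREFIX::shared_ptr<float> d_view = nvgraph::attachDevicePtr(d_external, stream);

    // ... use d_buf.get() / d_view.get() in kernels launched on `stream` ...

    cudaStreamSynchronize(stream);
    cudaFree(d_external);          // still the caller's responsibility
}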
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/delta_modularity.cuh
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/generate.h> #include <thrust/transform.h> #include "util.cuh" #include "graph_utils.cuh" #include "functor.cuh" //#include "block_delta_modularity.cuh" #include <cusparse.h> namespace nvlouvain{ /************************************************************* * * compute k_i_in * * - input : * n_vertex * csr_ptr's ptr * csr_idx's ptr * csr_val's ptr * cluster's ptr : current cluster assignment * c: target cluster * i: current vertex * * - output: * results: k i in c * ***************************************************************/ template<typename IdxType, typename ValType> __device__ void compute_k_i_in( const int n_vertex, IdxType* csr_ptr_ptr, IdxType* csr_idx_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, IdxType c, // tid.y IdxType i, // tid.x ValType* result){ ValType sum = 0.0; //Sanity check if( i < n_vertex ){ IdxType i_start = *(csr_ptr_ptr + i); IdxType i_end = *(csr_ptr_ptr + i + 1); #pragma unroll for(int j = 0; j < i_end - i_start; ++j){ IdxType j_idx = *(csr_idx_ptr + i_start + j); IdxType c_j = *(cluster_ptr + j_idx); sum += (int)(c_j==c)*((ValType)(*(csr_val_ptr + i_start + j))); } *result = sum; } } // delta modularity when an isolate vertex i moved into a cluster c // c must be one of the clusters // ptr version template<typename IdxType, typename ValType> __device__ void delta_modularity(const int n_vertex, const int c_size, bool updated, IdxType* csr_ptr_ptr, IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, ValType c_sum, ValType m2, IdxType row_idx, IdxType col_idx, IdxType c, ValType* k_vec_ptr, ValType* score){ // ki: sum of i's edges weight // ki_in: sum of edge from i to c // sum_tot: for all v in c, sum of v's edges weight IdxType c_i = *(cluster_ptr + row_idx); ValType ki_in = 0.0; ki_in = (int)(c_i!=c)*(*(csr_val_ptr + col_idx)); ValType ki = *(k_vec_ptr + row_idx); if(!updated){ compute_k_i_in(n_vertex, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, c, row_idx, &ki_in); } ValType sum_tot = c_sum - (int)(c_i == c)*ki; *score = ki_in - 2*sum_tot*ki/(m2); // printf("i: %d\tci: %d\tc: %d\t2m: %1f\tkin: %f\tki: %f\tsum_tot: %f\tc_sum: %f\tdelta: %f\n", row_idx, c_i, c, m2, ki_in, ki, sum_tot, c_sum,*score ); } template<typename IdxType=int, typename ValType> __device__ void compute_cluster_sum(const int n_vertex, const int c_size, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, ValType* k_ptr, // pre-compute ki size: n_vertex ValType* cluster_sum_vec){ int c = blockIdx.x * blockDim.x + threadIdx.x; IdxType c_start, c_end; ValType sum = 0.0; if(c < c_size){ c_start = *(cluster_inv_ptr_ptr + c); c_end = *(cluster_inv_ptr_ptr + c + 1); #pragma unroll for(IdxType* it = cluster_inv_ind_ptr + c_start; it!= cluster_inv_ind_ptr + c_end ; ++it){ sum += (ValType)(*(k_ptr + *(it))); } *(cluster_sum_vec + c) = 
sum; //printf("c: %d c_sum: %f\n", c, (ValType)(*(cluster_sum_vec + c))); } } template<typename IdxType=int, typename ValType> __global__ void kernel_compute_cluster_sum(const int n_vertex, const int c_size, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, ValType* k_ptr, // pre-compute ki size: n_vertex ValType* cluster_sum_vec){ compute_cluster_sum(n_vertex, c_size, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_ptr, cluster_sum_vec); } /**************************************************************************************************** * * compute delta modularity vector, delta_modularity_vec, size = n_edges * theads layout: (lunched as 1D) * 1 thread for 1 edge, flattened * need coo row index instead (pre-computed) * input variables: * n_vertex: number of vertex * n_edges: number of edges * c_size: number of unique clusters * updated: if previous iteration generate a new supervertices graph * cluster_ptr: cluster assignment * cluster_sum_vec_ptr: sum of clusters * k_vec_ptr: ki vector * output: * delta_modularity_vec: size = n_edges * delta modularity if we move from_node to to_nodes cluster c for each edge * ****************************************************************************************************/ template<typename IdxType, typename ValType> __global__ void// __launch_bounds__(CUDA_MAX_KERNEL_THREADS) build_delta_modularity_vec_flat(const int n_vertex, const int n_edges, const int c_size, ValType m2, bool updated, IdxType* coo_row_ind_ptr, IdxType* csr_ptr_ptr, IdxType* csr_ind_ptr, ValType* csr_val_ptr, IdxType* cluster_ptr, ValType* cluster_sum_vec_ptr, ValType* k_vec_ptr, ValType* delta_modularity_vec){ ValType m2_s(m2); //privatize int tid = blockIdx.x * blockDim.x + threadIdx.x; if( tid < n_edges ){ IdxType row_idx = *(coo_row_ind_ptr + tid); IdxType col_idx = *(csr_ind_ptr + tid); IdxType c = cluster_ptr[ col_idx ]; // target cluster c ValType c_sum = cluster_sum_vec_ptr[c]; delta_modularity(n_vertex, c_size, updated, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, c_sum, m2_s, row_idx, col_idx, c, k_vec_ptr, delta_modularity_vec + tid); } } /****************************************************************************************************** * NOT USED * compute delta modularity vector, delta_modularity_vec, size = n_edges * theads layout: (lauched as 2D) * 1 thread for 1 edge * each thread.x per vertex i * each thread.y per neibor j of vertex i * need to pre compute max_degree for lauch this kernel * input variables: * n_vertex: number of vertex * n_edges: number of edges * c_size: number of unique clusters * updated: if previous iteration generate a new supervertices graph * cluster_ptr: cluster assignment * cluster_sum_vec_ptr: sum of clusters * k_vec_ptr: ki vector * output: * delta_modularity_vec: size = n_edges * delta modularity if we move from_node to to_nodes cluster c for each edge * *****************************************************************************************************/ /* template<typename IdxIter, typename ValIter, typename ValType> __global__ void// __launch_bounds__(CUDA_MAX_KERNEL_THREADS) build_delta_modularity_vec(const int n_vertex, const int c_size, ValType m2, bool updated, IdxIter csr_ptr_ptr, IdxIter csr_ind_ptr, ValIter csr_val_ptr, IdxIter cluster_ptr, ValType* cluster_sum_vec_ptr, ValType* k_vec_ptr, ValType* delta_modularity_vec){ ValType m2_s(m2); int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int start, end; if( i < n_vertex ){ start = *(csr_ptr_ptr + i); end = 
*(csr_ptr_ptr + i + 1); if(j < end - start){ int j_idx = *(csr_ind_ptr + start + j); int c = *( cluster_ptr + j_idx); ValType c_sum = cluster_sum_vec_ptr[c]; delta_modularity( n_vertex, c_size, updated, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, c_sum, m2_s, i, start + j, c, k_vec_ptr, delta_modularity_vec + start + j); } } } */ /****************************************************** * * find the max delta modularity for each vertex i * zero out other delta modularity for vertex i * *******************************************************/ //template<typename ValType, typename IdxIter, typename ValIter> template<typename ValType, typename IdxIter, typename ValIter> __global__ void// __launch_bounds__(CUDA_MAX_KERNEL_THREADS) max_delta_modularity_vec_stride(const int n_vertex, const int n_edges, IdxIter csr_ptr_iter, IdxIter csr_ind_iter, ValIter csr_val_iter, IdxIter cluster_iter, ValType* delta_modularity_vec){ unsigned int wid = blockIdx.x; // 0 ~ n_vertex - 1 unsigned int tid = threadIdx.x; // 0 ~ 31 __shared__ int start_idx; __shared__ int end_idx; __shared__ int degree; __shared__ ValType local_max[WARP_SIZE]; __shared__ ValType warp_max_val; unsigned int stride = WARP_SIZE / 2; warp_max_val = -1000; if( wid < n_vertex ){ if(tid == 0){ start_idx = *(csr_ptr_iter + wid); end_idx = *(csr_ptr_iter + wid + 1); degree = end_idx - start_idx; } __syncwarp(); //find the max elements for(unsigned xid = 0; xid + tid < ( degree ); xid += WARP_SIZE){ local_max[tid]= -1.0 ; if(start_idx + xid + tid > n_edges) printf("Error access invalid memory %d = %d + %d + %d end: %d\n", start_idx + xid + tid, start_idx, xid, tid, end_idx); local_max[tid] = (ValType)(*(delta_modularity_vec + start_idx + xid + tid)); stride = umin(16, (degree)/2 + 1); while(tid < stride && stride > 0){ local_max[tid] = fmax(local_max[tid], local_max[tid + stride]); stride/=2; //stride /=2 } __syncwarp(); if(tid == 0 && warp_max_val < local_max[0]){ warp_max_val = local_max[0]; } } __syncwarp(); // zero out non-max elements for(unsigned xid = 0; xid + tid < ( degree ); xid += WARP_SIZE){ if(start_idx + xid + tid < end_idx){ ValType original_val = ((ValType)*(delta_modularity_vec + start_idx + xid + tid)); (*(delta_modularity_vec + start_idx + xid + tid)) = (int)(original_val == warp_max_val) * original_val; /* if(original_val == warp_max_val){ int j_idx = (int)(*(csr_ind_iter + start_idx + xid + tid)); printf("+i: %d j: %d c: %d %f\n", wid, j_idx, (int)(*(cluster_iter + j_idx)),original_val ); }else{ int j_idx = (int)(*(csr_ind_iter + start_idx + xid + tid)); printf("-i: %d j: %d c: %d %f\n", wid, j_idx, (int)(*(cluster_iter + j_idx)),original_val ); } */ } } } } /****************************************************** * NOT USED * find the max delta modularity for each vertex i * zero out other delta modularity for vertex i * *******************************************************/ /* template<typename IdxIter, typename ValIter, typename ValType> __global__ void// __launch_bounds__(CUDA_MAX_KERNEL_THREADS) max_delta_modularity_vec(const int n_vertex, IdxIter csr_ptr_ptr, IdxIter csr_ind_ptr, ValIter csr_val_ptr, ValType* delta_modularity_vec){ int i = blockIdx.x * blockDim.x + threadIdx.x; int start, end; ValType * best_pos_ptr; if( i < n_vertex ){ start = *( csr_ptr_ptr + i); end = *( csr_ptr_ptr + i + 1); best_pos_ptr = thrust::max_element(thrust::cuda::par, delta_modularity_vec + start, delta_modularity_vec + end); } if( i < n_vertex ){ //printf("i: %d max: %f\n", i, (ValType)(*best_pos_ptr)); 
thrust::replace_if(thrust::cuda::par, delta_modularity_vec + start, delta_modularity_vec + end, not_best<ValType>(*best_pos_ptr), 0.0); } } */ // Not used template<typename IdxType, typename ValType> void build_delta_modularity_vector_old(const int n_vertex, const int c_size, ValType m2, bool updated, thrust::device_vector<IdxType>& csr_ptr_d, thrust::device_vector<IdxType>& csr_ind_d, thrust::device_vector<ValType>& csr_val_d, thrust::device_vector<IdxType>& cluster_d, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, // precompute cluster inverse ValType* k_vec_ptr, // precompute ki's thrust::device_vector<ValType>& temp_vec, // temp global memory with size n_vertex ValType* cluster_sum_vec_ptr, ValType* delta_Q_arr_ptr){ /* start compute delta modularity vec */ dim3 block_size_1d((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size_1d(BLOCK_SIZE_1D, 1, 1); int n_edges = csr_ptr_d[n_vertex]; kernel_compute_cluster_sum<<<block_size_1d, grid_size_1d>>>( n_vertex, c_size, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec_ptr, cluster_sum_vec_ptr); CUDA_CALL(cudaDeviceSynchronize()); thrust::fill(thrust::cuda::par, delta_Q_arr_ptr, delta_Q_arr_ptr + n_edges, 0.0); //pre-compute max_degree for block_size_2D and grid_size_2D thrust::transform(thrust::device, csr_ptr_d.begin() + 1, csr_ptr_d.end(), csr_ptr_d.begin(), temp_vec.begin(), minus_idx<IdxType, ValType>()); auto max_ptr = thrust::max_element(thrust::device, temp_vec.begin(), temp_vec.begin() + n_vertex ); int max_degree = (IdxType)(*max_ptr); dim3 block_size_2d((n_vertex + BLOCK_SIZE_2D*2 -1)/ (BLOCK_SIZE_2D*2), (max_degree + BLOCK_SIZE_2D -1)/ (BLOCK_SIZE_2D), 1); dim3 grid_size_2d(BLOCK_SIZE_2D*2, BLOCK_SIZE_2D, 1); // build delta modularity vec with 2D (vertex i, neighbor of i) grid size are_now(32, 16, 1) build_delta_modularity_vec<<<block_size_2d, grid_size_2d>>>(n_vertex, c_size, m2, updated, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), cluster_sum_vec_ptr, k_vec_ptr, delta_Q_arr_ptr); CUDA_CALL(cudaDeviceSynchronize()); block_size_1d = dim3((n_vertex + BLOCK_SIZE_1D*4 -1)/ BLOCK_SIZE_1D*4, 1, 1); grid_size_1d = dim3(BLOCK_SIZE_1D*4, 1, 1); // zero out non maximum delta modularity for each vertex i grid size are now (128, 1, 1) max_delta_modularity_vec<<<block_size_1d, grid_size_1d>>>(n_vertex, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), delta_Q_arr_ptr ); CUDA_CALL(cudaDeviceSynchronize()); } // // A new version of building delta modularity vector function // // template<typename IdxType, typename ValType> void build_delta_modularity_vector(cusparseHandle_t cusp_handle, const int n_vertex, const int c_size, ValType m2, bool updated, thrust::device_vector<IdxType>& csr_ptr_d, thrust::device_vector<IdxType>& csr_ind_d, thrust::device_vector<ValType>& csr_val_d, thrust::device_vector<IdxType>& cluster_d, IdxType* cluster_inv_ptr_ptr, IdxType* cluster_inv_ind_ptr, // precompute cluster inverse ValType* k_vec_ptr, // precompute ki's ValType* cluster_sum_vec_ptr, ValType* delta_Q_arr_ptr){ /* start compute delta modularity vec */ dim3 block_size_1d((n_vertex + BLOCK_SIZE_1D -1)/ BLOCK_SIZE_1D, 1, 1); dim3 grid_size_1d(BLOCK_SIZE_1D, 1, 1); int n_edges = csr_ptr_d[n_vertex]; kernel_compute_cluster_sum<<<block_size_1d, grid_size_1d>>>( n_vertex, c_size, cluster_inv_ptr_ptr, cluster_inv_ind_ptr, k_vec_ptr, cluster_sum_vec_ptr); CUDA_CALL(cudaDeviceSynchronize()); thrust::fill(thrust::cuda::par, delta_Q_arr_ptr, delta_Q_arr_ptr + n_edges, 0.0); IdxType *csr_ptr_ptr = 
thrust::raw_pointer_cast(csr_ptr_d.data()); IdxType *csr_ind_ptr = thrust::raw_pointer_cast(csr_ind_d.data()); ValType *csr_val_ptr = thrust::raw_pointer_cast(csr_val_d.data()); IdxType *cluster_ptr = thrust::raw_pointer_cast(cluster_d.data()); // pre compute coo row indices using cusparse thrust::device_vector<IdxType> coo_row_ind(n_edges); IdxType* coo_row_ind_ptr = thrust::raw_pointer_cast(coo_row_ind.data()); cusparseXcsr2coo(cusp_handle, csr_ptr_ptr, n_edges, n_vertex, coo_row_ind_ptr, CUSPARSE_INDEX_BASE_ZERO); // build delta modularity vec flatten (1 thread per 1 edges) block_size_1d = dim3((n_edges + BLOCK_SIZE_1D * 2 -1)/ BLOCK_SIZE_1D * 2, 1, 1); grid_size_1d = dim3(BLOCK_SIZE_1D*2, 1, 1); build_delta_modularity_vec_flat<<<block_size_1d, grid_size_1d>>>(n_vertex, n_edges, c_size, m2, updated, coo_row_ind_ptr, csr_ptr_ptr, csr_ind_ptr, csr_val_ptr, cluster_ptr, cluster_sum_vec_ptr, k_vec_ptr, delta_Q_arr_ptr); CUDA_CALL(cudaDeviceSynchronize()); // Done compute delta modularity vec block_size_1d = dim3(n_vertex, 1, 1); grid_size_1d = dim3(WARP_SIZE, 1, 1); max_delta_modularity_vec_stride<<<block_size_1d, grid_size_1d>>>(n_vertex, n_edges, csr_ptr_d.begin(), csr_ind_d.begin(), csr_val_d.begin(), cluster_d.begin(), delta_Q_arr_ptr ); CUDA_CALL(cudaDeviceSynchronize()); } } // nvlouvain
0
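The kernels above score each edge (i, j) with the modularity gain of moving vertex i into j's cluster c. The scalar formula they implement (the compute_k_i_in path plus the sum_tot correction) is easier to read on the host; the sketch below is an illustrative reference with assumed double-precision inputs and names, not the device code path itself:

#include <vector>

// Host-side reference for the per-edge score produced by delta_modularity():
// the gain (up to the common scaling applied elsewhere) of moving vertex i
// into cluster c.  CSR arrays, the cluster assignment, per-vertex degree sums
// k[i] and per-cluster degree sums cluster_sum[c] mirror the device inputs.
double delta_modularity_host(int i, int c,
                             const std::vector<int>&    csr_ptr,
                             const std::vector<int>&    csr_ind,
                             const std::vector<double>& csr_val,
                             const std::vector<int>&    cluster,
                             const std::vector<double>& k,
                             const std::vector<double>& cluster_sum,
                             double m2 /* total edge weight times two */)
{
    // k_i_in: weight of edges from i into cluster c (compute_k_i_in)
    double ki_in = 0.0;
    for (int e = csr_ptr[i]; e < csr_ptr[i + 1]; ++e)
        if (cluster[csr_ind[e]] == c)
            ki_in += csr_val[e];

    // sum_tot: total degree of cluster c, not counting i if i already belongs to c
    double sum_tot = cluster_sum[c] - (cluster[i] == c ? k[i] : 0.0);

    // same expression as the device kernel: ki_in - 2*sum_tot*ki/m2
    return ki_in - 2.0 * sum_tot * k[i] / m2;
}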
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/size2_selector.hxx
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

//#include <common_selector.hxx>
#include <nvgraph_vector.hxx>
#include <valued_csr_graph.hxx>

namespace nvgraph {

typedef enum
{
    USER_PROVIDED = 0,      // using edge values as is
    SCALED_BY_ROW_SUM = 1,  // 0.5*(A_ij+A_ji)/max(d(i),d(j)), where d(i) is the sum of row i
    SCALED_BY_DIAGONAL = 2, // 0.5*(A_ij+A_ji)/max(diag(i),diag(j))
} Matching_t;

template <typename IndexType_, typename ValueType_>
class Size2Selector
{
public:
    typedef IndexType_ IndexType;
    typedef ValueType_ ValueType;

    Size2Selector();

    Size2Selector(Matching_t similarity_metric, int deterministic = 1, int max_iterations = 15,
                  ValueType numUnassigned_tol = 0.05, bool two_phase = false,
                  bool merge_singletons = true, cudaStream_t stream = 0)
        : m_similarity_metric(similarity_metric), m_deterministic(deterministic),
          m_max_iterations(max_iterations), m_numUnassigned_tol(numUnassigned_tol),
          m_two_phase(two_phase), m_merge_singletons(merge_singletons), m_stream(stream)
    {
        m_aggregation_edge_weight_component = 0;
        m_weight_formula = 0;
    }

    NVGRAPH_ERROR setAggregates(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates);

protected:
    NVGRAPH_ERROR setAggregates_common_sqblocks(const ValuedCsrGraph<IndexType, ValueType> &A, Vector<IndexType> &aggregates, int &num_aggregates);

    Matching_t m_similarity_metric;
    int m_deterministic;
    int m_max_iterations;
    ValueType m_numUnassigned_tol;
    bool m_two_phase;
    bool m_merge_singletons;
    cudaStream_t m_stream;
    int m_aggregation_edge_weight_component;
    int m_weight_formula;
};

} // nvgraph
0
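A minimal calling sketch for the selector declared above. The weighted graph and the aggregates vector are assumed to be built by the caller (their constructors live in valued_csr_graph.hxx and nvgraph_vector.hxx), so the snippet only shows the matching configuration and the setAggregates call:

#include <nvgraph_vector.hxx>
#include <valued_csr_graph.hxx>
#include <size2_selector.hxx>

// Pair up vertices of a weighted CSR graph into size-2 aggregates
// (heavy-edge style matching), as used by a coarsening phase.
nvgraph::NVGRAPH_ERROR pair_vertices(const nvgraph::ValuedCsrGraph<int, float>& A,
                                     nvgraph::Vector<int>& aggregates, // sized to A.get_num_vertices() by the caller
                                     int& num_aggregates,
                                     cudaStream_t stream)
{
    // Scale edge weights by row sums, deterministic matching, at most
    // 15 rounds, stop when fewer than 5% of vertices remain unassigned.
    nvgraph::Size2Selector<int, float> selector(
        nvgraph::SCALED_BY_ROW_SUM, /*deterministic=*/1, /*max_iterations=*/15,
        /*numUnassigned_tol=*/0.05f, /*two_phase=*/false,
        /*merge_singletons=*/true, stream);

    // On success, aggregates[v] holds the aggregate id of vertex v,
    // with ids in [0, num_aggregates).
    return selector.setAggregates(A, aggregates, num_aggregates);
}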
rapidsai_public_repos/nvgraph/cpp
rapidsai_public_repos/nvgraph/cpp/include/graph_concrete_visitors.hxx
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef GRAPH_CONCRETE_VISITORS_HXX #define GRAPH_CONCRETE_VISITORS_HXX #include <multi_valued_csr_graph.hxx> //which includes all other headers... #include <range_view.hxx> // TODO: to be changed to thrust/range_view.h, when toolkit gets in sync with Thrust #include <thrust_traits.hxx> #include <cassert> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/binary_search.h> #include <thrust/functional.h> #include <thrust/copy.h> #include <thrust/reduce.h> #include <thrust/gather.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <thrust/remove.h> #include <thrust/count.h> #include <thrust/distance.h>// #include <thrust/unique.h>// #include <thrust/merge.h>// #include <thrust/sort.h>// #include <thrust/find.h>// #include <iostream> #include <sstream> #include <iterator> #include <algorithm> namespace nvgraph { //get unique elements and return their count: // template<typename Container> size_t count_get_distinct(const Container& v, //in Container& res) //out { res.assign(v.begin(), v.end());//copy size_t counts = thrust::distance(res.begin(), thrust::unique(res.begin(), res.end())); res.resize(counts); return counts; } //Adapted from: https://github.com/thrust/thrust/blob/master/examples/expand.cu // //Note: //C++03 doesn’t allow default template arguments on function templates. //This was considered a “defect” by Bjarne Stroustrup, subsequently fixed in C++11. 
//See, for example: http://stackoverflow.com/questions/2447458/default-template-arguments-for-function-templates // template<typename T, template<typename> class Allocator, template<typename, typename> class Vector> typename Vector<T, Allocator<T> >::iterator expand(Vector<T, Allocator<T> >& counts, Vector<T, Allocator<T> >& values, Vector<T, Allocator<T> >& out) { typedef typename Vector<T, Allocator<T> >::iterator Iterator; Iterator first1 = counts.begin(); Iterator last1 = counts.end(); Iterator first2 = values.begin(); Iterator output = out.begin(); typedef typename thrust::iterator_difference<Iterator>::type difference_type; difference_type input_size = thrust::distance(first1, last1); difference_type output_size = thrust::reduce(first1, last1); // scan the counts to obtain output offsets for each input element Vector<difference_type, Allocator<difference_type> > output_offsets(input_size, 0); thrust::exclusive_scan(first1, last1, output_offsets.begin()); // scatter the nonzero counts into their corresponding output positions Vector<difference_type, Allocator<difference_type> > output_indices(output_size, 0); thrust::scatter_if (thrust::counting_iterator<difference_type>(0), thrust::counting_iterator<difference_type>(input_size), output_offsets.begin(), first1, output_indices.begin()); // compute max-scan over the output indices, filling in the holes thrust::inclusive_scan (output_indices.begin(), output_indices.end(), output_indices.begin(), thrust::maximum<difference_type>()); // gather input values according to index array (output = first2[output_indices]) Iterator output_end = output; thrust::advance(output_end, output_size); thrust::gather(output_indices.begin(), output_indices.end(), first2, output); // return output + output_size thrust::advance(output, output_size); return output; } // // //##### Change 1: reverse hash was wrong: hash[val_i] = index of first occurence of val_i ##### // template<typename Container> struct MinLeftRightPlusValue { typedef typename VectorPtrT<typename Container::value_type,Container>::PtrT PtrT; typedef typename Container::value_type ValT; explicit MinLeftRightPlusValue(ValT delta): delta_(delta) { } __host__ __device__ ValT operator() (ValT left, ValT right) { ValT rs = right + delta_; return (left < rs? left : rs); } private: ValT delta_; }; //given vector v[i] = val_i, //return reverse hash vector: //hash[val_i] = i (index of first occurence of val_i, if val_i exists in v[]; // else, last occurence of closest value less than val_i): // //advantage: works trully like a hash, no need for search // // //pre-conditions: (1.) v sorted in ascending order // (2.) 
value_type is integer type // //Ex: //v: 0,1,3,6,7,8,8; //hash: 0,1,1,2,2,2,3,4,5; // template<typename Container> void reverse_hash(Container& v, //in Container& hash) //out { typedef typename Container::value_type ValT; if( v.empty() ) return; size_t sz = v.size(); size_t seq_sz = v.back()-v.front()+1; thrust::counting_iterator<ValT> seq_first(v.front()); thrust::counting_iterator<ValT> seq_last(v.back()+1); Container hash1(seq_sz, ValT(-1)); Container hash2(seq_sz, ValT(-1)); hash.assign(seq_sz, ValT(-1)); thrust::upper_bound(v.begin(), v.end(), seq_first, seq_last, //seq.begin(), seq.end(),//ok hash1.begin(), thrust::less<ValT>()); // thrust::lower_bound(v.begin(), v.end(), seq_first, seq_last, //seq.begin(), seq.end(), //ok hash2.begin(), thrust::less<ValT>()); thrust::transform(hash2.begin(), hash2.end(), hash1.begin(), hash.begin(), MinLeftRightPlusValue<Container>(-1)); } //better use thrust::gather(...) //see /home/aschaffer/Development/Sources/Cuda_Thrust/filter_via_gather.cu template<typename VectorR, typename IndexT> struct Filter { typedef typename VectorR::value_type RetT; explicit Filter(VectorR& src): m_src(&src[0]) { } __host__ __device__ RetT operator()(const IndexT& k) { return m_src[k]; } private: typename VectorPtrT<typename VectorR::value_type,VectorR>::PtrT m_src; }; template<typename Container, typename IndexT> struct CleanFctr { explicit CleanFctr(Container& used): m_used(&used[0]) { } __host__ __device__ bool operator()(const IndexT& k) { return (m_used[k] == 0); } private: typename VectorPtrT<typename Container::value_type,Container>::PtrT m_used; }; // // template<typename VectorV, typename VectorI> struct ValueUpdater { typedef typename VectorI::value_type IndexT; //typedef typename VectorPtrT<typename VectorI::value_type,VectorV>::PtrT PtrI; typedef typename VectorV::value_type ValueT; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; explicit ValueUpdater(VectorV& v_src, VectorV& v_dest): v_s_(v_src), v_d_(v_dest) { } ///__host__ __device__ ValueT at(IndexT j) const { return v_s_[j]; } struct ValFiller { explicit ValFiller(VectorV& v_src): m_s(&v_src[0]) { } __host__ __device__ ValueT operator() (IndexT k) { return m_s[k]; } private: PtrV m_s; }; //##### Change 5: const K ##### // void update_from(const VectorI& K) { size_t actual_nnz = K.size(); v_d_.assign(actual_nnz, ValueT(0)); ValFiller valfill(v_s_); thrust::transform(K.begin(), K.end(), v_d_.begin(), valfill); } const VectorV& get_subg_vals(void) const { return v_d_; } private: VectorV& v_s_; VectorV& v_d_; }; template<typename VectorI, typename VectorB = VectorI> struct Offsets2RowIndex { typedef typename VectorI::value_type IndexT; typedef typename VectorPtrT<typename VectorB::value_type,VectorB>::PtrT PtrB; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; Offsets2RowIndex(VectorB& hash_rows, VectorI& offsets, VectorI& I0, VectorI& vSub, VectorI& row_ptr, VectorI& col_ind, VectorI& I, VectorI& J, VectorI& K, VectorB& U): m_hash_sz(hash_rows.size()), m_off_sz(offsets.size()), m_hash_rows(&hash_rows[0]), m_offsets(&offsets[0]), m_i0(&I0[0]), m_row_subset(&vSub[0]), m_row_ptr(&row_ptr[0]), m_col_ind(&col_ind[0]), m_i(&I[0]), m_j(&J[0]), m_k(&K[0]), m_used(&U[0]) { } //k = element in range[]:{0,1,...,nnz-1} // __host__ __device__ IndexT operator() (IndexT k) { IndexT subg_row_index = m_i0[k]; IndexT g_row_index = m_row_subset[subg_row_index]; //j = col_ind[ row_ptr[g_row_index] + k - offsets[subg_row_index]] // IndexT row_ptr_i = 
m_row_ptr[g_row_index]+ k- m_offsets[subg_row_index]; IndexT col_index = m_col_ind[row_ptr_i]; //is col_index in row_subset? // if( (col_index < m_hash_sz) && (m_hash_rows[col_index] == 1) ) //col_index in subset, too=>it's a hit! { m_i[k] = g_row_index; m_j[k] = col_index; ///m_v[k] = m_fctr.at(row_ptr_i);//ok, but couples it with vals... m_k[k] = row_ptr_i; m_used[k] = 1; } //else ...nothing return g_row_index; } private: const size_t m_hash_sz; const size_t m_off_sz; PtrB m_hash_rows; PtrI m_offsets; PtrI m_offset_indices; PtrI m_row_subset; PtrI m_row_ptr; PtrI m_col_ind; PtrI m_i0; PtrI m_i; PtrI m_j; PtrI m_k; PtrB m_used; }; template<typename VectorI, typename VectorB> size_t fill_hash_nz2ijv(VectorB& hash_rows, VectorI& range, //in/out VectorI& nzs, VectorI& offsets, VectorI& vSub, VectorI& row_ptr, VectorI& col_ind, VectorI& I, VectorI& J, VectorI& K, VectorB& U) { typedef typename VectorI::value_type IndexT; size_t nnz = range.size(); size_t nrows_subg = nzs.size(); VectorI I0(nnz, IndexT(0)); VectorI dummy(nnz, IndexT(0)); //make m_offset_indices increasing sequence //from 0,...,offsets.size()-1 // VectorI offset_indices(nrows_subg, IndexT(0)); thrust::sequence(offset_indices.begin(), offset_indices.end(), IndexT(0)); expand(nzs, offset_indices, I0); Offsets2RowIndex<VectorI, /*VectorV, ValueUpdater, VectorSz,*/ VectorB > off_fctr(hash_rows, offsets, I0, vSub, row_ptr, col_ind, I,J,K,U); //why unused dummy? //because functor must return something //and must store result of functor somewhere! // thrust::transform(range.begin(), range.end(), dummy.begin(), //unused... off_fctr); CleanFctr<VectorB, IndexT> cleaner(U); range.erase(thrust::remove_if(range.begin(), range.end(), cleaner), range.end()); size_t actual_nnz = range.size(); VectorI truncated_i(actual_nnz, IndexT(0)); VectorI truncated_j(actual_nnz, IndexT(0)); ///VectorV truncated_v(actual_nnz, IndexT(0)); VectorI truncated_k(actual_nnz, IndexT(0)); Filter<VectorI, IndexT> filter_i(I); thrust::transform(range.begin(), range.end(), truncated_i.begin(), filter_i); I = truncated_i; // vector copy! Filter<VectorI, IndexT> filter_j(J); thrust::transform(range.begin(), range.end(), truncated_j.begin(), filter_j); J = truncated_j; // vector copy! Filter<VectorI, IndexT> filter_k(K); thrust::transform(range.begin(), range.end(), truncated_k.begin(), filter_k); K = truncated_k; // vector copy! // Filter<VectorV, IndexT> filter_v(V); // thrust::transform(range.begin(), range.end(), // truncated_v.begin(), // filter_v); // V = truncated_v; // vector copy! //scoo.m_v[] == subg.vals ! 
///fctr.update_vals(scoo.get_v()); U.assign(actual_nnz,1);//just for consistency, // not really necessary return actual_nnz; } template<typename Container> struct NzCounter { typedef typename Container::value_type IndexT; typedef typename VectorPtrT<typename Container::value_type,Container>::PtrT PtrT; explicit NzCounter(Container& row_ptr): m_row_ptr(&row_ptr[0]) { } __host__ __device__ IndexT operator() (const IndexT& i) { return m_row_ptr[i+1]-m_row_ptr[i]; } private: PtrT m_row_ptr; }; template<typename Container> struct HashFctr { typedef typename Container::value_type IndexT; explicit HashFctr(Container& hash_src): m_hash(&hash_src[0]) { } __host__ __device__ IndexT operator() (const IndexT& src_elem) { IndexT hit(1); m_hash[src_elem] = hit; return hit; } private: typename VectorPtrT<typename Container::value_type,Container>::PtrT m_hash; }; template<typename VectorI, typename VectorB> size_t make_hash(VectorI& src, VectorB& hash_src, bool is_sorted = false) { typedef typename VectorI::value_type IndexT; typedef typename VectorB::value_type ValueB; assert( !src.empty() ); IndexT max_entry(0); if( is_sorted ) max_entry = src.back(); else max_entry = thrust::reduce(src.begin(), src.end(), 0, thrust::maximum<IndexT>()); hash_src.assign(max_entry+1, 0); VectorB dummy(hash_src); HashFctr<VectorB> hctr(hash_src); //why unused dummy? //because functor must return something //and must store result of functor somewhere! // thrust::transform(src.begin(), src.end(), dummy.begin(), //unused... hctr); return hash_src.size(); } //##### Change 2: subg row_ptr extraction failed on missing indices ##### /** * @brief Compute the CSR row indices of the extracted graph. * * Note that source is an array of row indices that are * part of the subgraph. If a vertex appears a source multiple * times in the subgraph it appears multiple times in the source * vector. * * @param[in] actual_nnz Number of non-zeros in the subgraph matrix * (aka the number of edges) * @param[in] nrows Number of vertices in the subgraph * @param[in] source Array of row indices that the source of an edge * (NOTE: this array is assumed to be sorted) * @param[out] subg_row_ptr The computed subgraph row pointer */ template<typename VectorI> void make_subg_row_ptr(size_t actual_nnz, //in: # non-zeros in subgraph matrix size_t nrows, //in: |vSub| VectorI& source, //in: array of row indices where there // are non-zeros (assumed sorted) VectorI& subg_row_ptr) //out:subgraph row_ptr { typedef typename VectorI::value_type IndexT; // // Nothing to do here. // if( actual_nnz == 0 ) return; VectorI counts(nrows, 0); // // We want to count how many times the element occurs. We // do this (based on the assumption that the list is sorted) // by computing the upper bound of the range for each row id, // and the lower bound for the range of each row id and // computing the difference. // VectorI ub(nrows), lb(nrows); thrust::upper_bound(source.begin(), source.end(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(nrows), ub.begin()); // // At this point ub[i] is the offset of the end of the string // of occurrences for row id i. // thrust::lower_bound(source.begin(), source.end(), thrust::make_counting_iterator(size_t{0}), thrust::make_counting_iterator(nrows), lb.begin()); // // At this point lb[i] is the offset of the beginning of the string // of occurrences for row id i. 
// thrust::transform(ub.begin(), ub.end(), lb.begin(), counts.begin(), thrust::minus<int>()); // // Counts is now the number of times each index occurs in the data. So we // can compute prefix sums to create our new row index array. // thrust::exclusive_scan(counts.begin(), counts.end(), subg_row_ptr.begin()); subg_row_ptr.back() = actual_nnz; } //used by renumber_indices(...) // template<typename Container> struct Hasher { typedef typename Container::value_type IndexT; typedef typename VectorPtrT<typename Container::value_type,Container>::PtrT PtrT; explicit Hasher(Container& hash_src): m_hash(&hash_src[0]) { } __host__ __device__ IndexT operator() (IndexT i, IndexT v) { m_hash[v] = i; return v; } __host__ __device__ IndexT operator() (IndexT u) { return m_hash[u]; } private: PtrT m_hash; }; //##### Change 3: index renumbering must be split into hash construction and hash usage ##### //constructs hash table //from set of indices into reduced set of indices: //row_idx{5,7,10,12}->{0,1,2,3}; // so that given u{12,7} you get: w{3,1} //w[i]=hash[u[i]]; // //Pre-conditions: //(1.) row_idx is sorted (increasing order); //(2.) row_idx has no duplicates; // template<typename VectorI> void renumber_indices(VectorI& row_idx, //in: subset of row indices; // pre-conditions= // {sorted (increasingly), no duplicates} VectorI& hash_t) //out: renumbering hash table { typedef typename VectorI::value_type IndexT; size_t n = row_idx.size(); VectorI dummy(n,IndexT(0)); IndexT max_entry = row_idx.back();//...since row_idx is sorted increasingly hash_t.assign(max_entry+1, -1); Hasher<VectorI> hasher(hash_t); thrust::counting_iterator<IndexT> first(0); thrust::transform(first, first+n, row_idx.begin(), dummy.begin(), hasher); } template<typename VectorI> void get_renumbered_indices(VectorI& u, //in: in=subset of row_idx; VectorI& hash_t, //in: renumbering hash table VectorI& w) //out:renumbered: hash[u[i]] { typedef typename VectorI::value_type IndexT; Hasher<VectorI> hasher(hash_t); thrust::transform(u.begin(), u.end(), w.begin(), hasher); } template<typename VectorI, typename VectorV, typename VectorB = VectorI> struct SubGraphExtractorFunctor { typedef typename VectorI::value_type IndexT; typedef typename VectorV::value_type ValueT; typedef typename VectorB::value_type ValueB; typedef typename VectorPtrT<typename VectorB::value_type,VectorB>::PtrT PtrB; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; typedef typename VectorPtrT<typename VectorV::value_type,VectorV>::PtrT PtrV; //constructor for edge subset: //requires additional info: col_ind, row_ptr // //pre-conditions: (1.) eSub sorted in ascending order; // (2.) eSub has no duplicates; // SubGraphExtractorFunctor(const VectorI& eSub, bool /*unused*/): edgeSubset(eSub), is_vertex_extraction(false) { } explicit SubGraphExtractorFunctor(const VectorI& vSubset): vertexSubset(vSubset), is_vertex_extraction(true) { //make sure vertexSubset_ is sorted increasingly: ///sort_ifnot(vertexSubset); row_ptr_subg.assign(vSubset.size()+1, IndexT(0)); // can be pre-allocated } virtual ~SubGraphExtractorFunctor(void) { } const VectorV& get_vals(void) const { return vals_subg; } VectorV& get_vals(void) { return vals_subg; } const VectorI& get_row_ptr(void) const { return row_ptr_subg; } const VectorI& get_col_ind(void) const { return col_ind_subg; } struct NoValueUpdater { //##### Change 5: const K ##### // void update_from(const VectorI& K) { //no-op.... 
} }; virtual void operator () (VectorI& row_ptr_, VectorI& col_ind_) { NoValueUpdater no_op; if( is_vertex_extraction ) extract_subgraph_by_vertex(row_ptr_, col_ind_, no_op); else extract_subgraph_by_edge(row_ptr_, col_ind_, no_op); } virtual void operator () (VectorV& vals_, VectorI& row_ptr_, VectorI& col_ind_) { ValueUpdater<VectorV, VectorI> fctrv(vals_, vals_subg); if( is_vertex_extraction ) extract_subgraph_by_vertex(row_ptr_, col_ind_, fctrv); else extract_subgraph_by_edge(row_ptr_, col_ind_, fctrv); } IndexT get_subg_nnz(void) const { return row_ptr_subg.back(); } const VectorI& get_I(void) const { return I; } const VectorI& get_J(void) const { return J; } const VectorI& get_K(void) const { return K; } const VectorI& get_hash_table(void) const { return hash_t; } const VectorI& get_vertex_subset(void) const { return vertexSubset; } protected: template<typename ValUpdaterFctr> void extract_subgraph_by_vertex(VectorI& row_ptr_, VectorI& col_ind_, ValUpdaterFctr fctrv) { typedef typename VectorI::value_type IndexT; //typedef typename VectorV::value_type ValueT; typedef typename VectorB::value_type ValueB; if( vertexSubset.empty() ) return; //nothing to do //Pre-condition (new): vertexSubset sorted! size_t nrows_subg = vertexSubset.size(); //step 1: subgraph *upper-bound* //of #non-zeros per row: VectorI nzs(nrows_subg, 0); //count_nz_per_row(row_ptr_, vertexSubset, nzs); NzCounter<VectorI> count_nzs(row_ptr_); thrust::transform(vertexSubset.begin(), vertexSubset.end(), nzs.begin(), count_nzs); //step 2: offsets of where each //subgraph row *could* have entries; // //TODO: change to an exclusive prefix scan! // VectorI offsets(nrows_subg, 0); thrust::exclusive_scan(nzs.begin(), nzs.end(), offsets.begin()); //step 3: total # non-zero entries; this is used as upper bound //for # non-zero entries of subgraph; // size_t nnz = offsets.back()+nzs.back(); VectorI range(nnz, IndexT(0));//increasing sequence thrust::sequence(range.begin(), range.end(),IndexT(0));//or, counting_iterator VectorB hash_rows; size_t hash_sz = make_hash(vertexSubset, hash_rows, true); //step 4: create hash map between nz entry and corresponding // I[], J[], V[], Used[] SoA; update vals_ // I.assign(nnz, IndexT(0)); J.assign(nnz, IndexT(0)); K.assign(nnz, IndexT(0)); VectorB U(nnz, ValueB(0)); size_t actual_nnz = fill_hash_nz2ijv(hash_rows, range, nzs, offsets, vertexSubset, row_ptr_, col_ind_, I, J, K, U); //##### Change 4: subg row_ptr extraction requires renumbering first ##### renumber_indices(vertexSubset, hash_t); VectorI I_sg(actual_nnz, IndexT(0)); get_renumbered_indices(I, //in: in=sources; hash_t, //in: renumbering hash table I_sg); //out:renumbered: sources[] #ifdef DEBUG_NEW std::cout<<"I_sg: "; print_v(I_sg, std::cout); std::cout<<"nnz="<<actual_nnz<<std::endl; std::cout<<"I.size()="<<I.size()<<std::endl; #endif //#################################### Change 2: //step 5: extract subgraph CSR data: // make_subg_row_ptr(actual_nnz, nrows_subg, I_sg, row_ptr_subg); //step 6: update col_ind and re-number: // col_ind_subg.assign(actual_nnz, IndexT(0)); //#################################### Change 3: get_renumbered_indices(J, //in: in=sinks; hash_t, //in: renumbering hash table col_ind_subg);//out:renumbered: col_ind[] //##### Change 7: get edge subset from original graph ##### edgeSubset = K; // copy !!! 
//act (or not) on values: // fctrv.update_from(K); } //##### Change 6: separate logic for extraction by edges ##### // template<typename ValUpdaterFctr> void extract_subgraph_by_edge(VectorI& row_ptr, VectorI& col_ind, ValUpdaterFctr fctrv) { if( edgeSubset.empty() ) return; //nothing to do size_t nedges = edgeSubset.size(); K = edgeSubset; // copy!!! VectorI sinks0(nedges); //get edge sinks: //just extract the col_ind //values at indices specified by eSub: // // //old solution... // Filter<Container, ValT> filter(col_ind); // thrust::transform(eSub.begin(), eSub.end(), // sinks0.begin(), // filter); // //...replace with gather: // thrust::gather(edgeSubset.begin(), edgeSubset.end(), //range of indexes... col_ind.begin(), //...into source sinks0.begin()); //destination (result) //subg_col_ind[] = sink entries corresponding //to *sorted* source entries //at this point both sources and sinks are sorted, //but that doesn't mean that sinks[i] and sources[i] form edges... //(use multi_sort_SoA?) // //Actually: since sources[] should come out sorted regardless of sinks[] //the corresponding sinks[] are just sinks0[] before sorting it! // //J[] is just the unsorted sinks: // J = sinks0; // copy!!! #ifdef DEBUG_EDGES std::cout<<"sinks:"; print_v(J, std::cout); #endif //sort sinks to later do a merge with them: // thrust::sort(sinks0.begin(), sinks0.end()); //hash[val_i] = i (index of first occurence of val_i, if val_i exists in v[]; // else, last occurence of closest value less than val_i): // //(not ot be confused with renumbering hash, hash_t) // VectorI hash; reverse_hash(row_ptr, hash); #ifdef DEBUG_EDGES std::cout<<"hash:"; print_v(hash, std::cout); #endif //now get sources: //apply hash on eSub, //i.e., extract the hash //values at indices specified by eSub: //(the result should be sorted, // because eSub is assumed sorted // and hash has indices of a sorted array: row_ptr) // I.assign(nedges, IndexT(0)); //I[] = sources !!! // //old solution... // Filter<Container, ValT> hash_app(hash); // thrust::transform(eSub.begin(), eSub.end(), // sources.begin(), // hash_app); // //replaced by gather... // thrust::gather(edgeSubset.begin(), edgeSubset.end(), //range of indexes... hash.begin(), //...into source I.begin()); //destination (result) assert( sinks0.size() == I.size() ); #ifdef DEBUG_EDGES std::cout<<"sources:"; print_v(I, std::cout); #endif //now merge sinks with sources // VectorI v(nedges<<1);//twice as many edges... 
thrust::merge(sinks0.begin(), sinks0.end(), I.begin(), I.end(), v.begin()); size_t nrows_subg = count_get_distinct(v, vertexSubset); //renumber row (vertex) indices: // renumber_indices(vertexSubset, hash_t); get_renumbered_indices(I, //in: in=sources; hash_t, //in: renumbering hash table sinks0); //out:renumbered: sources[] //create subgraph row_ptr, //operating on sources: // row_ptr_subg.resize(nrows_subg+1); make_subg_row_ptr(nedges, //==actual_nnz nrows_subg, sinks0, row_ptr_subg); //renumber subg_col_ind: // col_ind_subg.resize(nedges); get_renumbered_indices(J, //in: in=sinks; hash_t, //in: renumbering hash table col_ind_subg); //out:renumbered: subg_col_ind[] //act (or not) on values: // fctrv.update_from(K); } private: VectorI vertexSubset; //original graph vertex indices used in subgraph //#################################### Change 7: // VectorI edgeSubset; //original graph edge indices used in subgraph VectorV vals_subg; //not used for non-valued graphs VectorI row_ptr_subg; VectorI col_ind_subg; //useful for mapping graph <--> subgraph: // VectorI I; //subgraph's set of (original graph) row indices VectorI J; //subgraph's set of (original graph) col indices //hence, (I[k], J[k]) is an edge in subgraph VectorI K; //subgraph's set of (original graph) edge indices VectorI hash_t; const bool is_vertex_extraction; }; //Acyclic Visitor // (A. Alexandrescu, "Modern C++ Design", Section 10.4), // where *concrete* Visitors must be parameterized by all // the possibile template args of the Visited classes (visitees); // //Visitor for SubGraph extraction: // template<typename VectorI, typename VectorV> struct SubGraphExtractorVisitor: VisitorBase, Visitor<Graph<typename VectorI::value_type> >, Visitor<CsrGraph<typename VectorI::value_type> >, Visitor<ValuedCsrGraph<typename VectorI::value_type, typename VectorV::value_type> >, Visitor<MultiValuedCsrGraph<typename VectorI::value_type, typename VectorV::value_type> > { typedef typename VectorI::value_type IndexType_; typedef typename VectorV::value_type ValueType_; typedef typename VectorPtrT<typename VectorI::value_type,VectorI>::PtrT PtrI; //TODO: avoid copy from raw pointer // SubGraphExtractorVisitor(CsrGraph<IndexType_>& graph, const VectorI& vSub, cudaStream_t stream): row_ptr_(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+graph.get_num_vertices()+1), col_ind_(graph.get_raw_column_indices(), graph.get_raw_column_indices()+graph.get_num_edges()), extractor_(vSub), stream_(stream) { } //TODO: avoid copy from raw pointer // SubGraphExtractorVisitor(CsrGraph<IndexType_>& graph, const VectorI& eSub, cudaStream_t stream, bool use_edges): //just to differentiate vertex vs. edge semantics; value not used row_ptr_(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+graph.get_num_vertices()+1), col_ind_(graph.get_raw_column_indices(), graph.get_raw_column_indices()+graph.get_num_edges()), extractor_(eSub, false), //different semantics! stream_(stream) { } void Visit(Graph<IndexType_>& graph) { //no-op... 
} void Visit(CsrGraph<IndexType_>& graph) { // size_t g_nrows = graph.get_num_vertices(); // size_t g_nnz = graph.get_num_edges(); // VectorI row_ptr(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+g_nrows+1); // VectorI col_ind(graph.get_raw_column_indices(), graph.get_raw_column_indices()+g_nnz); extractor_(row_ptr_, col_ind_);//TODO: modify operator to work directly with PtrI size_t rowptr_sz = extractor_.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t subg_nrows = rowptr_sz-1; size_t subg_nnz = extractor_.get_subg_nnz(); subgraph_ = new CsrGraph<IndexType_>(subg_nrows, subg_nnz, stream_); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(extractor_.get_row_ptr().begin(), extractor_.get_row_ptr().end(), subgraph_->get_raw_row_offsets()); thrust::copy(extractor_.get_col_ind().begin(), extractor_.get_col_ind().end(), subgraph_->get_raw_column_indices()); } //might not need to implement following Visit methods, //the one above for CsrGraph might work for derived //classes... void Visit(ValuedCsrGraph<IndexType_,ValueType_>& graph) { size_t g_nrows = graph.get_num_vertices(); size_t g_nnz = graph.get_num_edges(); // VectorI row_ptr(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+g_nrows+1); // VectorI col_ind(graph.get_raw_column_indices(), graph.get_raw_column_indices()+g_nnz); VectorV vals(graph.get_raw_values(), graph.get_raw_values()+g_nnz); extractor_(vals, row_ptr_, col_ind_);//TODO: modify operator to work directly with PtrI size_t rowptr_sz = extractor_.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t subg_nrows = rowptr_sz-1; size_t subg_nnz = extractor_.get_subg_nnz(); ValuedCsrGraph<IndexType_,ValueType_>* subg = new ValuedCsrGraph<IndexType_,ValueType_>(subg_nrows, subg_nnz, stream_); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(extractor_.get_row_ptr().begin(), extractor_.get_row_ptr().end(), subg->get_raw_row_offsets()); thrust::copy(extractor_.get_col_ind().begin(), extractor_.get_col_ind().end(), subg->get_raw_column_indices()); thrust::copy(extractor_.get_vals().begin(), extractor_.get_vals().end(), subg->get_raw_values()); subgraph_ = subg; } void Visit(MultiValuedCsrGraph<IndexType_,ValueType_>& graph) { size_t g_nrows = graph.get_num_vertices(); size_t g_nnz = graph.get_num_edges(); // VectorI row_ptr(graph.get_raw_row_offsets(), graph.get_raw_row_offsets()+g_nrows+1); // VectorI col_ind(graph.get_raw_column_indices(), graph.get_raw_column_indices()+g_nnz); /// VectorV vals(graph.get_raw_values(), graph.get_raw_values()+g_nnz); ///extractor_(vals, row_ptr_, col_ind_); extractor_(row_ptr_, col_ind_);//TODO: modify operator to work directly with PtrI size_t rowptr_sz = extractor_.get_row_ptr().size(); assert( rowptr_sz >= 1 ); size_t subg_nrows = rowptr_sz-1; size_t subg_nnz = extractor_.get_subg_nnz(); MultiValuedCsrGraph<IndexType_,ValueType_>* subg = new MultiValuedCsrGraph<IndexType_,ValueType_>(subg_nrows, subg_nnz, stream_); //TODO: more efficient solution: investigate if/how copy can be avoided // thrust::copy(extractor_.get_row_ptr().begin(), extractor_.get_row_ptr().end(), subg->get_raw_row_offsets()); thrust::copy(extractor_.get_col_ind().begin(), extractor_.get_col_ind().end(), subg->get_raw_column_indices()); ///thrust::copy(extractor_.get_vals().begin(), extractor_.get_vals().end(), subg->get_raw_values()); //additional data extraction: // get_vertex_data(graph, extractor_.get_vertex_subset(), *subg); get_edge_data(graph, extractor_.get_K(), *subg); 
subgraph_ = subg; } const SubGraphExtractorFunctor<VectorI, VectorV>& get_extractor(void) const { return extractor_; } CsrGraph<IndexType_>* get_subgraph(void) // TODO: change to unique_ptr, when moving to C++1* { return subgraph_; } protected: void get_edge_data(MultiValuedCsrGraph<IndexType_,ValueType_>& graph_src, const VectorI& K, //subset of graph edge set MultiValuedCsrGraph<IndexType_,ValueType_>& graph_dest) { typedef thrust::device_ptr<ValueType_> PtrV; size_t ng = graph_src.get_num_edge_dim(); size_t nedges = K.size(); assert( nedges == graph_dest.get_num_edges() ); graph_dest.allocateEdgeData(ng, stream_); for(unsigned int i=0;i<ng;++i) { Vector<ValueType_>& v_src = graph_src.get_edge_dim(i); Vector<ValueType_>& v_dest = graph_dest.get_edge_dim(i); size_t n_src = v_src.get_size(); PtrV ptr_src(v_src.raw()); range_view<PtrV> rv_src(ptr_src, ptr_src+n_src); size_t n_dest = v_dest.get_size(); assert( nedges == n_dest ); PtrV ptr_dest(v_dest.raw()); range_view<PtrV> rv_dest(ptr_dest, ptr_dest+n_dest); thrust::gather(K.begin(), K.end(), //map of indices rv_src.begin(), //source rv_dest.begin()); //source[map] } } void get_vertex_data(MultiValuedCsrGraph<IndexType_,ValueType_>& graph_src, const VectorI& K,// subset of graph vertex set == vSub MultiValuedCsrGraph<IndexType_,ValueType_>& graph_dest) { typedef thrust::device_ptr<ValueType_> PtrV; size_t ng = graph_src.get_num_vertex_dim(); size_t nrows = K.size();//remember, K==vSub, here! assert( nrows == graph_dest.get_num_vertices() ); graph_dest.allocateVertexData(ng, stream_); for(unsigned int i=0;i<ng;++i) { Vector<ValueType_>& v_src = graph_src.get_vertex_dim(i); Vector<ValueType_>& v_dest = graph_dest.get_vertex_dim(i); size_t n_src = v_src.get_size(); PtrV ptr_src(v_src.raw()); range_view<PtrV> rv_src(ptr_src, ptr_src+n_src); size_t n_dest = v_dest.get_size(); assert( nrows == n_dest ); PtrV ptr_dest(v_dest.raw()); range_view<PtrV> rv_dest(ptr_dest, ptr_dest+n_dest); thrust::gather(K.begin(), K.end(), //map of indices rv_src.begin(), //source rv_dest.begin()); //source[map] } } private: VectorI row_ptr_; VectorI col_ind_; SubGraphExtractorFunctor<VectorI, VectorV> extractor_; cudaStream_t stream_; CsrGraph<IndexType_>* subgraph_; // to be constructed }; template<typename T> struct BoundValidator { BoundValidator(const T& lower_bound, const T& upper_bound): lbound_(lower_bound), ubound_(upper_bound) { } __host__ __device__ bool operator() (T k) { return ( k < lbound_ || k > ubound_ ); } private: T lbound_; T ubound_; }; template<typename Container> struct NotSortedAscendingly { typedef typename Container::value_type VType; typedef typename VectorPtrT<VType,Container>::PtrT PtrT; NotSortedAscendingly(Container& rv, const size_t& sz): ptr_(&rv[0]), sz_(sz) { } __host__ __device__ bool operator() (VType k) { if( k+1 < sz_ ) return ptr_[k+1] < ptr_[k]; else return false; } private: PtrT ptr_;//no reference! must be copy constructed size_t sz_; }; template<typename VectorI> void validate_input(VectorI& v, typename VectorI::value_type sz) { typedef typename VectorI::value_type IndexT; size_t n = v.size(); if( n == 0 ) FatalError("0-sized array input in subgraph extraction.",NVGRAPH_ERR_BAD_PARAMETERS); IndexT lb = 0; IndexT ub = sz-1; BoundValidator<IndexT> bvld(lb, ub);//closed interval! 
typename VectorI::iterator pos = thrust::find_if(v.begin(), v.end(), bvld); if( pos != v.end() ) FatalError("Input is not a valid subset of the graph's corresponding set.",NVGRAPH_ERR_BAD_PARAMETERS); VectorI seq(n,0); thrust::sequence(seq.begin(), seq.end()); NotSortedAscendingly<VectorI> nsa_f(v, n); pos = thrust::find_if(seq.begin(), seq.end(), nsa_f); if( pos != seq.end() ) FatalError("Input array not sorted in ascending order.",NVGRAPH_ERR_BAD_PARAMETERS); pos = thrust::unique(v.begin(), v.end()); if( pos != v.end() ) FatalError("Input array has duplicates.",NVGRAPH_ERR_BAD_PARAMETERS); } template<typename IndexT, typename ValueT> CsrGraph<IndexT>* extract_from_vertex_subset(CsrGraph<IndexT>& graph, IndexT* pV, size_t n, cudaStream_t stream) { typedef thrust::device_vector<IndexT> VectorI; typedef thrust::device_vector<ValueT> VectorV; VectorI vSub(pV, pV+n); validate_input(vSub, graph.get_num_vertices()); SubGraphExtractorVisitor<VectorI, VectorV> visitor(graph, vSub, stream); graph.Accept(visitor); return visitor.get_subgraph(); } template<typename IndexT, typename ValueT> CsrGraph<IndexT>* extract_from_edge_subset(CsrGraph<IndexT>& graph, IndexT* pV, size_t n, cudaStream_t stream) { typedef thrust::device_vector<IndexT> VectorI; typedef thrust::device_vector<ValueT> VectorV; VectorI vSub(pV, pV+n); validate_input(vSub, graph.get_num_edges()); SubGraphExtractorVisitor<VectorI, VectorV> visitor(graph, vSub, stream, true); graph.Accept(visitor); return visitor.get_subgraph(); } }//end namespace #endif
0
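A minimal calling sketch for the two entry points at the end of the header above. The CsrGraph construction is elided, and the id array is shown in host memory for brevity (the extractor copies it into a thrust::device_vector); the subset must be sorted ascending, in range and duplicate-free, which is exactly what validate_input enforces:

#include <vector>
#include <graph_concrete_visitors.hxx>

// Extract the subgraph induced by a vertex subset.  The caller owns the
// returned graph object.
nvgraph::CsrGraph<int>* induced_subgraph(nvgraph::CsrGraph<int>& graph,
                                         std::vector<int>& vertex_subset, // e.g. {2, 5, 9}
                                         cudaStream_t stream)
{
    return nvgraph::extract_from_vertex_subset<int, float>(
        graph, vertex_subset.data(), vertex_subset.size(), stream);
}

// The edge-id flavour is symmetric: the indices refer to positions in the
// graph's col_ind/values arrays.
nvgraph::CsrGraph<int>* edge_subgraph(nvgraph::CsrGraph<int>& graph,
                                      std::vector<int>& edge_subset,
                                      cudaStream_t stream)
{
    return nvgraph::extract_from_edge_subset<int, float>(
        graph, edge_subset.data(), edge_subset.size(), stream);
}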